code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M)
---|---|---|---|---|---
package flagship.console.layout
/**
* User: mtrupkin
* Date: 7/21/13
*/
case class LayoutData(snap: Boolean, grab: Boolean)
case class Layout(
left: LayoutData = LayoutData.NONE,
right: LayoutData = LayoutData.NONE,
top: LayoutData = LayoutData.NONE,
bottom: LayoutData = LayoutData.NONE)
object Layout {
val NONE: Layout = new Layout()
}
object LayoutData {
val NONE: LayoutData = new LayoutData(false, false)
val GRAB: LayoutData = new LayoutData(false, true)
val SNAP: LayoutData = new LayoutData(true, false)
}
| mtrupkin/brace-for-impact | console-lib/src/main/scala/flagship/console/layout/Layout.scala | Scala | mit | 538 |
/*
Listok is a dialect of LISP
Copyright (C) 2011 Konstantin Boukreev
[email protected]
This file is part of Listok.
Listok is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
Listok is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
package ru.listok
import util.parsing.combinator.{JavaTokenParsers, RegexParsers}
object Parser extends JavaTokenParsers {
//object Parser extends RegexParsers {
override def stringLiteral: Parser[String] =
("\""+"""([^"\p{Cntrl}\\]|\\[\\/bfnrte]|\\u[a-fA-F0-9]{4})*"""+"\"").r // added \e
lazy val regex_keyword = """:([a-zA-Z_~%!=#<>\-\+\*\?\^\&\d])*""".r
lazy val regex_symbol = """[a-zA-Z_~%!=<>\-\+\*\?\^\&\/\d\.]([a-zA-Z_~%!=<>:@#\-\+\*\?\^\&\/\d\.])*""".r
lazy val lparen: Parser[String] = "("
lazy val rparen: Parser[String] = ")"
lazy val str_quote: Parser[String] = """quote\s+""".r
lazy val char_quote: Parser[String] = "'"
lazy val str_lambda: Parser[String] = "lambda" | "\u03BB" //"λ" greek small letter lamda
lazy val str_defmacro: Parser[String] = "defmacro"
lazy val sform_name = """(def|defun|defconstant|setf|if|cond|do|and|or|spawn|match|defstruct|assert|collect|let|let\*)\s+""".r
//symbol or number
lazy val symbol: Parser[Lcommon] = regex_symbol ^^ {
case "t" => Ltrue
case "nil" => Lnil
case s => isNumber(s) match {
case Some(x) => x
case None => Lsymbol(Symbol(s))
}
}
lazy val keyword: Parser[Lkeyword] = regex_keyword ^^ { s =>
Lkeyword(Symbol(s.substring(1))) }
lazy val sform: Parser[Llist] = lparen ~> sform_name ~ (form+) <~ rparen ^^ {
case name ~ forms => Llist(SpecialForms.make(Symbol(name.trim)) :: forms)
}
lazy val char = new ParserChar
lazy val string: Parser[Lstring] = stringLiteral ^^ {s =>
Lstring(Util.unescape(s.substring(1, s.length - 1)))
}
lazy val lambda_list: Parser[List[Lcommon]] = lparen ~> (symbol*) <~ rparen
lazy val lambda: Parser[Llambda] = lparen ~> str_lambda ~> lambda_list ~ (form*) <~ rparen ^^ {
case ll ~ body => Llambda(ll, body)
}
lazy val list: Parser[Llist] = lparen ~> (form*) <~ rparen ^^ { Llist(_) }
lazy val quote: Parser[Lquote] = lparen ~> str_quote ~> form <~ rparen ^^ { Lquote(_) }
lazy val comma: Parser[Lmacrocomma] = "," ~> form ^^ { Lmacrocomma(_, false) }
lazy val commasplice: Parser[Lmacrocomma] = ",@" ~> form ^^ { Lmacrocomma(_, true) }
lazy val backquote: Parser[Lmacrobackquote] = "`" ~> form ^^ { Lmacrobackquote(_) }
lazy val regex = new ParseRegex
lazy val form: Parser[Lcommon] =
char | keyword | symbol | string | quote | lambda |
sform | list | qform | comma | backquote | commasplice | regex
lazy val qform: Parser[Lquote] = char_quote ~> form ^^ { Lquote(_) }
lazy val macro_comma: Parser[Lmacrocomma] = "," ~> macro_form ^^ { Lmacrocomma(_, false) }
lazy val macro_commasplice: Parser[Lmacrocomma] = ",@" ~> macro_form ^^ { Lmacrocomma(_, true) }
lazy val macro_backquote: Parser[Lmacrobackquote] = "`" ~> macro_form ^^ { Lmacrobackquote(_) }
lazy val macro_list: Parser[Llist] = lparen ~> (macro_form*) <~ rparen ^^ { Llist(_) }
lazy val macro_sform: Parser[Llist] = lparen ~> sform_name ~ (macro_form+) <~ rparen ^^ {
case name ~ forms => Llist(SpecialForms.make(Symbol(name.trim)) :: forms)
}
lazy val macro_form: Parser[Lcommon] =
char | keyword | symbol | string | quote |
macro_sform | macro_list | macro_comma | macro_commasplice | macro_backquote
lazy val defmacro: Parser[Ldefmacro] = lparen ~> str_defmacro ~> regex_symbol ~ lambda_list ~ (macro_form*) <~ rparen ^^ {
case ident ~ ll ~ body => Ldefmacro(Symbol(ident), ll, body)
}
//override protected val whiteSpace = """(\s+|;.*\n)""".r
//override protected val whiteSpace = """(\s+(;.*\s+)?|;.*\s+)+""".r
override protected val whiteSpace = """(\s+|\s*;.*\s+)+""".r
lazy val prog: Parser[List[Lcommon]] = (defmacro | form)*
//def read(text: String): Either[String, Llist] = {
def read(text: String): Either[String, List[Lcommon]] = {
parseAll(prog, text) match {
case Success(form, _) => Right(form)
case NoSuccess(msg, in) => Left(ppError(msg, in.pos))
}
}
def ppError(msg: String, pos: scala.util.parsing.input.Position) =
pos.line + ": " + msg + "\n" + pos.longString
class ParseRegex extends Parser[Lregex] {
def apply(in: Input): ParseResult[Lregex] = {
val source = in.source
val offset = in.offset
val start = handleWhiteSpace(source, offset)
if (start + 2 < source.length) { // #// minimum regex
val s = source.subSequence(start, start+2)
if (s != "#/")
return Failure("`#/' expected but "+s+" found", in.drop(start - offset))
var prev = ' '
var i = start + 2
val sb = new StringBuilder
while (i < source.length) {
val ch = source.charAt(i)
if (ch == '/') {
if (prev != '\\') {
return Success(Lregex(sb.toString), in.drop(i + 1 - offset))
}
else {
sb.deleteCharAt(sb.length - 1) // drop escape
}
}
sb.append(ch)
prev = ch
i += 1
}
}
Failure("`/' expected but end of source found", in.drop(start - offset))
}
}
def parseRegex(text: String) = {
parseAll(new ParseRegex, text) match {
case Success(r, _) => r
case NoSuccess(msg, in) => throw new RuntimeException(ppError(msg, in.pos))
}
}
class ParserChar extends Parser[Lchar] {
def apply(in: Input): ParseResult[Lchar] = {
val source = in.source
val offset = in.offset
val start = handleWhiteSpace(source, offset)
if (start + 2 >= source.length) // #\a minimum char
return Failure("char expected but end of source found", in.drop(start - offset))
val s = source.subSequence(start, start+2)
if (s != "#\\")
return Failure("`#\\' expected but "+s+" found", in.drop(start - offset))
var i = start + 2
val sb = new StringBuilder
var ok = true
while (ok && (i < source.length)) {
val ch = source.charAt(i)
if (ch == ' ')
ok = false
else if (sb.length > 0 && ch == ')')
ok = false
else {
sb.append(ch)
i += 1
}
}
val tin = in.drop(i - offset)
if (sb.length == 1) {
return Success(Lchar(sb.charAt(0)), tin)
}
else {
sb.toString match {
case "Space" =>
return Success(Lchar(' '), tin)
case "Newline" | "Linefeed" =>
return Success(Lchar('\\n'), tin)
case "Return" =>
return Success(Lchar('\\r'), tin)
case "Tab" =>
return Success(Lchar('\\t'), tin)
case "Backspace" =>
return Success(Lchar('\\b'), tin)
case "Page" =>
return Success(Lchar('\\f'), tin)
case _ =>
return Failure("char expected but "+s+" found", in.drop(start - offset))
}
}
}
}
lazy val isnumber = """-?(\d+(\.\d*)?|\d*\.\d+)([eE][+-]?\d+)?[fFdD]?""".r
lazy val isinteger = """-?\d+""".r
//def isKeyword(s: String) = s.head == ':' match {
// case false => None
// case true => Lkeyword(Symbol(s.substring(1)))
//}
def isNumber(s: String): Option[Lcommon] = {
//ru.listok.log("isnumber " + s + " m=" + isnumber.pattern.matcher(s).matches())
if (isnumber.pattern.matcher(s).matches()) {
if (isinteger.pattern.matcher(s).matches()) {
if (s.length < 10) // Int.MaxValue = 2147483647 has length 10
Some(Lint(s.toInt))
else if (s.length < 19) // Long.MaxValue = 9223372036854775807 has length 19
Some(builtin.Numbers.toLnumeric(s.toLong))
else
Some(builtin.Numbers.toLnumeric(BigInt(s)))
}
else
Some(Lfloat(s.toDouble))
}
else
None
}
}
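For reference, `read` is the parser's public entry point: it parses a whole program into a `List[Lcommon]`, or returns an error string built by `ppError`. A minimal sketch of a caller follows; the demo object and the sample program are illustrative, not part of the original file:

// Sketch only: exercises Parser.read with a small Listok program.
object ParserDemo {
  def main(args: Array[String]): Unit = {
    Parser.read("(defun add2 (x) (+ x 2)) (add2 40)") match {
      case Right(forms) => forms.foreach(println)          // parsed Lcommon forms
      case Left(err)    => println("parse error: " + err)  // "line: message" plus context
    }
  }
}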
| kolyvan/listok | src/main/scala/parser.scala | Scala | lgpl-3.0 | 8,493 |
package edu.rice.habanero.benchmarks.concdict
import java.util
import akka.actor.{ActorRef, Props}
import edu.rice.habanero.actors.{AkkaActor, AkkaActorState}
import edu.rice.habanero.benchmarks.concdict.DictionaryConfig.{DoWorkMessage, EndWorkMessage, ReadMessage, WriteMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object DictionaryAkkaActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new DictionaryAkkaActorBenchmark)
}
private final class DictionaryAkkaActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
DictionaryConfig.parseArgs(args)
}
def printArgInfo() {
DictionaryConfig.printArgs()
}
def runIteration() {
val numWorkers: Int = DictionaryConfig.NUM_ENTITIES
val numMessagesPerWorker: Int = DictionaryConfig.NUM_MSGS_PER_WORKER
val system = AkkaActorState.newActorSystem("Dictionary")
val master = system.actorOf(Props(new Master(numWorkers, numMessagesPerWorker)))
AkkaActorState.startActor(master)
AkkaActorState.awaitTermination(system)
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
}
}
private class Master(numWorkers: Int, numMessagesPerWorker: Int) extends AkkaActor[AnyRef] {
private final val workers = new Array[ActorRef](numWorkers)
private final val dictionary = context.system.actorOf(Props(new Dictionary(DictionaryConfig.DATA_MAP)))
private var numWorkersTerminated: Int = 0
override def onPostStart() {
AkkaActorState.startActor(dictionary)
var i: Int = 0
while (i < numWorkers) {
workers(i) = context.system.actorOf(Props(new Worker(self, dictionary, i, numMessagesPerWorker)))
AkkaActorState.startActor(workers(i))
workers(i) ! DoWorkMessage.ONLY
i += 1
}
}
override def process(msg: AnyRef) {
if (msg.isInstanceOf[DictionaryConfig.EndWorkMessage]) {
numWorkersTerminated += 1
if (numWorkersTerminated == numWorkers) {
dictionary ! EndWorkMessage.ONLY
exit()
}
}
}
}
private class Worker(master: ActorRef, dictionary: ActorRef, id: Int, numMessagesPerWorker: Int) extends AkkaActor[AnyRef] {
private final val writePercent = DictionaryConfig.WRITE_PERCENTAGE
private var messageCount: Int = 0
private final val random = new util.Random(id + numMessagesPerWorker + writePercent)
override def process(msg: AnyRef) {
messageCount += 1
if (messageCount <= numMessagesPerWorker) {
val anInt: Int = random.nextInt(100)
if (anInt < writePercent) {
dictionary ! new WriteMessage(self, random.nextInt, random.nextInt)
} else {
dictionary ! new ReadMessage(self, random.nextInt)
}
} else {
master ! EndWorkMessage.ONLY
exit()
}
}
}
private class Dictionary(initialState: util.Map[Integer, Integer]) extends AkkaActor[AnyRef] {
private[concdict] final val dataMap = new util.HashMap[Integer, Integer](initialState)
override def process(msg: AnyRef) {
msg match {
case writeMessage: DictionaryConfig.WriteMessage =>
val key = writeMessage.key
val value = writeMessage.value
dataMap.put(key, value)
val sender = writeMessage.sender.asInstanceOf[ActorRef]
sender ! new DictionaryConfig.ResultMessage(self, value)
case readMessage: DictionaryConfig.ReadMessage =>
val value = dataMap.get(readMessage.key)
val sender = readMessage.sender.asInstanceOf[ActorRef]
sender ! new DictionaryConfig.ResultMessage(self, value)
case _: DictionaryConfig.EndWorkMessage =>
printf(BenchmarkRunner.argOutputFormat, "Dictionary Size", dataMap.size)
exit()
case _ =>
System.err.println("Unsupported message: " + msg)
}
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/concdict/DictionaryAkkaActorBenchmark.scala | Scala | gpl-2.0 | 4,066 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.util.Locale
/**
* Builds a map in which keys are case insensitive. Input map can be accessed for cases where
* case-sensitive information is required. The primary constructor is marked private to avoid
* nested case-insensitive map creation, otherwise the keys in the original map will become
* case-insensitive in this scenario.
* Note: CaseInsensitiveMap is serializable. However, after transformation, e.g. `filterKeys()`,
* it may no longer be serializable.
*/
class CaseInsensitiveMap[T] private (val originalMap: Map[String, T]) extends Map[String, T]
with Serializable {
val keyLowerCasedMap = originalMap.map(kv => kv.copy(_1 = kv._1.toLowerCase(Locale.ROOT)))
override def get(k: String): Option[T] = keyLowerCasedMap.get(k.toLowerCase(Locale.ROOT))
override def contains(k: String): Boolean =
keyLowerCasedMap.contains(k.toLowerCase(Locale.ROOT))
override def +[B1 >: T](kv: (String, B1)): Map[String, B1] = {
new CaseInsensitiveMap(originalMap + kv)
}
override def iterator: Iterator[(String, T)] = keyLowerCasedMap.iterator
override def -(key: String): Map[String, T] = {
new CaseInsensitiveMap(originalMap.filter(!_._1.equalsIgnoreCase(key)))
}
}
object CaseInsensitiveMap {
def apply[T](params: Map[String, T]): CaseInsensitiveMap[T] = params match {
case caseInsensitiveMap: CaseInsensitiveMap[T] => caseInsensitiveMap
case _ => new CaseInsensitiveMap(params)
}
}
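The class comment above is easiest to see in action: `get` and `contains` normalize the key, `originalMap` retains the caller's casing, and `+` re-wraps the original map so the result stays case-insensitive. A minimal sketch (the option names are invented for illustration):

// Sketch only: hypothetical option map, keys chosen for illustration.
val opts = CaseInsensitiveMap(Map("Path" -> "/tmp/data", "MaxRetries" -> "3"))
assert(opts.get("path").contains("/tmp/data"))  // lookup is case-insensitive
assert(opts.get("MAXRETRIES").contains("3"))
assert(opts.originalMap.contains("Path"))       // original casing is preserved
// `+` goes through the private constructor again, so dynamic dispatch keeps
// lookups case-insensitive on the result:
val opts2 = opts + ("Verbose" -> "true")
assert(opts2.get("verbose").contains("true"))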
| WindCanDie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala | Scala | apache-2.0 | 2,295 |
package skuber.api.client.impl
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext}
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.typesafe.config.{Config, ConfigFactory}
import javax.net.ssl.SSLContext
import play.api.libs.json.{Format, Writes, Reads}
import skuber._
import skuber.api.client.exec.PodExecImpl
import skuber.api.client.{K8SException => _, _}
import skuber.api.security.{HTTPRequestAuth, TLS}
import skuber.api.watch.{LongPollingPool, Watch, WatchSource}
import skuber.json.PlayJsonSupportForAkkaHttp._
import skuber.json.format.apiobj.statusReads
import skuber.json.format.{apiVersionsFormat, deleteOptionsFmt, namespaceListFmt}
import skuber.api.patch._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}
/**
* @author David O'Riordan
* This class implements the KubernetesClient API. It uses the Akka HTTP client to handle the requests to
* the Kubernetes API server.
*/
class KubernetesClientImpl private[client] (
val requestMaker: (Uri, HttpMethod) => HttpRequest, // builds the requests to send
override val clusterServer: String, // the url of the target cluster Kubernetes API server
val requestAuth: AuthInfo, // specifies the authentication (if any) to be added to requests
override val namespaceName: String, // by default requests will target this specified namespace on the cluster
val watchContinuouslyRequestTimeout: Duration,
val watchContinuouslyIdleTimeout: Duration,
val watchPoolIdleTimeout: Duration,
val watchSettings: ConnectionPoolSettings,
val podLogSettings: ConnectionPoolSettings,
val sslContext: Option[SSLContext], // provides the Akka client with the SSL details needed for https connections to the API server
override val logConfig: LoggingConfig,
val closeHook: Option[() => Unit])(implicit val actorSystem: ActorSystem, val executionContext: ExecutionContext)
extends KubernetesClient
{
val log = Logging.getLogger(actorSystem, "skuber.api")
val connectionContext = sslContext
.map { ssl =>
ConnectionContext.https(ssl, enabledProtocols = Some(scala.collection.immutable.Seq("TLSv1.2", "TLSv1")))
}
.getOrElse(Http().defaultClientHttpsContext)
private val clusterServerUri = Uri(clusterServer)
private var isClosed = false
private[skuber] def invokeWatch(request: HttpRequest)(implicit lc: LoggingContext): Future[HttpResponse] = invoke(request, watchSettings)
private[skuber] def invokeLog(request: HttpRequest)(implicit lc: LoggingContext): Future[HttpResponse] = invoke(request, podLogSettings)
private[skuber] def invoke(request: HttpRequest, settings: ConnectionPoolSettings = ConnectionPoolSettings(actorSystem))(implicit lc: LoggingContext): Future[HttpResponse] = {
if (isClosed) {
logError("Attempt was made to invoke request on closed API request context")
throw new IllegalStateException("Request context has been closed")
}
logInfo(logConfig.logRequestBasic, s"about to send HTTP request: ${request.method.value} ${request.uri.toString}")
val responseFut = Http().singleRequest(request, settings = settings, connectionContext = connectionContext)
responseFut onComplete {
case Success(response) => logInfo(logConfig.logResponseBasic,s"received response with HTTP status ${response.status.intValue()}")
case Failure(ex) => logError("HTTP request resulted in an unexpected exception",ex)
}
responseFut
}
private[skuber] def buildRequest[T <: TypeMeta](
method: HttpMethod,
rd: ResourceDefinition[_],
nameComponent: Option[String],
query: Option[Uri.Query] = None,
namespace: String = namespaceName): HttpRequest =
{
val nsPathComponent = if (rd.spec.scope == ResourceSpecification.Scope.Namespaced) {
Some("namespaces/" + namespace)
} else {
None
}
val k8sUrlOptionalParts = List(
clusterServer,
rd.spec.apiPathPrefix,
rd.spec.group,
rd.spec.defaultVersion,
nsPathComponent,
rd.spec.names.plural,
nameComponent)
val k8sUrlParts = k8sUrlOptionalParts collect {
case p: String if p != "" => p
case Some(p: String) if p != "" => p
}
val k8sUrlStr = k8sUrlParts.mkString("/")
val uri = query.map { q =>
Uri(k8sUrlStr).withQuery(q)
}.getOrElse {
Uri(k8sUrlStr)
}
val req = requestMaker(uri, method)
HTTPRequestAuth.addAuth(req, requestAuth)
}
private[skuber] def logInfo(enabledLogEvent: Boolean, msg: => String)(implicit lc: LoggingContext) =
{
if (log.isInfoEnabled && enabledLogEvent) {
log.info(s"[ ${lc.output} - ${msg}]")
}
}
private[skuber] def logInfoOpt(enabledLogEvent: Boolean, msgOpt: => Option[String])(implicit lc: LoggingContext) =
{
if (log.isInfoEnabled && enabledLogEvent) {
msgOpt foreach { msg =>
log.info(s"[ ${lc.output} - ${msg}]")
}
}
}
private[skuber] def logWarn(msg: String)(implicit lc: LoggingContext) =
{
log.warning(s"[ ${lc.output} - $msg ]")
}
private[skuber] def logError(msg: String)(implicit lc: LoggingContext) =
{
log.error(s"[ ${lc.output} - $msg ]")
}
private[skuber] def logError(msg: String, ex: Throwable)(implicit lc: LoggingContext) =
{
log.error(ex, s"[ ${lc.output} - $msg ]")
}
private[skuber] def logDebug(msg : => String)(implicit lc: LoggingContext) = {
if (log.isDebugEnabled)
log.debug(s"[ ${lc.output} - $msg ]")
}
private[skuber] def logRequestObjectDetails[O <: ObjectResource](method: HttpMethod,resource: O)(implicit lc: LoggingContext) = {
logInfoOpt(logConfig.logRequestBasicMetadata, {
val name = resource.name
val version = resource.metadata.resourceVersion
method match {
case HttpMethods.PUT | HttpMethods.PATCH => Some(s"Requesting update of resource: { name:$name, version:$version ... }")
case HttpMethods.POST => Some(s"Requesting creation of resource: { name: $name ...}")
case _ => None
}
}
)
logInfo(logConfig.logRequestFullObjectResource, s" Marshal and send: ${resource.toString}")
}
private[skuber] def logReceivedObjectDetails[O <: ObjectResource](resource: O)(implicit lc: LoggingContext) =
{
logInfo(logConfig.logResponseBasicMetadata, s" resource: { kind:${resource.kind} name:${resource.name} version:${resource.metadata.resourceVersion} ... }")
logInfo(logConfig.logResponseFullObjectResource, s" received and parsed: ${resource.toString}")
}
private[skuber] def logReceivedListDetails[L <: ListResource[_]](result: L)(implicit lc: LoggingContext) =
{
logInfo(logConfig.logResponseBasicMetadata,s"received list resource of kind ${result.kind}")
logInfo(logConfig.logResponseListSize,s"number of items in received list resource: ${result.items.size}")
logInfo(logConfig.logResponseListNames, s"received ${result.kind} contains item(s): ${result.itemNames}")
logInfo(logConfig.logResponseFullListResource, s" Unmarshalled list resource: ${result.toString}")
}
private[skuber] def makeRequestReturningObjectResource[O <: ObjectResource](httpRequest: HttpRequest)(
implicit fmt: Format[O], lc: LoggingContext): Future[O] =
{
for {
httpResponse <- invoke(httpRequest)
result <- toKubernetesResponse[O](httpResponse)
_ = logReceivedObjectDetails(result)
} yield result
}
private[skuber] def makeRequestReturningListResource[L <: ListResource[_]](httpRequest: HttpRequest)(
implicit fmt: Format[L], lc: LoggingContext): Future[L] =
{
for {
httpResponse <- invoke(httpRequest)
result <- toKubernetesResponse[L](httpResponse)
_ = logReceivedListDetails(result)
} yield result
}
/**
* Modify the specified K8S resource using a given HTTP method. The modified resource is returned.
* The create, update and partiallyUpdate methods all call this, just passing different HTTP methods
*/
private[skuber] def modify[O <: ObjectResource](method: HttpMethod)(obj: O)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
// if this is a POST we don't include the resource name in the URL
val nameComponent: Option[String] = method match {
case HttpMethods.POST => None
case _ => Some(obj.name)
}
modify(method, obj, nameComponent)
}
private[skuber] def modify[O <: ObjectResource](method: HttpMethod, obj: O, nameComponent: Option[String])(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
// Namespace set in the object metadata (if set) has higher priority than that of the
// request context (see Issue #204)
val targetNamespace = if (obj.metadata.namespace.isEmpty) namespaceName else obj.metadata.namespace
logRequestObjectDetails(method, obj)
val marshal = Marshal(obj)
for {
requestEntity <- marshal.to[RequestEntity]
httpRequest = buildRequest(method, rd, nameComponent, namespace = targetNamespace)
.withEntity(requestEntity.withContentType(MediaTypes.`application/json`))
newOrUpdatedResource <- makeRequestReturningObjectResource[O](httpRequest)
} yield newOrUpdatedResource
}
override def create[O <: ObjectResource](obj: O)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
modify(HttpMethods.POST)(obj)
}
override def update[O <: ObjectResource](obj: O)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
modify(HttpMethods.PUT)(obj)
}
override def updateStatus[O <: ObjectResource](obj: O)(implicit
fmt: Format[O],
rd: ResourceDefinition[O],
statusEv: HasStatusSubresource[O],
lc: LoggingContext): Future[O] =
{
val statusSubresourcePath=s"${obj.name}/status"
modify(HttpMethods.PUT,obj,Some(statusSubresourcePath))
}
override def getStatus[O <: ObjectResource](name: String)(implicit
fmt: Format[O],
rd: ResourceDefinition[O],
statusEv: HasStatusSubresource[O],
lc: LoggingContext): Future[O] =
{
_get[O](s"${name}/status")
}
override def getNamespaceNames(implicit lc: LoggingContext): Future[List[String]] =
{
list[NamespaceList].map { namespaceList =>
val namespaces = namespaceList.items
namespaces.map(_.name)
}
}
/*
* List by namespace returns a map of namespace (specified by name e.g. "default", "kube-sys") to the list of objects
* of the specified kind in said namespace. All namespaces in the cluster are included in the map.
* For example, it can be used to get a single list of all objects of the given kind across the whole cluster
* e.g. val allPodsInCluster: Future[List[Pod]] = listByNamespace[Pod] map { _.values.flatMap(_.items) }
* which supports the feature requested in issue #20
*/
override def listByNamespace[L <: ListResource[_]]()(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[Map[String, L]] =
{
listByNamespace[L](rd)
}
private def listByNamespace[L <: ListResource[_]](rd: ResourceDefinition[_])
(implicit fmt: Format[L], lc: LoggingContext): Future[Map[String, L]] =
{
val nsNamesFut: Future[List[String]] = getNamespaceNames
val tuplesFut: Future[List[(String, L)]] = nsNamesFut flatMap { nsNames: List[String] =>
Future.sequence(nsNames map { (nsName: String) =>
listInNamespace[L](nsName, rd) map { l => (nsName, l) }
})
}
tuplesFut map {
_.toMap[String, L]
}
}
/*
* List all objects of given kind in the specified namespace on the cluster
*/
override def listInNamespace[L <: ListResource[_]](theNamespace: String)(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
listInNamespace[L](theNamespace, rd)
}
private def listInNamespace[L <: ListResource[_]](theNamespace: String, rd: ResourceDefinition[_])(
implicit fmt: Format[L], lc: LoggingContext): Future[L] =
{
val req = buildRequest(HttpMethods.GET, rd, None, namespace = theNamespace)
makeRequestReturningListResource[L](req)
}
/*
* List objects of specific resource kind in current namespace
*/
override def list[L <: ListResource[_]]()(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
_list[L](rd, None)
}
/*
* Retrieve the list of objects of given type in the current namespace that match the supplied label selector
*/
override def listSelected[L <: ListResource[_]](labelSelector: LabelSelector)(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
_list[L](rd, Some(ListOptions(labelSelector=Some(labelSelector))))
}
override def listWithOptions[L <: ListResource[_]](options: ListOptions)(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
_list[L](rd, Some(options))
}
private def _list[L <: ListResource[_]](rd: ResourceDefinition[_], maybeOptions: Option[ListOptions])(
implicit fmt: Format[L], lc: LoggingContext): Future[L] =
{
val queryOpt = maybeOptions map { opts =>
Uri.Query(opts.asMap)
}
if (log.isDebugEnabled) {
val optsInfo = maybeOptions map { opts => s" with options '${opts.asMap.toString}'" } getOrElse ""
logDebug(s"[List request: resources of kind '${rd.spec.names.kind}'${optsInfo}")
}
val req = buildRequest(HttpMethods.GET, rd, None, query = queryOpt)
makeRequestReturningListResource[L](req)
}
override def getOption[O <: ObjectResource](name: String)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Option[O]] =
{
_get[O](name) map { result =>
Some(result)
} recover {
case ex: K8SException if ex.status.code.contains(StatusCodes.NotFound.intValue) => None
}
}
override def get[O <: ObjectResource](name: String)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
_get[O](name)
}
override def getInNamespace[O <: ObjectResource](name: String, namespace: String)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
_get[O](name, namespace)
}
private[api] def _get[O <: ObjectResource](name: String, namespace: String = namespaceName)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] =
{
val req = buildRequest(HttpMethods.GET, rd, Some(name), namespace = namespace)
makeRequestReturningObjectResource[O](req)
}
override def delete[O <: ObjectResource](name: String, gracePeriodSeconds: Int = -1)(
implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit] =
{
val grace=if (gracePeriodSeconds >= 0) Some(gracePeriodSeconds) else None
val options = DeleteOptions(gracePeriodSeconds = grace)
deleteWithOptions[O](name, options)
}
override def deleteWithOptions[O <: ObjectResource](name: String, options: DeleteOptions)(
implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit] =
{
val marshalledOptions = Marshal(options)
for {
requestEntity <- marshalledOptions.to[RequestEntity]
request = buildRequest(HttpMethods.DELETE, rd, Some(name))
.withEntity(requestEntity.withContentType(MediaTypes.`application/json`))
response <- invoke(request)
_ <- checkResponseStatus(response)
_ <- ignoreResponseBody(response)
} yield ()
}
override def deleteAll[L <: ListResource[_]]()(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
_deleteAll[L](rd, None)
}
override def deleteAllSelected[L <: ListResource[_]](labelSelector: LabelSelector)(
implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] =
{
_deleteAll[L](rd, Some(labelSelector))
}
private def _deleteAll[L <: ListResource[_]](rd: ResourceDefinition[_], maybeLabelSelector: Option[LabelSelector])(
implicit fmt: Format[L], lc: LoggingContext): Future[L] =
{
val queryOpt = maybeLabelSelector map { ls =>
Uri.Query("labelSelector" -> ls.toString)
}
if (log.isDebugEnabled) {
val lsInfo = maybeLabelSelector map { ls => s" with label selector '${ls.toString}'" } getOrElse ""
logDebug(s"[Delete request: resources of kind '${rd.spec.names.kind}'${lsInfo}")
}
val req = buildRequest(HttpMethods.DELETE, rd, None, query = queryOpt)
makeRequestReturningListResource[L](req)
}
override def getPodLogSource(name: String, queryParams: Pod.LogQueryParams, namespace: Option[String] = None)(
implicit lc: LoggingContext): Future[Source[ByteString, _]] =
{
val targetNamespace=namespace.getOrElse(this.namespaceName)
val queryMap=queryParams.asMap
val query: Option[Uri.Query] = if (queryMap.isEmpty) {
None
} else {
Some(Uri.Query(queryMap))
}
val nameComponent=s"${name}/log"
val rd = implicitly[ResourceDefinition[Pod]]
val request = buildRequest(HttpMethods.GET, rd, Some(nameComponent), query, targetNamespace)
invokeLog(request).flatMap { response =>
val statusOptFut = checkResponseStatus(response)
statusOptFut map {
case Some(status) =>
throw new K8SException(status)
case _ =>
response.entity.dataBytes
}
}
}
// The Watch methods place a Watch on the specified resource on the Kubernetes cluster.
// The methods return Akka streams sources that will reactively emit a stream of updated
// values of the watched resources.
override def watch[O <: ObjectResource](obj: O)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] =
{
watch(obj.name)
}
// The Watch methods place a Watch on the specified resource on the Kubernetes cluster.
// The methods return Akka streams sources that will reactively emit a stream of updated
// values of the watched resources.
override def watch[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] =
{
Watch.events(this, name, sinceResourceVersion, bufSize)
}
// watch events on all objects of specified kind in current namespace
override def watchAll[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] =
{
Watch.eventsOnKind[O](this, sinceResourceVersion, bufSize)
}
override def watchContinuously[O <: ObjectResource](obj: O)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
{
watchContinuously(obj.name)
}
override def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
{
val options=ListOptions(resourceVersion = sinceResourceVersion, timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds) )
WatchSource(this, buildLongPollingPool(), Some(name), options, bufSize)
}
override def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
{
val options=ListOptions(resourceVersion = sinceResourceVersion, timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds))
WatchSource(this, buildLongPollingPool(), None, options, bufSize)
}
override def watchWithOptions[O <: skuber.ObjectResource](options: ListOptions, bufsize: Int = 10000)(
implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] =
{
WatchSource(this, buildLongPollingPool(), None, options, bufsize)
}
private def buildLongPollingPool[O <: ObjectResource]() = {
LongPollingPool[WatchSource.Start[O]](
clusterServerUri.scheme,
clusterServerUri.authority.host.address(),
clusterServerUri.effectivePort,
watchPoolIdleTimeout,
sslContext.map(new HttpsConnectionContext(_)),
ClientConnectionSettings(actorSystem.settings.config).withIdleTimeout(watchContinuouslyIdleTimeout)
)
}
// Operations on scale subresource
// Scale subresource Only exists for certain resource types like RC, RS, Deployment, StatefulSet so only those types
// define an implicit Scale.SubresourceSpec, which is required to be passed to these methods.
override def getScale[O <: ObjectResource](objName: String)(
implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : Future[Scale] =
{
val req = buildRequest(HttpMethods.GET, rd, Some(objName+ "/scale"))
makeRequestReturningObjectResource[Scale](req)
}
@deprecated("use getScale followed by updateScale instead")
override def scale[O <: ObjectResource](objName: String, count: Int)(
implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): Future[Scale] =
{
val scale = Scale(
apiVersion = sc.apiVersion,
metadata = ObjectMeta(name = objName, namespace = namespaceName),
spec = Scale.Spec(replicas = Some(count))
)
updateScale[O](objName, scale)
}
override def updateScale[O <: ObjectResource](objName: String, scale: Scale)(
implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc:LoggingContext): Future[Scale] =
{
implicit val dispatcher = actorSystem.dispatcher
val marshal = Marshal(scale)
for {
requestEntity <- marshal.to[RequestEntity]
httpRequest = buildRequest(HttpMethods.PUT, rd, Some(s"${objName}/scale"))
.withEntity(requestEntity.withContentType(MediaTypes.`application/json`))
scaledResource <- makeRequestReturningObjectResource[Scale](httpRequest)
} yield scaledResource
}
override def patch[P <: Patch, O <: ObjectResource](name: String, patchData: P, namespace: Option[String] = None)
(implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): Future[O] = {
val targetNamespace = namespace.getOrElse(namespaceName)
val contentType = patchData.strategy match {
case StrategicMergePatchStrategy =>
CustomMediaTypes.`application/strategic-merge-patch+json`
case JsonMergePatchStrategy =>
CustomMediaTypes.`application/merge-patch+json`
case JsonPatchStrategy =>
MediaTypes.`application/json-patch+json`
}
logInfo(logConfig.logRequestBasicMetadata, s"Requesting patch of resource: { name:$name ... }")
logInfo(logConfig.logRequestFullObjectResource, s" Marshal and send: ${patchData.toString}")
val marshal = Marshal(patchData)
for {
requestEntity <- marshal.to[RequestEntity]
httpRequest = buildRequest(HttpMethods.PATCH, rd, Some(name), namespace = targetNamespace)
.withEntity(requestEntity.withContentType(contentType))
newOrUpdatedResource <- makeRequestReturningObjectResource[O](httpRequest)
} yield newOrUpdatedResource
}
/**
* Perform a Json merge patch on a resource
* The patch is passed a String type which should contain the JSON patch formatted per https://tools.ietf.org/html/rfc7386
* It is a String type instead of a JSON object in order to allow clients to use their own favourite JSON library to create the
* patch, or alternatively to simply manually craft the JSON and insert it into a String. Also patches are generally expected to be
* relatively small, so storing the whole patch in memory should not be problematic.
* It is thus the responsibility of the client to ensure that the `patch` parameter contains a valid JSON merge patch entity for the
* targeted Kubernetes resource `obj`.
* @param obj The resource to update with the patch
* @param patch A string containing the JSON patch entity
* @return The patched resource (in a Future)
*/
override def jsonMergePatch[O <: ObjectResource](obj: O, patch: String)(
implicit rd: ResourceDefinition[O], fmt: Format[O], lc:LoggingContext): Future[O] =
{
val patchRequestEntity = HttpEntity.Strict(`application/merge-patch+json`, ByteString(patch))
val httpRequest = buildRequest(HttpMethods.PATCH, rd, Some(obj.name)).withEntity(patchRequestEntity)
makeRequestReturningObjectResource[O](httpRequest)
}
// get API versions supported by the cluster
override def getServerAPIVersions(implicit lc: LoggingContext): Future[List[String]] = {
val url = clusterServer + "/api"
val noAuthReq = requestMaker(Uri(url), HttpMethods.GET)
val request = HTTPRequestAuth.addAuth(noAuthReq, requestAuth)
for {
response <- invoke(request)
apiVersionResource <- toKubernetesResponse[APIVersions](response)
} yield apiVersionResource.versions
}
/*
* Execute a command in a pod
*/
override def exec(
podName: String,
command: Seq[String],
maybeContainerName: Option[String] = None,
maybeStdin: Option[Source[String, _]] = None,
maybeStdout: Option[Sink[String, _]] = None,
maybeStderr: Option[Sink[String, _]] = None,
tty: Boolean = false,
maybeClose: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): Future[Unit] =
{
PodExecImpl.exec(this, podName, command, maybeContainerName, maybeStdin, maybeStdout, maybeStderr, tty, maybeClose)
}
override def close: Unit =
{
isClosed = true
closeHook foreach {
_ ()
} // invoke the specified close hook if specified
}
/*
* Lightweight switching of namespace for applications that need to access multiple namespaces on same cluster
* and using same credentials and other configuration.
*/
override def usingNamespace(newNamespace: String): KubernetesClientImpl =
new KubernetesClientImpl(requestMaker, clusterServer, requestAuth,
newNamespace, watchContinuouslyRequestTimeout, watchContinuouslyIdleTimeout,
watchPoolIdleTimeout, watchSettings, podLogSettings, sslContext, logConfig, closeHook
)
private[skuber] def toKubernetesResponse[T](response: HttpResponse)(implicit reader: Reads[T], lc: LoggingContext): Future[T] =
{
val statusOptFut = checkResponseStatus(response)
statusOptFut flatMap {
case Some(status) =>
throw new K8SException(status)
case None =>
try {
Unmarshal(response).to[T]
}
catch {
case ex: Exception =>
logError("Unable to unmarshal resource from response", ex)
throw new K8SException(Status(message = Some("Error unmarshalling resource from response"), details = Some(ex.getMessage)))
}
}
}
// check for non-OK status, returning (in a Future) some Status object if not ok or otherwise None
private[skuber] def checkResponseStatus(response: HttpResponse)(implicit lc: LoggingContext): Future[Option[Status]] =
{
response.status.intValue match {
case code if code < 300 =>
Future.successful(None)
case code =>
// a non-success or unexpected status returned - we should normally have a Status in the response body
val statusFut: Future[Status] = Unmarshal(response).to[Status]
statusFut map { status =>
if (log.isInfoEnabled)
log.info(s"[Response: non-ok status returned - $status")
Some(status)
} recover { case ex =>
if (log.isErrorEnabled)
log.error(s"[Response: could not read Status for non-ok response, exception : ${ex.getMessage}]")
val status: Status = Status(
code = Some(response.status.intValue),
message = Some("Non-ok response and unable to parse Status from response body to get further details"),
details = Some(ex.getMessage)
)
Some(status)
}
}
}
/**
* Discards the response
* This is for requests (e.g. delete) for which we normally have no interest in the response body, but Akka Http
* requires us to drain it anyway
* (see https://doc.akka.io/docs/akka-http/current/scala/http/implications-of-streaming-http-entity.html)
* @param response the Http Response that we need to drain
* @return A Future[Unit] that will be set to Success or Failure depending on outcome of draining
*/
private def ignoreResponseBody(response: HttpResponse): Future[Unit] = {
response.discardEntityBytes().future.map(done => ())
}
}
object KubernetesClientImpl {
def apply(k8sContext: Context, logConfig: LoggingConfig, closeHook: Option[() => Unit], appConfig: Config)
(implicit actorSystem: ActorSystem): KubernetesClientImpl =
{
appConfig.checkValid(ConfigFactory.defaultReference(), "skuber")
def getSkuberConfig[T](key: String, fromConfig: String => Option[T], default: T): T = {
val skuberConfigKey = s"skuber.$key"
if (appConfig.getIsNull(skuberConfigKey)) {
default
} else {
fromConfig(skuberConfigKey) match {
case None => default
case Some(t) => t
}
}
}
def dispatcherFromConfig(configKey: String): Option[ExecutionContext] = if (appConfig.getString(configKey).isEmpty) {
None
} else {
Some(actorSystem.dispatchers.lookup(appConfig.getString(configKey)))
}
implicit val dispatcher: ExecutionContext = getSkuberConfig("akka.dispatcher", dispatcherFromConfig, actorSystem.dispatcher)
def durationFomConfig(configKey: String): Option[Duration] = Some(Duration.fromNanos(appConfig.getDuration(configKey).toNanos))
val watchIdleTimeout: Duration = getSkuberConfig("watch.idle-timeout", durationFomConfig, Duration.Inf)
val podLogIdleTimeout: Duration = getSkuberConfig("pod-log.idle-timeout", durationFomConfig, Duration.Inf)
val watchContinuouslyRequestTimeout: Duration = getSkuberConfig("watch-continuously.request-timeout", durationFomConfig, 30.seconds)
val watchContinuouslyIdleTimeout: Duration = getSkuberConfig("watch-continuously.idle-timeout", durationFomConfig, 60.seconds)
val watchPoolIdleTimeout: Duration = getSkuberConfig("watch-continuously.pool-idle-timeout", durationFomConfig, 60.seconds)
//The watch idle timeout needs to be greater than watch api request timeout
require(watchContinuouslyIdleTimeout > watchContinuouslyRequestTimeout)
if (logConfig.logConfiguration) {
val log = Logging.getLogger(actorSystem, "skuber.api")
log.info("Using following context for connecting to Kubernetes cluster: {}", k8sContext)
}
val sslContext = TLS.establishSSLContext(k8sContext)
val theNamespaceName = k8sContext.namespace.name match {
case "" => "default"
case name => name
}
val requestMaker = (uri: Uri, method: HttpMethod) => HttpRequest(method = method, uri = uri)
val defaultClientSettings = ConnectionPoolSettings(actorSystem.settings.config)
val watchConnectionSettings = defaultClientSettings.connectionSettings.withIdleTimeout(watchIdleTimeout)
val watchSettings = defaultClientSettings.withConnectionSettings(watchConnectionSettings)
val podLogConnectionSettings = defaultClientSettings.connectionSettings.withIdleTimeout(podLogIdleTimeout)
val podLogSettings = defaultClientSettings.withConnectionSettings(podLogConnectionSettings)
new KubernetesClientImpl(
requestMaker, k8sContext.cluster.server, k8sContext.authInfo,
theNamespaceName, watchContinuouslyRequestTimeout, watchContinuouslyIdleTimeout,
watchPoolIdleTimeout, watchSettings, podLogSettings, sslContext, logConfig, closeHook
)
}
}
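As the `jsonMergePatch` comment notes, the patch body is a raw RFC 7386 merge-patch document passed as a `String`. A hedged sketch of a call site follows; the resource name, patch content and import paths are assumptions, not taken from this file:

// Sketch only: scales a Deployment via a JSON merge patch. Assumes skuber's
// Deployment type plus its implicit Format and ResourceDefinition are in scope
// (the exact import paths below are an assumption for this example).
import scala.concurrent.{ExecutionContext, Future}
import skuber.apps.v1.Deployment

def bumpReplicas(k8s: skuber.api.client.KubernetesClient)
                (implicit ec: ExecutionContext): Future[Deployment] = {
  val mergePatch = """{ "spec": { "replicas": 5 } }"""
  k8s.get[Deployment]("frontend").flatMap(dep => k8s.jsonMergePatch(dep, mergePatch))
}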
| doriordan/skuber | client/src/main/scala/skuber/api/client/impl/KubernetesClientImpl.scala | Scala | apache-2.0 | 32,453 |
/*
* Copyright 2016 Guy Van den Broeck and Wannes Meert (UCLA and KU Leuven)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.ucla.cs.starai.forclift.examples.models.mln
import edu.ucla.cs.starai.forclift._
import examples.models._
class OriginalFriendsSmokerModel(
nbPeople: Int,
knownPeople: Seq[String] = Nil,
evidence: Seq[String] = Nil) extends MLNModel {
def theoryString = (
"person = " + (1 to nbPeople).map { "P" + _ }.mkString("{", ", ", "}") + """
Friends(person,person)
Smokes(person)
Cancer(person)
1.4 !Smokes(x)
2.3 !Cancer(x)
4.6 !Friends(x,y)
1.5 Smokes(x) => Cancer(x)
1.1 Friends(x,y) ^ Smokes(x) => Smokes(y)
""" + evidence.map { _ + "." }.mkString("\\n"))
//1000 Friends(x,y) ^ Friends(y,z) => Friends(x,z)
//2 Friends(x,y) ^ Friends(y,z) => Friends(x,z)
}
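The theory string above is plain MLN syntax; constructing the model only fixes the domain size and appends evidence atoms as `.`-terminated lines. A small illustrative instantiation (the evidence atom is made up):

// Sketch only: three people, one observed smoker.
val model = new OriginalFriendsSmokerModel(3, evidence = Seq("Smokes(P1)"))
println(model.theoryString) // domain {P1, P2, P3}, weighted clauses, then "Smokes(P1)."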
| UCLA-StarAI/Forclift | src/main/scala/edu/ucla/cs/starai/forclift/examples/models/mln/OriginalFriendsSmokerModel.scala | Scala | apache-2.0 | 1,324 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.source.StringDocumentSource
import org.elasticsearch.action.ActionListener
import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse
import org.elasticsearch.action.admin.indices.flush.FlushResponse
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.client.Client
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.settings.{ImmutableSettings, Settings}
import org.elasticsearch.common.transport.InetSocketTransportAddress
import org.elasticsearch.node.{Node, NodeBuilder}
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.implicitConversions
/** @author Stephen Samuel */
class ElasticClient(val client: org.elasticsearch.client.Client) {
def execute[T, R](t: T)(implicit executable: Executable[T, R]): Future[R] = executable(client, t)
def shutdown: Future[NodesShutdownResponse] = shutdown("_local")
def shutdown(nodeIds: String*): Future[NodesShutdownResponse] = {
injectFuture[NodesShutdownResponse](java.admin.cluster.prepareNodesShutdown(nodeIds: _*).execute)
}
@deprecated("deprecated in favour of: client.execute { index exists <index> }", "1.5.5")
def exists(indexes: String*): Future[IndicesExistsResponse] =
injectFuture[IndicesExistsResponse](client.admin.indices.prepareExists(indexes: _*).execute)
@deprecated("deprecated in favour of: client.execute { types exist <types> in <index>> }", "1.5.5")
def typesExist(indices: String*)(types: String*): Future[TypesExistsResponse] =
injectFuture[TypesExistsResponse](client.admin.indices.prepareTypesExists(indices: _*).setTypes(types: _*).execute)
@deprecated("deprecated in favour of: client.execute { search scroll <id> }", "1.5.5")
def searchScroll(scrollId: String) =
injectFuture[SearchResponse](client.prepareSearchScroll(scrollId).execute)
@deprecated("deprecated in favour of: client.execute { search scroll <id> }", "1.5.5")
def searchScroll(scrollId: String, keepAlive: String) =
injectFuture[SearchResponse](client.prepareSearchScroll(scrollId).setScroll(keepAlive).execute)
@deprecated("deprecated in favour of: client.execute { flush index <index> }", "1.5.5")
def flush(indexes: String*): Future[FlushResponse] =
injectFuture[FlushResponse](client.admin.indices.prepareFlush(indexes: _*).execute)
@deprecated("deprecated in favour of: client.execute { refresh index <index> }", "1.5.5")
def refresh(indexes: String*): Future[RefreshResponse] =
injectFuture[RefreshResponse](client.admin.indices.prepareRefresh(indexes: _*).execute)
@deprecated("deprecated in favour of the typeclass approach; use client.execute { open index <index> }", "1.5.5")
def open(index: String): Future[OpenIndexResponse] =
injectFuture[OpenIndexResponse](client.admin.indices.prepareOpen(index).execute)
def close(): Unit = client.close()
@deprecated("deprecated in favour of the typeclass approach; use client.execute { close index <index> }", "1.5.5")
def close(index: String): Future[CloseIndexResponse] =
injectFuture[CloseIndexResponse](client.admin.indices.prepareClose(index).execute)
@deprecated("deprecated in favour of the typeclass approach; use client.execute { get segments <index> }", "1.5.5")
def segments(indexes: String*): Future[IndicesSegmentResponse] =
injectFuture[IndicesSegmentResponse](client.admin.indices.prepareSegments(indexes: _*).execute)
def reindex(sourceIndex: String,
targetIndex: String,
chunkSize: Int = 500,
keepAlive: String = "5m",
preserveId: Boolean = true)(implicit ec: ExecutionContext): Future[Unit] = {
execute {
ElasticDsl.search in sourceIndex limit chunkSize scroll keepAlive searchType SearchType.Scan query matchall
} flatMap { response =>
def _scroll(scrollId: String): Future[Unit] = {
execute {
search scroll scrollId keepAlive keepAlive
} flatMap { response =>
val hits = response.getHits.hits
if (hits.nonEmpty) {
Future
.sequence(hits.map(hit => (hit.`type`, hit.getId, hit.sourceAsString)).grouped(chunkSize).map { pairs =>
execute {
ElasticDsl.bulk(
pairs map {
case (typ, _id, source) =>
val expr = index into targetIndex -> typ
(if (preserveId) expr id _id else expr) doc StringDocumentSource(source)
}: _*
)
}
})
.flatMap(_ => _scroll(response.getScrollId))
} else {
Future.successful(())
}
}
}
val scrollId = response.getScrollId
_scroll(scrollId)
}
}
protected def injectFuture[R](f: ActionListener[R] => Unit): Future[R] = {
val p = Promise[R]()
f(new ActionListener[R] {
def onFailure(e: Throwable): Unit = p.tryFailure(e)
def onResponse(response: R): Unit = p.trySuccess(response)
})
p.future
}
def java = client
def admin = client.admin
@deprecated("Use .await() on future of async client", "1.3.0")
def sync(implicit duration: Duration = 10.seconds) = new SyncClient(this)(duration)
}
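The `reindex` method above drives a scan-and-scroll loop: it opens a scrolling search over the source index, bulk-indexes each page of hits into the target, and recurses until a page comes back empty. A hedged sketch of a call site (host, port and index names are illustrative):

// Sketch only: copy one index into another, 1000 documents per scroll page.
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

val client = ElasticClient.remote("127.0.0.1", 9300)
val done: Future[Unit] = client.reindex("logs-2015", "logs-archive", chunkSize = 1000)
done.onComplete(_ => client.close())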
object ElasticClient {
def fromClient(client: Client): ElasticClient = new ElasticClient(client)
@deprecated("timeout is no longer needed, it is ignored, use the fromClient(client) method instead", "1.4.2")
def fromClient(client: Client, timeout: Long): ElasticClient = fromClient(client)
def fromNode(node: Node): ElasticClient = fromClient(node.client)
@deprecated("timeout is no longer needed, it is ignored, use the fromNode(client) method instead", "1.4.2")
def fromNode(node: Node, timeout: Long): ElasticClient = fromNode(node)
/** Connect this client to the single remote elasticsearch process.
* Note: Remote means out of process, it can of course be on the local machine.
*/
def remote(host: String, port: Int): ElasticClient = remote(ImmutableSettings.builder.build, host, port)
def remote(settings: Settings, host: String, port: Int): ElasticClient = {
val client = new TransportClient(settings)
client.addTransportAddress(new InetSocketTransportAddress(host, port))
fromClient(client)
}
def remote(uri: ElasticsearchClientUri): ElasticClient = remote(ImmutableSettings.builder.build, uri)
def remote(settings: Settings, uri: ElasticsearchClientUri): ElasticClient = {
val client = new TransportClient(settings)
for ( (host, port) <- uri.hosts ) client.addTransportAddress(new InetSocketTransportAddress(host, port))
fromClient(client)
}
@deprecated("For multiple hosts, prefer the methods that use ElasticsearchClientUri", "1.4.2")
def remote(addresses: (String, Int)*): ElasticClient = remote(ImmutableSettings.builder().build(), addresses: _*)
@deprecated("For multiple hosts, Prefer the methods that use ElasticsearchClientUri", "1.4.2")
def remote(settings: Settings, addresses: (String, Int)*): ElasticClient = {
val client = new TransportClient(settings)
for ( (host, port) <- addresses ) client.addTransportAddress(new InetSocketTransportAddress(host, port))
fromClient(client)
}
def data : ElasticClient = data(ImmutableSettings.builder.build)
def data(settings: Settings): ElasticClient = fromNode(NodeBuilder.nodeBuilder().data(true).settings(settings).node())
def local: ElasticClient = local(ImmutableSettings.settingsBuilder().build())
def local(settings: Settings): ElasticClient = {
fromNode(NodeBuilder.nodeBuilder().local(true).data(true).settings(settings).node())
}
@deprecated("timeout is no longer needed, it is ignored, so you can use the local(client) method instead", "1.4.2")
def local(settings: Settings, timeout: Long): ElasticClient = local(settings)
}
object ElasticsearchClientUri {
private val PREFIX = "elasticsearch://"
implicit def stringtoUri(str: String): ElasticsearchClientUri = ElasticsearchClientUri(str)
def apply(str: String): ElasticsearchClientUri = {
require(str != null && str.trim.nonEmpty, "Invalid uri, must be in format elasticsearch://host:port,host:port,...")
val withoutPrefix = str.replace(PREFIX, "")
val hosts = withoutPrefix.split(',').map { host =>
val parts = host.split(':')
if (parts.length == 2) {
parts(0) -> parts(1).toInt
} else {
throw new IllegalArgumentException("Invalid uri, must be in format elasticsearch://host:port,host:port,...")
}
}
ElasticsearchClientUri(str, hosts.toList)
}
}
case class ElasticsearchClientUri(uri: String, hosts: List[(String, Int)])
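`ElasticsearchClientUri.apply` (and the implicit `stringtoUri` conversion) accept the `elasticsearch://host:port,host:port,...` form; since the prefix is only stripped when present, a bare `host:port` list parses the same way. A quick sketch:

// Parses into the (host, port) pairs consumed by ElasticClient.remote(uri).
val uri = ElasticsearchClientUri("elasticsearch://es1:9300,es2:9300")
assert(uri.hosts == List("es1" -> 9300, "es2" -> 9300))

val bare = ElasticsearchClientUri("es1:9300") // prefix is optional in practice
assert(bare.hosts == List("es1" -> 9300))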
| l15k4/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/ElasticClient.scala | Scala | apache-2.0 | 9,251 |
package com.github.mdr.mash.compiler
import com.github.mdr.mash.parser.AbstractSyntax._
import com.github.mdr.mash.parser.{ Abstractifier, MashParser, Provenance }
import org.scalatest.{ FlatSpec, Matchers }
class ParenRemoverTest extends FlatSpec with Matchers {
"ParenRemover" should "remove parens" in {
val s = "(foo)"
val expr = parse(s)
val ParenExpr(subExpr, _) = expr
removeParens(expr) should equal(subExpr)
}
private def removeParens(expr: Expr): Expr = ParenRemover.removeParens(Program(None, expr)).body
it should "remove nested parens" in {
val s = "((foo))"
val expr = parse(s)
val ParenExpr(ParenExpr(subExpr, _), _) = expr
removeParens(expr) should equal(subExpr)
}
"foo --bar=(baz)" afterParensRemovedShouldBe "foo --bar=baz"
private implicit class RichString(s: String) {
def afterParensRemovedShouldBe(s2: String) {
"ParenRemover" should s"remove parens from '$s'" in {
val actualExpr = removeSourceInfo(removeParens(parse(s)))
val expectedExpr = removeSourceInfo(parse(s2))
actualExpr should equal(expectedExpr)
}
}
}
private def parse(s: String): Expr = {
val abstractifier = new Abstractifier(Provenance.internal(s))
abstractifier.abstractify(MashParser.parseForgiving(s)).body
}
private def removeSourceInfo(expr: Expr) = expr.transform { case e ⇒ e.withSourceInfoOpt(None) }
}
| mdr/mash | src/test/scala/com/github/mdr/mash/compiler/ParenRemoverTest.scala | Scala | mit | 1,419 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.zipkin.storm
import com.twitter.bijection._
import com.twitter.bijection.Codec
import com.twitter.summingbird.batch.BatchID
import com.twitter.bijection.scrooge.BinaryScalaCodec
import com.twitter.zipkin.common.{Annotation, BinaryAnnotation, Span}
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.gen
import com.twitter.algebird.Monoid
object Serialization {
val gen2span = new AbstractBijection[Span, gen.Span] {
override def invert(g: gen.Span) = g.toSpan
def apply(span: Span) = span.toThrift
}
val bytes2genspan = BinaryScalaCodec(gen.Span)
implicit val bytes2spanInj: Injection[Span, Array[Byte]] = bytes2genspan compose gen2span
implicit def kInj[T: Codec]: Injection[(T, BatchID), Array[Byte]] = {
implicit val buf =
Bufferable.viaInjection[(T, BatchID), (Array[Byte], Array[Byte])]
Bufferable.injectionOf[(T, BatchID)]
}
implicit def vInj[V: Codec]: Injection[(BatchID, V), Array[Byte]] =
Injection.connect[(BatchID, V), (V, BatchID), Array[Byte]]
implicit val mapInj: Injection[Map[String, Long], Array[Byte]] =
Bufferable.injectionOf[Map[String, Long]]
implicit val spanMonoid: Monoid[Span] = new Monoid[Span] {
val zero = Span(0, "zero", 0, None, Nil, Nil, true)
val invalid = Span(0, "invalid", 0, None, Nil, Nil, true)
def plus(l: Span, r: Span) = {
if (l == zero || r == invalid) r
else if (r == zero || l == invalid) l
else if (l.id == r.id) l.mergeSpan(r)
else invalid
}
}
}
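// Identity sketch (illustrative span values): `zero` is the monoid identity, and spans
// with mismatched ids collapse to the sentinel `invalid` span defined above.
object SerializationExample extends App {
  import Serialization.spanMonoid
  val span = Span(42, "service", 7, None, Nil, Nil, true)
  assert(spanMonoid.plus(spanMonoid.zero, span) == span)
  assert(spanMonoid.plus(span, spanMonoid.zero) == span)
}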
|
suchang/zipkin
|
zipkin-storm/src/main/scala/com/twitter/zipkin/Serialization.scala
|
Scala
|
apache-2.0
| 2,101 |
var count = 1
/*start*/count += 1/*end*/
//Unit
|
ilinum/intellij-scala
|
testdata/typeInference/statements/AssignWithFunction.scala
|
Scala
|
apache-2.0
| 47 |
package hotpepper4s
/**
* @author ponkotuy
*/
trait CodeName {
import CodeName._
def code: String
def name: String
def toCodeName: CodeNameImpl = CodeNameImpl(code, name)
def toTuple: (String, String) = code -> name
}
object CodeName {
  def apply(code: String, name: String): CodeName = CodeNameImpl(code, name)
case class CodeNameImpl(code: String, name: String) extends CodeName
}
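// Round-trip sketch (illustrative values): a code/name pair converts to a tuple as-is.
object CodeNameExample extends App {
  assert(CodeName("G001", "Tokyo").toTuple == ("G001" -> "Tokyo"))
}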
|
ponkotuy/hotpepper4s
|
src/main/scala/hotpepper4s/CodeName.scala
|
Scala
|
mit
| 400 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package noop.interpreter;
import collection.mutable.{Map, Stack};
import types.{NoopType, NoopObject};
/**
* A handy little data structure which wraps the stack of scopes that exist on a single stack frame.
* @author [email protected] (Alex Eagle)
*/
class BlockScopes(val scopes: Stack[Map[String, Tuple2[NoopType, NoopObject]]]) {
def inScope(name: String)(f: => Any) {
    scopes.push(Map.empty[String, Tuple2[NoopType, NoopObject]]);
try {
f;
} finally {
scopes.pop;
}
}
def registerIdentifier(name: String, value: Tuple2[NoopType, NoopObject]) = {
if (scopes.isEmpty) {
throw new RuntimeException("Cannot declare an identifier unless we are in a scope");
}
if (hasIdentifier(name)) {
throw new RuntimeException("Identifier " + name + " is already declared");
}
    scopes.top += (name -> value);
}
def setValue(name: String, value: Tuple2[NoopType, NoopObject]): Unit = {
    for (identifiers <- scopes.iterator) {
if (identifiers.contains(name)) {
identifiers(name) = value;
return;
}
}
throw new RuntimeException("No such identifier to assign: " + name);
}
def hasIdentifier(name: String): Boolean = getIdentifier(name) != null;
def getIdentifier(name: String): Tuple2[NoopType, NoopObject] = {
    for (identifiers <- scopes.iterator) {
if (identifiers.contains(name)) {
return identifiers(name);
}
}
null;
}
}
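// Usage sketch (illustrative; nulls stand in for real NoopType/NoopObject values):
// identifiers registered inside `inScope` disappear once the scope is popped.
object BlockScopesExample {
  def demo() {
    val scopes = new BlockScopes(new Stack[Map[String, Tuple2[NoopType, NoopObject]]]);
    scopes.inScope("demo") {
      scopes.registerIdentifier("x", (null, null));
      assert(scopes.hasIdentifier("x"));
    }
    // the enclosing scope has been popped, so "x" no longer resolves
    assert(!scopes.hasIdentifier("x"));
  }
}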
|
masterx2/noop
|
interpreter/src/main/scala/noop/interpreter/BlockScopes.scala
|
Scala
|
apache-2.0
| 2,061 |
package models.daos
import scala.Left
import scala.Right
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import javax.inject.Inject
import javax.inject.Singleton
import models.Gallery
import play.api.libs.json.JsArray
import play.api.libs.json.JsBoolean
import play.api.libs.json.JsNull
import play.api.libs.json.JsNumber
import play.api.libs.json.JsObject
import play.api.libs.json.JsString
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import play.api.libs.json.Json.toJsFieldJsValueWrapper
import play.api.libs.json.Writes
import play.modules.reactivemongo.json.JsObjectDocumentWriter
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.api.DB
import reactivemongo.api.ReadPreference
/**
 * @author carlos
*/
@Singleton
class GalleryDAOImp @Inject() (db: DB) extends GalleryDAO{
def collection: JSONCollection = db.collection[JSONCollection]("gallery")
def find(): Future[List[Gallery]] = {
val query = Json.obj()
    collection.find(query).cursor[Gallery]().collect[List]()
}
def find(id: String): Future[Option[Gallery]] = {
val query = Json.obj("_id" -> id)
    collection.find(query).one[Gallery]
}
def findByCriteria(criteria: Map[String, Any], limit: Int): Future[Traversable[Gallery]] =
findByCriteria(CriteriaJSONWriter.writes(criteria), limit)
private def findByCriteria(criteria: JsObject, limit: Int): Future[Traversable[Gallery]] =
collection.
find(criteria).
cursor[Gallery](readPreference = ReadPreference.primary).
collect[List](limit)
def add(gall: Gallery): Future[Either[Exception, Gallery]] = {
collection.insert(gall).map {
      case le if le.ok => Right(gall)
case le => Left(le)
}
}
def remove(gallId: String): Future[Either[Exception, Boolean]] = {
val query = Json.obj("_id" -> gallId)
collection.remove(query, firstMatchOnly = true).map {
      case le if le.ok => Right(le.ok)
case le => Left(le)
}
}
def update(gall: Gallery): Future[Either[Exception, Gallery]] = {
val query = Json.obj("_id" -> gall._id)
val modifier = Json.obj(
"$set" -> Json.obj(
"_id" -> gall._id,
"galName" -> gall.galName,
"galDesc" -> gall.galDesc,
"galURLSmall" -> gall.galURLSmall,
"galURLLarge" -> gall.galURLLarge))
println(modifier.toString())
collection.update(query, modifier).map {
      case le if le.ok => Right(gall)
case le => Left(le)
}
}
}
object CriteriaJSONWriter extends Writes[Map[String, Any]] {
override def writes(criteria: Map[String, Any]): JsObject = JsObject(criteria.mapValues(toJsValue(_)).toSeq)
val toJsValue: PartialFunction[Any, JsValue] = {
case v: String => JsString(v)
case v: Int => JsNumber(v)
case v: Long => JsNumber(v)
case v: Double => JsNumber(v)
case v: Boolean => JsBoolean(v)
case obj: JsValue => obj
case map: Map[String, Any] @unchecked => CriteriaJSONWriter.writes(map)
case coll: Traversable[_] => JsArray(coll.map(toJsValue(_)).toSeq)
case null => JsNull
case other => throw new IllegalArgumentException(s"Criteria value type not supported: $other")
}
}
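// Conversion sketch (illustrative values): criteria maps flatten into Play JSON objects.
object CriteriaJSONWriterExample extends App {
  val criteria = CriteriaJSONWriter.writes(Map("galName" -> "sunset", "views" -> 42))
  println(criteria) // {"galName":"sunset","views":42}
}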
trait GalleryDAO {
def add(gall: Gallery): Future[Either[Exception, Gallery]]
  def remove(gallId: String): Future[Either[Exception, Boolean]] // Either is a common alternative to scala.Option for possibly missing values: scala.None becomes a scala.util.Left, which can carry useful information, and scala.util.Right takes the place of scala.Some
def find(): Future[List[Gallery]]
def find(id: String): Future[Option[Gallery]]
def findByCriteria(criteria: Map[String, Any], limit: Int): Future[Traversable[Gallery]]
def update(gall: Gallery): Future[Either[Exception, Gallery]]
}
|
carlosFattor/DoceTentacaoScala
|
app/models/daos/GalleryDAOImp.scala
|
Scala
|
apache-2.0
| 4,271 |
package org.scaladebugger.api.profiles.java.info
import com.sun.jdi._
import org.scaladebugger.api.profiles.traits.info._
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
import test.InfoTestClasses.TestMiscInfoTrait
class JavaFieldInfoSpec extends ParallelMockFunSpec
{
private val mockNewTypeProfile = mockFunction[Type, TypeInfo]
private val mockScalaVirtualMachine = mock[ScalaVirtualMachine]
private val mockInfoProducerProfile = mock[InfoProducer]
private val mockVirtualMachine = mock[VirtualMachine]
private val mockObjectReference = mock[ObjectReference]
private val mockField = mock[Field]
private val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Left(mockObjectReference),
mockField
)(mockVirtualMachine) {
override protected def newTypeProfile(_type: Type): TypeInfo =
mockNewTypeProfile(_type)
}
describe("JavaFieldInfo") {
describe("#toJavaInfo") {
it("should return a new instance of the Java profile representation when wrapping an object reference") {
val expected = mock[FieldVariableInfo]
val offsetIndex = 999
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Left(mockObjectReference),
mockField,
offsetIndex
)(mockVirtualMachine)
// Get Java version of info producer
(mockInfoProducerProfile.toJavaInfo _).expects()
.returning(mockInfoProducerProfile).once()
// Create new info profile using Java version of info producer
// NOTE: Cannot validate second set of args because they are
// call-by-name, which ScalaMock does not support presently
(mockInfoProducerProfile.newFieldInfo(
_: ScalaVirtualMachine,
_: Either[ObjectReference, ReferenceType],
_: Field,
_: Int
)(
_: VirtualMachine
)).expects(
mockScalaVirtualMachine,
Left(mockObjectReference),
mockField,
offsetIndex,
*
).returning(expected).once()
val actual = javaFieldInfoProfile.toJavaInfo
actual should be (expected)
}
it("should return a new instance of the Java profile representation when wrapping a reference type") {
val expected = mock[FieldVariableInfo]
val mockReferenceType = mock[ReferenceType]
val offsetIndex = 999
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Right(mockReferenceType),
mockField,
offsetIndex
)(mockVirtualMachine)
// Get Java version of info producer
(mockInfoProducerProfile.toJavaInfo _).expects()
.returning(mockInfoProducerProfile).once()
// Create new info profile using Java version of info producer
// NOTE: Cannot validate second set of args because they are
// call-by-name, which ScalaMock does not support presently
(mockInfoProducerProfile.newFieldInfo(
_: ScalaVirtualMachine,
_: Either[ObjectReference, ReferenceType],
_: Field,
_: Int
)(
_: VirtualMachine
)).expects(
mockScalaVirtualMachine,
Right(mockReferenceType),
mockField,
offsetIndex,
*
).returning(expected).once()
val actual = javaFieldInfoProfile.toJavaInfo
actual should be (expected)
}
}
describe("#isJavaInfo") {
it("should return true") {
val expected = true
val actual = javaFieldInfoProfile.isJavaInfo
actual should be (expected)
}
}
describe("#toJdiInstance") {
it("should return the JDI instance this profile instance represents") {
val expected = mockField
val actual = javaFieldInfoProfile.toJdiInstance
actual should be (expected)
}
}
describe("#name") {
it("should return the field's name") {
val expected = "someName"
(mockField.name _).expects().returning(expected).once()
val actual = javaFieldInfoProfile.name
actual should be (expected)
}
}
describe("#typeName") {
it("should return the field's type name") {
val expected = "some.type.name"
(mockField.typeName _).expects().returning(expected).once()
val actual = javaFieldInfoProfile.typeName
actual should be (expected)
}
}
describe("#typeInfo") {
it("should should return a new type info profile wrapping the type") {
val expected = mock[TypeInfo]
val mockType = mock[Type]
(mockField.`type` _).expects().returning(mockType).once()
mockNewTypeProfile.expects(mockType)
.returning(expected).once()
val actual = javaFieldInfoProfile.`type`
actual should be (expected)
}
}
describe("#parent") {
it("should return Left(object) if the parent of the field is an object") {
val expected = Left(mock[ObjectInfo])
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Left(mock[ObjectReference]),
mockField
)(mockVirtualMachine) {
override protected def newObjectProfile(
objectReference: ObjectReference
): ObjectInfo = expected.left.get
}
val actual = javaFieldInfoProfile.parent
actual should be (expected)
}
it("should return Right(type) if the parent of the field is a type") {
val expected = Right(mock[ReferenceTypeInfo])
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Right(mock[ReferenceType]),
mockField
)(mockVirtualMachine) {
override protected def newReferenceTypeProfile(
referenceType: ReferenceType
): ReferenceTypeInfo = expected.right.get
}
val actual = javaFieldInfoProfile.parent
actual should be (expected)
}
}
describe("#declaringTypeInfo") {
it("should return a new type info profile wrapping the type that declared this field") {
val expected = mock[ReferenceTypeInfo]
val mockReferenceType = mock[ReferenceType]
(mockField.declaringType _).expects()
.returning(mockReferenceType).once()
(mockInfoProducerProfile.newReferenceTypeInfo _)
.expects(mockScalaVirtualMachine, mockReferenceType)
.returning(expected)
.once()
val actual = javaFieldInfoProfile.declaringType
actual should be (expected)
}
}
describe("#isField") {
it("should return true") {
val expected = true
val actual = javaFieldInfoProfile.isField
actual should be (expected)
}
}
describe("#isArgument") {
it("should return false") {
val expected = false
val actual = javaFieldInfoProfile.isArgument
actual should be (expected)
}
}
describe("#isLocal") {
it("should return false") {
val expected = false
val actual = javaFieldInfoProfile.isLocal
actual should be (expected)
}
}
describe("#setValueFromInfo") {
it("should throw an exception if no object reference or class type available") {
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Right(mock[ReferenceType]),
mockField
)(mockVirtualMachine)
// Retrieval of JDI value still happens first
val mockValueInfoProfile = mock[ValueInfo]
(mockValueInfoProfile.toJdiInstance _).expects()
.returning(mock[Value]).once()
intercept[Exception] {
javaFieldInfoProfile.setValueFromInfo(mockValueInfoProfile)
}
}
it("should be able to set instance fields") {
val expected = mock[ValueInfo]
val mockStringReference = mock[StringReference]
(expected.toJdiInstance _).expects()
.returning(mockStringReference).once()
// Ensure setting the value on the object is verified
(mockObjectReference.setValue _)
.expects(mockField, mockStringReference)
.once()
javaFieldInfoProfile.setValueFromInfo(expected) should be (expected)
}
it("should be able to set static fields") {
val expected = mock[ValueInfo]
val mockClassType = mock[ClassType]
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Right(mockClassType),
mockField
)(mockVirtualMachine)
val mockStringReference = mock[StringReference]
(expected.toJdiInstance _).expects()
.returning(mockStringReference).once()
// Ensure setting the value on the object is verified
(mockClassType.setValue _)
.expects(mockField, mockStringReference)
.once()
javaFieldInfoProfile.setValueFromInfo(expected) should be (expected)
}
}
describe("#toValueInfo") {
it("should return a wrapper around the value of a class' static field") {
val expected = mock[ValueInfo]
val mockValue = mock[Value]
// Retrieving the value of the field returns our mock
val mockClassType = mock[ClassType]
(mockClassType.getValue _).expects(mockField)
.returning(mockValue).once()
val mockNewValueProfile = mockFunction[Value, ValueInfo]
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Right(mockClassType),
mockField
)(mockVirtualMachine) {
override protected def newValueProfile(value: Value): ValueInfo =
mockNewValueProfile(value)
}
mockNewValueProfile.expects(mockValue).returning(expected).once()
javaFieldInfoProfile.toValueInfo should be (expected)
}
it("should return a wrapper around the value of an object's field instance") {
val expected = mock[ValueInfo]
val mockValue = mock[Value]
// Retrieving the value of the field returns our mock
(mockObjectReference.getValue _).expects(mockField)
.returning(mockValue).once()
val mockNewValueProfile = mockFunction[Value, ValueInfo]
val javaFieldInfoProfile = new JavaFieldInfo(
mockScalaVirtualMachine,
mockInfoProducerProfile,
Left(mockObjectReference),
mockField
)(mockVirtualMachine) {
override protected def newValueProfile(value: Value): ValueInfo =
mockNewValueProfile(value)
}
mockNewValueProfile.expects(mockValue).returning(expected).once()
javaFieldInfoProfile.toValueInfo should be (expected)
}
}
}
}
|
ensime/scala-debugger
|
scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/java/info/JavaFieldInfoSpec.scala
|
Scala
|
apache-2.0
| 11,355 |
/**
* Copyright (C) 2012-2013 Vadim Bartko ([email protected]).
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* See file LICENSE.txt for License information.
*/
package com.nevilon.nomad.crawler
import com.nevilon.nomad.storage.graph.SynchronizedDBService
class DomainInjector(dbService: SynchronizedDBService) {
def inject(url: String) {
val normalizedUrl = URLUtils.normalize(url)
val domainStr = URLUtils.getDomainName(normalizedUrl)
dbService.createDomainIfNeeded(new Domain(domainStr, DomainStatus.NEW))
dbService.getUrl(normalizedUrl) match {
case None => {
val url = dbService.saveOrUpdateUrl(new Url(normalizedUrl, UrlStatus.NEW))
dbService.addUrlToDomain(url)
}
      case Some(v) => // should already exist; nothing to do
}
}
}
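// Wiring sketch (illustrative URL): constructing a SynchronizedDBService depends on the
// surrounding graph-storage setup, so it is taken as a parameter here.
object DomainInjectorExample {
  def injectSeed(dbService: SynchronizedDBService): Unit = {
    // the url is normalized, the domain node is created if missing, and a NEW
    // url node is linked only when it has not been seen before
    new DomainInjector(dbService).inject("HTTP://Example.COM/some/page")
  }
}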
|
hudvin/nomad
|
src/main/scala/com/nevilon/nomad/crawler/DomainInjector.scala
|
Scala
|
gpl-2.0
| 997 |
package mesosphere.marathon
package api.v2
import java.time.Clock
import java.net.URI
import javax.inject.Inject
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.container.{AsyncResponse, Suspended}
import javax.ws.rs.core.{Context, MediaType, Response}
import akka.event.EventStream
import mesosphere.marathon.api.v2.Validation._
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.{AuthResource, PATCH, RestResource}
import mesosphere.marathon.core.appinfo._
import mesosphere.marathon.core.event.ApiPostEvent
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.plugin.auth._
import mesosphere.marathon.raml.Raml
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.stream.Implicits._
import org.glassfish.jersey.server.ManagedAsync
import play.api.libs.json.{JsObject, Json}
import scala.concurrent.{ExecutionContext, Future}
import scala.async.Async._
@Path("v2/apps")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MediaType.APPLICATION_JSON))
class AppsResource @Inject() (
clock: Clock,
eventBus: EventStream,
appTasksRes: AppTasksResource,
service: MarathonSchedulerService,
appInfoService: AppInfoService,
val config: MarathonConf,
groupManager: GroupManager,
pluginManager: PluginManager)(implicit
val authenticator: Authenticator,
val authorizer: Authorizer,
val executionContext: ExecutionContext) extends RestResource with AuthResource {
import AppHelpers._
import Normalization._
private[this] val ListApps = """^((?:.+/)|)\*$""".r
private implicit lazy val appDefinitionValidator = AppDefinition.validAppDefinition(config.availableFeatures)(pluginManager)
private val normalizationConfig = AppNormalization.Configuration(
config.defaultNetworkName.toOption,
config.mesosBridgeName())
private implicit val validateAndNormalizeApp: Normalization[raml.App] =
appNormalization(config.availableFeatures, normalizationConfig)(AppNormalization.withCanonizedIds())
private implicit val normalizeAppUpdate: Normalization[raml.AppUpdate] =
appUpdateNormalization(normalizationConfig)(AppNormalization.withCanonizedIds())
@GET
def index(
@QueryParam("cmd") cmd: String,
@QueryParam("id") id: String,
@QueryParam("label") label: String,
@QueryParam("embed") embed: java.util.Set[String],
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
val selector = selectAuthorized(search(Option(cmd), Option(id), Option(label)))
// additional embeds are deprecated!
val resolvedEmbed = InfoEmbedResolver.resolveApp(embed) +
AppInfo.Embed.Counts + AppInfo.Embed.Deployments
val mapped = await(appInfoService.selectAppsBy(selector, resolvedEmbed))
Response.ok(jsonObjString("apps" -> mapped)).build()
}
}
@POST
@ManagedAsync
def create(
body: Array[Byte],
@DefaultValue("false")@QueryParam("force") force: Boolean,
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
val rawApp = Raml.fromRaml(Json.parse(body).as[raml.App].normalize)
val now = clock.now()
val app = validateOrThrow(rawApp).copy(versionInfo = VersionInfo.OnlyVersion(now))
checkAuthorization(CreateRunSpec, app)
def createOrThrow(opt: Option[AppDefinition]) = opt
.map(_ => throw ConflictingChangeException(s"An app with id [${app.id}] already exists."))
.getOrElse(app)
val plan = await(groupManager.updateApp(app.id, createOrThrow, app.version, force))
val appWithDeployments = AppInfo(
app,
maybeCounts = Some(TaskCounts.zero),
maybeTasks = Some(Seq.empty),
maybeDeployments = Some(Seq(Identifiable(plan.id)))
)
maybePostEvent(req, appWithDeployments.app)
// servletRequest.getAsyncContext
Response
.created(new URI(app.id.toString))
.header(RestResource.DeploymentHeader, plan.id)
.entity(jsonString(appWithDeployments))
.build()
}
}
@GET
@Path("""{id:.+}""")
def show(
@PathParam("id") id: String,
@QueryParam("embed") embed: java.util.Set[String],
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
val resolvedEmbed = InfoEmbedResolver.resolveApp(embed) ++ Set(
// deprecated. For compatibility.
AppInfo.Embed.Counts, AppInfo.Embed.Tasks, AppInfo.Embed.LastTaskFailure, AppInfo.Embed.Deployments
)
id match {
case ListApps(gid) =>
val groupId = gid.toRootPath
groupManager.group(groupId) match {
case Some(group) =>
checkAuthorization(ViewGroup, group)
val appsWithTasks = await(appInfoService.selectAppsInGroup(groupId, authzSelector, resolvedEmbed))
ok(jsonObjString("*" -> appsWithTasks))
case None =>
unknownGroup(groupId)
}
case _ =>
val appId = id.toRootPath
await(appInfoService.selectApp(appId, authzSelector, resolvedEmbed)) match {
case Some(appInfo) =>
checkAuthorization(ViewRunSpec, appInfo.app)
ok(jsonObjString("app" -> appInfo))
case None => unknownApp(appId)
}
}
}
}
/**
* Validate and normalize a single application update submitted via the REST API. Validation exceptions are not
* handled here, that's left as an exercise for the caller.
*
* @param appId used as the id of the generated app update (vs. whatever might be in the JSON body)
* @param body is the raw, unparsed JSON
* @param updateType CompleteReplacement if we want to replace the app entirely, PartialUpdate if we only want to update provided parts
*/
def canonicalAppUpdateFromJson(appId: PathId, body: Array[Byte], updateType: UpdateType): raml.AppUpdate = {
updateType match {
case CompleteReplacement =>
// this is a complete replacement of the app as we know it, so parse and normalize as if we're dealing
// with a brand new app because the rules are different (for example, many fields are non-optional with brand-new apps).
// however since this is an update, the user isn't required to specify an ID as part of the definition so we do
// some hackery here to pass initial JSON parsing.
val jsObj = Json.parse(body).as[JsObject] + ("id" -> Json.toJson(appId.toString))
// the version is thrown away in conversion to AppUpdate
jsObj.as[raml.App].normalize.toRaml[raml.AppUpdate]
case PartialUpdate(existingApp) =>
import mesosphere.marathon.raml.AppConversion.appUpdateRamlReader
val appUpdate = Json.parse(body).as[raml.AppUpdate].normalize
        Raml.fromRaml(appUpdate -> existingApp)(appUpdateRamlReader).normalize // validate that the resulting app is correct
appUpdate.copy(id = Some(appId.toString))
}
}
/**
* Validate and normalize an array of application updates submitted via the REST API. Validation exceptions are not
* handled here, that's left as an exercise for the caller.
*
* @param body is the raw, unparsed JSON
* @param partialUpdate true if the JSON should be parsed as a partial application update (all fields optional)
* or as a wholesale replacement (parsed like an app definition would be)
*/
def canonicalAppUpdatesFromJson(body: Array[Byte], partialUpdate: Boolean): Seq[raml.AppUpdate] = {
if (partialUpdate) {
Json.parse(body).as[Seq[raml.AppUpdate]].map(_.normalize)
} else {
// this is a complete replacement of the app as we know it, so parse and normalize as if we're dealing
// with a brand new app because the rules are different (for example, many fields are non-optional with brand-new apps).
// the version is thrown away in toUpdate so just pass `zero` for now.
Json.parse(body).as[Seq[raml.App]].map { app =>
app.normalize.toRaml[raml.AppUpdate]
}
}
}
@PUT
@Path("""{id:.+}""")
def replace(
@PathParam("id") id: String,
body: Array[Byte],
@DefaultValue("false")@QueryParam("force") force: Boolean,
@DefaultValue("true")@QueryParam("partialUpdate") partialUpdate: Boolean,
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
await(update(id, body, force, partialUpdate, req, allowCreation = true))
}
}
@PATCH
@Path("""{id:.+}""")
def patch(
@PathParam("id") id: String,
body: Array[Byte],
@DefaultValue("false")@QueryParam("force") force: Boolean,
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
await(update(id, body, force, partialUpdate = true, req, allowCreation = false))
}
}
@PUT
def replaceMultiple(
@DefaultValue("false")@QueryParam("force") force: Boolean,
@DefaultValue("true")@QueryParam("partialUpdate") partialUpdate: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
await(updateMultiple(force, partialUpdate, body, allowCreation = true))
}
}
@PATCH
def patchMultiple(
@DefaultValue("false")@QueryParam("force") force: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
await(updateMultiple(force, partialUpdate = true, body, allowCreation = false))
}
}
@DELETE
@Path("""{id:.+}""")
def delete(
@DefaultValue("true")@QueryParam("force") force: Boolean,
@PathParam("id") id: String,
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
val appId = id.toRootPath
val version = Timestamp.now()
def deleteApp(rootGroup: RootGroup) = {
checkAuthorization(DeleteRunSpec, rootGroup.app(appId), AppNotFoundException(appId))
rootGroup.removeApp(appId, version)
}
deploymentResult(await(groupManager.updateRoot(appId.parent, deleteApp, version = version, force = force)))
}
}
@DELETE
@Path("""{id:.+}/restart""")
def deleteRestart(
@DefaultValue("true")@QueryParam("force") force: Boolean,
@PathParam("id") id: String,
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = delete(force, id + "/restart", req, asyncResponse)
@Path("{appId:.+}/tasks")
def appTasksResource(): AppTasksResource = appTasksRes
@Path("{appId:.+}/versions")
def appVersionsResource(): AppVersionsResource = new AppVersionsResource(service, groupManager, authenticator,
authorizer, config)
@POST
@Path("{id:.+}/restart")
def restart(
@PathParam("id") id: String,
@DefaultValue("false")@QueryParam("force") force: Boolean,
@Context req: HttpServletRequest,
@Suspended asyncResponse: AsyncResponse): Unit = sendResponse(asyncResponse) {
async {
implicit val identity = await(authenticatedAsync(req))
val appId = id.toRootPath
def markForRestartingOrThrow(opt: Option[AppDefinition]) = {
opt
.map(checkAuthorization(UpdateRunSpec, _))
.map(_.markedForRestarting)
.getOrElse(throw AppNotFoundException(appId))
}
val newVersion = clock.now()
val restartDeployment = await(
groupManager.updateApp(id.toRootPath, markForRestartingOrThrow, newVersion, force)
)
deploymentResult(restartDeployment)
}
}
/**
* Internal representation of `replace or update` logic.
*
* @param id appId
* @param body request body
* @param force force update?
* @param partialUpdate partial update?
* @param req http servlet request
* @param allowCreation is creation allowed?
* @param identity implicit identity
* @return http servlet response
*/
private[this] def update(id: String, body: Array[Byte], force: Boolean, partialUpdate: Boolean,
req: HttpServletRequest, allowCreation: Boolean)(implicit identity: Identity): Future[Response] = async {
val appId = id.toRootPath
    // this can lead to a race condition where two non-existent apps with the same id are inserted
    // concurrently; one of them will be overwritten by the other
val maybeExistingApp = groupManager.app(appId)
val updateType = (maybeExistingApp, partialUpdate) match {
case (None, _) => CompleteReplacement
case (Some(app), true) => PartialUpdate(app)
case (_, false) => CompleteReplacement
}
val appUpdate = canonicalAppUpdateFromJson(appId, body, updateType)
val version = clock.now()
val plan = await(groupManager.updateApp(appId, AppHelpers.updateOrCreate(appId, _, appUpdate, partialUpdate, allowCreation, clock.now(), service), version, force))
val response = plan.original.app(appId)
.map(_ => Response.ok())
.getOrElse(Response.created(new URI(appId.toString)))
plan.target.app(appId).foreach { appDef =>
maybePostEvent(req, appDef)
}
deploymentResult(plan, response)
}
/**
* Internal representation of `replace or update` logic for multiple apps.
*
* @param force force update?
* @param partialUpdate partial update?
* @param body request body
* @param allowCreation is creation allowed?
* @param identity implicit identity
* @return http servlet response
*/
private[this] def updateMultiple(force: Boolean, partialUpdate: Boolean,
body: Array[Byte], allowCreation: Boolean)(implicit identity: Identity): Future[Response] = async {
val version = clock.now()
val updates = canonicalAppUpdatesFromJson(body, partialUpdate)
def updateGroup(rootGroup: RootGroup): RootGroup = updates.foldLeft(rootGroup) { (group, update) =>
update.id.map(PathId(_)) match {
case Some(id) => group.updateApp(id, AppHelpers.updateOrCreate(id, _, update, partialUpdate, allowCreation = allowCreation, clock.now(), service), version)
case None => group
}
}
deploymentResult(await(groupManager.updateRoot(PathId.empty, updateGroup, version, force)))
}
private def maybePostEvent(req: HttpServletRequest, app: AppDefinition) =
eventBus.publish(ApiPostEvent(req.getRemoteAddr, req.getRequestURI, app))
private[v2] def search(cmd: Option[String], id: Option[String], label: Option[String]): AppSelector = {
def containCaseInsensitive(a: String, b: String): Boolean = b.toLowerCase contains a.toLowerCase
val selectors = Seq[Option[Selector[AppDefinition]]](
cmd.map(c => Selector(_.cmd.exists(containCaseInsensitive(c, _)))),
id.map(s => Selector(app => containCaseInsensitive(s, app.id.toString))),
label.map(new LabelSelectorParsers().parsed)
).flatten
Selector.forall(selectors)
}
def selectAuthorized(fn: => AppSelector)(implicit identity: Identity): AppSelector = {
Selector.forall(Seq(authzSelector, fn))
}
}
sealed trait UpdateType
case object CompleteReplacement extends UpdateType
case class PartialUpdate(existingApp: AppDefinition) extends UpdateType
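// Decision sketch (a standalone restatement for clarity, not part of the request flow):
// the update-type selection in `update` above reduces to this truth table, i.e. an app
// is completely replaced unless it already exists and a partial update was requested.
object UpdateTypeSketch {
  def updateType(existing: Option[AppDefinition], partialUpdate: Boolean): UpdateType =
    (existing, partialUpdate) match {
      case (None, _) => CompleteReplacement
      case (Some(app), true) => PartialUpdate(app)
      case (_, false) => CompleteReplacement
    }
}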
|
gsantovena/marathon
|
src/main/scala/mesosphere/marathon/api/v2/AppsResource.scala
|
Scala
|
apache-2.0
| 16,081 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.api.impl
/**
* Represents an event that must be ordered across the cluster, so probably a PUT or DELETE.
*
* Only requests that do not commute must be ordered. We're willing to do 'dirty reads' for
* commutative requests.
*
* @param sequence Sequence number that totally orders this event across all events and nodes in the cluster.
* @param timestamp Timestamp which indicates when the event was created.
* @param request The request to order.
*/
case class OrderedEvent(sequence: Long, timestamp: Long, request: NonCommutativeSiriusRequest)
|
weggert/sirius
|
src/main/scala/com/comcast/xfinity/sirius/api/impl/OrderedEvent.scala
|
Scala
|
apache-2.0
| 1,235 |
package org.trustedanalytics.sparktk.saveload
import org.apache.spark.SparkContext
import org.json4s.JsonAST.JValue
import org.trustedanalytics.sparktk.frame.Frame
import org.trustedanalytics.sparktk.models.dimreduction.pca.PcaModel
import org.trustedanalytics.sparktk.models.classification.naive_bayes.NaiveBayesModel
import org.trustedanalytics.sparktk.models.classification.random_forest_classifier.RandomForestClassifierModel
import org.trustedanalytics.sparktk.models.classification.svm.SvmModel
import org.trustedanalytics.sparktk.models.clustering.kmeans.KMeansModel
import org.trustedanalytics.sparktk.models.clustering.gmm.GaussianMixtureModel
import org.trustedanalytics.sparktk.models.timeseries.arima.ArimaModel
import org.trustedanalytics.sparktk.models.timeseries.arx.ArxModel
import org.trustedanalytics.sparktk.models.regression.random_forest_regressor.RandomForestRegressorModel
object Loaders {
def load(sc: SparkContext, path: String): Any = {
val result = TkSaveLoad.loadTk(sc, path)
val loader = loaders.getOrElse(result.formatId, throw new RuntimeException(s"Could not find a registered loader for '${result.formatId}' stored at $path.\\nRegistered loaders include: ${loaders.keys.mkString("\\n")}"))
loader(sc, path, result.formatVersion, result.data)
}
/**
* required signature for a Loader
*
* sc: SparkContext
* path: String the location of the file to load
* formatVersion: Int the version of SaveLoad format found in the accompanying tk/ folder
* tkMetadata: JValue the metadata loaded from the accompanying tk/ folder
*/
type LoaderType = (SparkContext, String, Int, JValue) => Any
  // TODO: use a fancier technique, probably involving reflection/macros
/**
* Registry of all the loaders
*
* If you have an class that wants to play TkSaveLoad, it needs an entry in here:
*
* formatId -> loader function
*/
private lazy val loaders: Map[String, LoaderType] = {
val entries: Seq[TkSaveableObject] = List(ArimaModel,
ArxModel,
Frame,
GaussianMixtureModel,
KMeansModel,
NaiveBayesModel,
PcaModel,
RandomForestClassifierModel,
RandomForestRegressorModel,
SvmModel)
entries.map(e => e.formatId -> e.load _).toMap
}
}
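// Usage sketch (illustrative path): `load` returns Any, so callers cast the result to
// the model type they expect back.
object LoadersExample {
  def loadKMeans(sc: SparkContext, path: String): KMeansModel =
    Loaders.load(sc, path).asInstanceOf[KMeansModel]
}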
|
shibanis1/spark-tk
|
core/src/main/scala/org/trustedanalytics/sparktk/saveload/Loaders.scala
|
Scala
|
apache-2.0
| 2,280 |
package meerkat
import org.scalatest.{FlatSpec, Matchers}
class FSMTest extends FlatSpec with Matchers {
import TrafficLight._
"An FSM" should "transit correctly" in {
fsm ! Command.ready ! Command.go ! Command.stop ! Command.go
fsm.currentState should be (Light.green)
}
object TrafficLight {
case class Light private(name:String)
object Light {
val red = new Light("red")
val yellow = new Light("yellow")
val green = new Light("green")
}
case class Command private(name:String)
object Command {
val stop = new Command("stop")
val ready = new Command("ready")
val go = new Command("go")
}
val fsm = new FSM[Light, Command](Light.red) {
override def handleEvent = {
case Light.yellow -> Command.stop => Light.red
case _ -> Command.ready => Light.yellow
case Light.yellow -> Command.go => Light.green
}
override def onTransition = {
case (from -> to) -> cmd => println(s"${from.name} --(${cmd.name})--> ${to.name}")
}
override def onRejectedEvent(state:State, event:Event):Any = println(s"${state.name} --(${event.name})--x")
}
}
}
|
dant3/meerkat
|
src/test/scala/meerkat/FSMTest.scala
|
Scala
|
apache-2.0
| 1,205 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.step
import javax.servlet.FilterChain
import com.fasterxml.jackson.databind.JsonNode
import com.github.fge.jsonschema.exceptions.ProcessingException
import com.github.fge.jsonschema.main.JsonSchema
import com.rackspace.com.papi.components.checker.servlet._
import com.rackspace.com.papi.components.checker.step.base.{ConnectedStep, Step, StepContext}
class JSONSchema(id : String, label : String, schema : JsonSchema, val priority : Long, next : Array[Step]) extends ConnectedStep(id, label, next) {
override val mismatchMessage : String = "The JSON does not validate against the schema."
override def checkStep(req : CheckerServletRequest, resp : CheckerServletResponse, chain : FilterChain, context : StepContext) : Option[StepContext] = {
try {
schema.validate(req.parsedJSON)
Some(context)
} catch {
case pe : ProcessingException => {
val message = {
val fmsg = formatMessage(pe.getProcessingMessage().asJson())
if (fmsg != null) fmsg else pe.getProcessingMessage().toString()
}
req.contentError = new Exception(message, pe)
req.contentErrorPriority = priority
None
}
case e : Exception => {
req.contentError = e
req.contentErrorPriority = priority
None
}
}
}
private def formatMessage (jpmsg : JsonNode) : String = {
val pointer = getPointer(jpmsg)
if (pointer != null) {
"In "+pointer+", "+getMessage(jpmsg)
} else {
getMessage(jpmsg)
}
}
private def nullEmpty (in : String) : String = {
if (in == null || in == "") {
null
} else {
in
}
}
private def getMessage (jpmsg : JsonNode) : String = {
val jms = jpmsg.findValue("message")
if (jms != null) nullEmpty(jms.asText()) else null
}
private def getPointer(jpmsg : JsonNode) : String = {
val inst = jpmsg.findValue("instance")
if (inst != null) {
val p = inst.findValue("pointer")
if (p != null) {
nullEmpty(p.asText())
} else {
null
}
} else {
null
}
}
}
|
tylerroyal/api-checker
|
core/src/main/scala/com/rackspace/com/papi/components/checker/step/JSONSchema.scala
|
Scala
|
apache-2.0
| 2,772 |
package org.coursera.courier
import org.coursera.courier.generator.NilGeneratorMixin
class ScalaGenerator() extends ConfigurableScalaGenerator(NilGeneratorMixin)
|
coursera/courier
|
scala/generator/src/main/scala/org/coursera/courier/ScalaGenerator.scala
|
Scala
|
apache-2.0
| 164 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package models.form
import kafka.manager.model.ClusterConfig
/**
* @author hiral
*/
sealed trait Operation
case object Enable extends Operation
case object Disable extends Operation
case object Delete extends Operation
case object Update extends Operation
case class Unknown(operation: String) extends Operation
object Operation {
implicit def fromString(s:String) : Operation = {
s match {
case "Enable" => Enable
case "Disable" => Disable
case "Delete" => Delete
case "Update" => Update
case a: Any => Unknown(a.toString)
}
}
}
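// Conversion sketch (illustrative): unrecognized operation names are preserved in
// Unknown rather than failing fast.
object OperationExample extends App {
  assert(Operation.fromString("Enable") == Enable)
  assert(Operation.fromString("Bogus") == Unknown("Bogus"))
}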
object ClusterOperation {
def apply(operation: String,
name: String,
version: String,
zkHosts: String,
zkMaxRetry: Int,
jmxEnabled: Boolean,
jmxUser: Option[String],
jmxPass: Option[String],
pollConsumers: Boolean,
filterConsumers: Boolean,
logkafkaEnabled: Boolean,
activeOffsetCacheEnabled: Boolean,
displaySizeEnabled: Boolean): ClusterOperation = {
ClusterOperation(operation,ClusterConfig(name, version, zkHosts, zkMaxRetry, jmxEnabled, jmxUser, jmxPass,
pollConsumers, filterConsumers, logkafkaEnabled, activeOffsetCacheEnabled, displaySizeEnabled))
}
def customUnapply(co: ClusterOperation) : Option[(String, String, String, String, Int, Boolean, Option[String], Option[String], Boolean, Boolean, Boolean, Boolean, Boolean)] = {
Option((co.op.toString, co.clusterConfig.name, co.clusterConfig.version.toString,
co.clusterConfig.curatorConfig.zkConnect, co.clusterConfig.curatorConfig.zkMaxRetry,
co.clusterConfig.jmxEnabled, co.clusterConfig.jmxUser, co.clusterConfig.jmxPass,
co.clusterConfig.pollConsumers, co.clusterConfig.filterConsumers, co.clusterConfig.logkafkaEnabled,
co.clusterConfig.activeOffsetCacheEnabled, co.clusterConfig.displaySizeEnabled))
}
}
case class ClusterOperation private(op: Operation, clusterConfig: ClusterConfig)
|
xuwei-k/kafka-manager
|
app/models/form/ClusterOperation.scala
|
Scala
|
apache-2.0
| 2,166 |
package com.sksamuel.elastic4s.index
import com.sksamuel.elastic4s.indexes.{IndexDefinition, IndexShowImplicits}
import com.sksamuel.elastic4s.{Executable, XContentFieldValueWriter}
import org.elasticsearch.action.index.{IndexRequestBuilder, IndexResponse}
import org.elasticsearch.client.Client
import org.elasticsearch.common.xcontent.XContentFactory
import scala.concurrent.Future
trait IndexExecutables extends IndexShowImplicits {
implicit object IndexDefinitionExecutable
extends Executable[IndexDefinition, IndexResponse, RichIndexResponse] {
def builder(c: Client, t: IndexDefinition): IndexRequestBuilder = {
val builder = c.prepareIndex(t.indexAndType.index, t.indexAndType.`type`)
t.id.map(_.toString).foreach(builder.setId)
t.source match {
case Some(json) => builder.setSource(json)
case _ =>
val source = XContentFactory.jsonBuilder().startObject()
t.fields.foreach(XContentFieldValueWriter(source, _))
source.endObject()
builder.setSource(source)
}
t.parent.foreach(builder.setParent)
t.refresh.foreach(builder.setRefreshPolicy)
t.version.foreach(builder.setVersion)
t.versionType.foreach(builder.setVersionType)
t.routing.foreach(builder.setRouting)
t.pipeline.foreach(builder.setPipeline)
t.timestamp.foreach(builder.setTimestamp)
t.source.foreach(builder.setSource)
t.opType.foreach(builder.setOpType)
builder
}
override def apply(c: Client,
t: IndexDefinition): Future[RichIndexResponse] = {
val req = builder(c, t)
injectFutureAndMap(req.execute)(RichIndexResponse.apply)
}
}
implicit class IndexDefinitionShowOps(f: IndexDefinition) {
def show: String = IndexShow.show(f)
}
}
|
tyth/elastic4s
|
elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/index/IndexExecutables.scala
|
Scala
|
apache-2.0
| 1,811 |
package edu.gemini.model.p1.targetio.table
import edu.gemini.model.p1.targetio.api._
import uk.ac.starlink.table.{StarTableOutput, RowListStarTable, StarTable}
import java.io.{FileOutputStream, BufferedOutputStream, OutputStream, File}
object TableWriter {
def toStarTable[R](rows: Iterable[R], cols: List[Column[R,_]], format: FileFormat): StarTable = {
val infos = cols map { _.info(format) }
val tab = new RowListStarTable(infos.toArray)
val sers = cols map { col => col.writer(format) }
val tabList = sers map { ser => rows map { row => ser(row).asInstanceOf[AnyRef] } }
tabList.transpose foreach { row => tab.addRow(row.toArray) }
tab
}
private def withOutputStream[T](os: OutputStream)(f: OutputStream => T): T = {
try {
f(os)
} finally {
if (os != null) os.close()
}
}
def write[R](rows: Iterable[R], cols: List[Column[R,_]], file: File, ftype: FileType): Either[DataSourceError, Unit] =
for {
fos <- open(file).right
res <- withOutputStream(new BufferedOutputStream(fos)) { os => write(rows, cols, os, ftype) }.right
} yield res
private def open(file: File): Either[DataSourceError, FileOutputStream] =
try {
Right(new FileOutputStream(file))
} catch {
      case ex: Exception => Left(DataSourceError(s"Could not open '${file.getName}' for writing."))
}
def write[R](rows: Iterable[R], cols: List[Column[R,_]], outs: OutputStream, ftype: FileType): Either[DataSourceError, Unit] = {
val starTab = toStarTable(rows, cols, ftype.format)
try {
Right(new StarTableOutput().writeStarTable(starTab, outs, stilWriter(ftype)))
} catch {
case ex: Exception =>
val base = "There was an unexpected problem while writing targets"
val msg = Option(ex.getMessage) map { m => base + ":\\n" + m } getOrElse base + "."
Left(DataSourceError(msg))
}
}
}
|
arturog8m/ocs
|
bundle/edu.gemini.model.p1.targetio/src/main/scala/edu/gemini/model/p1/targetio/table/TableWriter.scala
|
Scala
|
bsd-3-clause
| 1,911 |
package asobu.distributed.service
import akka.ConfigurationException
import akka.actor.ActorSystem
import akka.util.Timeout
import asobu.distributed.protocol.EndpointDefinition
import asobu.distributed.{DefaultEndpointsRegistry, EndpointsRegistry, SystemValidator}
import asobu.distributed.protocol.Prefix
import play.api.libs.json.JsObject
import play.routes.compiler.{HandlerCall, Route}
import scala.concurrent.{ExecutionContext, Future}
trait ControllerRegister {
type ApiDocGenerator = (Prefix, Seq[Route]) ⇒ Option[JsObject]
val voidApiDocGenerator: ApiDocGenerator = (_, _) ⇒ None
  //TODO: avoid taking all of these arguments implicitly
def init(prefix: Prefix)(controllers: Controller*)(
implicit
ec: ExecutionContext,
system: ActorSystem,
ao: Timeout,
buildNumber: Option[BuildNumber] = None,
apiDocGenerator: ApiDocGenerator = voidApiDocGenerator
): Future[List[EndpointDefinition]] = init(prefix → controllers.toList)
/**
   * Init controllers (add their actions to the [[EndpointsRegistry]]).
*
* @param controllers
* @param system
* @param ao
* @param buildNumber
* @param apiDocGenerator
*/
def init(controllers: (Prefix, List[Controller])*)(
implicit
ec: ExecutionContext,
system: ActorSystem,
ao: Timeout,
buildNumber: Option[BuildNumber],
apiDocGenerator: ApiDocGenerator
): Future[List[EndpointDefinition]] = {
val registry: EndpointsRegistry = DefaultEndpointsRegistry(system)
val rec: EndpointsRegistryClient = EndpointsRegistryClientImp(registry, buildNumber)
val version = buildNumber.map(_.buildInfoBuildNumber)
def registerController(prefix: Prefix, controller: Controller): Seq[Future[EndpointDefinition]] = {
def findRoute(action: Action): Route = controller.routes.find { r ⇒
val HandlerCall(packageName, controllerName, _, method, _) = r.call
action.name == packageName + "." + controllerName + "." + method
}.getOrElse {
throw new Exception(s"Cannot find route for action ${action.name}") //todo: this should really be a compilation error, the next right thing to do is to let it blow up the application on start.
}
def addAction(action: Action, prefix: Prefix = Prefix.root): Future[EndpointDefinition] = {
val epd: EndpointDefinition =
action.endpointDefinition(findRoute(action), prefix, version)
rec.add(epd).map(_ ⇒ epd)
}
controller.actions.map(addAction(_, prefix))
}
SystemValidator.validate(system) match {
case Left(error) ⇒ Future.failed(new ConfigurationException(error))
case _ ⇒
Future.sequence(controllers.flatMap {
case (prefix, controllers) ⇒
ApiDocumentationReporter(registry)(routes ⇒ apiDocGenerator(prefix, routes)).report(controllers)
controllers.flatMap(registerController(prefix, _))
}.toList)
}
}
}
|
iheartradio/asobu
|
distributed/src/main/scala/asobu/distributed/service/ControllerRegister.scala
|
Scala
|
apache-2.0
| 2,939 |
/**
* Copyright 2015 Frank Austin Nothaft
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.ananas.models
import net.fnothaft.ananas.avro.{ Backing, Kmer }
import scala.annotation.tailrec
object IntMer {
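  // Each base encodes as a (2-bit value, 2-bit mask) pair: A/C/G/T map to 0..3 with a
  // zero mask, while N maps to 0 with mask 3 so masked positions compare as wildcards.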
private def cVal(c: Char): (Int, Int) = c match {
case 'A' => (0, 0)
case 'C' => (1, 0)
case 'G' => (2, 0)
case 'T' => (3, 0)
case 'N' => (0, 3)
}
private def canon(kmer: Int, rcKmer: Int,
mask: Int, rcMask: Int): IntMer = {
if ((kmer >= 0 && rcKmer < 0) ||
(((kmer >= 0 && rcKmer >= 0) ||
(kmer < 0 && rcKmer < 0)) && kmer < rcKmer)) {
new IntMer(kmer, mask, true)
} else {
new IntMer(rcKmer, rcMask, false)
}
}
@tailrec private def constructKmer(c: Iterator[Char],
kmer: Int = 0,
mask: Int = 0,
rKmer: Int = 0,
rMask: Int = 0): (Int, Int, Int, Int) = {
if (!c.hasNext) {
      // we haven't complemented the reverse complement k-mer yet, so invert
(kmer, mask, ~rKmer, rMask)
} else {
val n = c.next
val (k, m) = cVal(n)
// shift in from right to get new kmer and mask
val newKmer = (kmer << 2) | k
val newMask = (mask << 2) | m
// shift in from left to get new reverse kmer and reverse mask
val shift = k << 30
val newRKmer = (rKmer >>> 2) | (shift)
val newRMask = (rMask >>> 2) | (m << 30)
// recurse
constructKmer(c, newKmer, newMask, newRKmer, newRMask)
}
}
def apply(str: String): IntMer = {
// check input length
require(str.length == 16, "K-mer must have length 16 (%s).".format(str))
try {
val (kmer, mask, rcKmer, rcMask) = constructKmer(str.toIterator)
canon(kmer, rcKmer, mask, rcMask)
} catch {
case t : MatchError => {
throw new IllegalArgumentException("Received illegal k-mer string. %s has bad chars.".format(str))
}
}
}
def fromSequence(seq: String): Array[IntMer] = {
// check input length
require(seq.length >= 16, "Sequence must be at least 16 bases (%s).".format(seq))
// preallocate k-mer array
val kArray = new Array[IntMer](seq.length - 15)
// calculate first k-mer
val (kmer, mask, rcKmer, rcMask) = constructKmer(seq.take(16).toIterator)
kArray(0) = canon(kmer, rcKmer, mask, rcMask)
@tailrec def extendKmer(c: Iterator[Char],
kmer: Int = 0,
mask: Int = 0,
rcKmer: Int = 0,
rcMask: Int = 0,
idx: Int = 0) {
if (c.hasNext) {
val n = c.next
val (k, m) = cVal(n)
// shift in from right to get new kmer and mask
val newKmer = (kmer << 2) | k
val newMask = (mask << 2) | m
// shift in from left to get new reverse kmer and reverse mask
val shift = ~(k << 30) & 0xC0000000
val newRcKmer = (rcKmer >>> 2) | (shift)
val newRcMask = (rcMask >>> 2) | (m << 30)
// insert k-mer into array
kArray(idx + 1) = canon(newKmer, newRcKmer, newMask, newRcMask)
// recurse
extendKmer(c, newKmer, newMask, newRcKmer, newRcMask, idx + 1)
}
}
// insert k-mers into array
extendKmer(seq.toIterator.drop(16), kmer, mask, rcKmer, rcMask)
kArray
}
def toSequence(array: Array[IntMer]): String = {
"%s%s".format(array.head.toOriginalString,
array.tail
.map(_.originalLastBase)
.mkString)
}
}
case class IntMer(kmer: Int,
mask: Int,
isOriginal: Boolean) extends CanonicalKmer {
def kmerLength: Int = 16
def toAvro: Kmer = {
Kmer.newBuilder()
.setFormat(Backing.INT)
.setIsOriginal(isOriginal)
.setIntKmer(kmer)
.setIntMask(mask)
.build()
}
def sameExceptForOrientation(k: CanonicalKmer): Boolean = k match {
case imer: IntMer => {
imer.kmer == kmer && imer.mask == mask
}
case _ => {
false
}
}
def flipCanonicality: CanonicalKmer = {
@tailrec def flip(k: Int,
m: Int,
nk: Int = 0,
nm: Int = 0,
i: Int = 16): (Int, Int) = {
if (i <= 0) {
(~nk, nm)
} else {
flip(k >>> 2, m >>> 2, (nk << 2) | (k & 0x3), (nm << 2) | (m & 0x3), i - 1)
}
}
val (newKmer, newMask) = flip(kmer, mask)
new IntMer(newKmer, newMask, !isOriginal)
}
override def hashCode: Int = kmer | mask
def longHash: Long = hashCode.toLong
def equals(o: IntMer): Boolean = {
(~(kmer ^ o.kmer) | mask | o.mask) == 0xFFFFFFFF
}
private def getBase(k: Int,
m: Int): Char = {
if ((m & 0x3) != 0) {
'N'
} else {
(k & 0x3) match {
case 0 => 'A'
case 1 => 'C'
case 2 => 'G'
case _ => 'T'
}
}
}
private def getRcBase(k: Int,
m: Int): Char = {
if ((m & 0xC0000000) != 0) {
'N'
} else {
(k & 0xC0000000) match {
case 0x00000000 => 'T'
case 0x40000000 => 'G'
case 0x80000000 => 'C'
case _ => 'A'
}
}
}
def toCanonicalString: String = {
@tailrec def buildString(shiftKmer: Int,
shiftMask: Int,
a: Array[Char],
i: Int = 15): String = {
if (i < 0) {
a.mkString
} else {
a(i) = getBase(shiftKmer, shiftMask)
buildString(shiftKmer >>> 2, shiftMask >>> 2, a, i - 1)
}
}
buildString(kmer, mask, new Array[Char](16))
}
def toAntiCanonicalString: String = {
@tailrec def buildRcString(shiftKmer: Int,
shiftMask: Int,
a: Array[Char],
i: Int = 15): String = {
if (i < 0) {
a.mkString
} else {
a(i) = getRcBase(shiftKmer, shiftMask)
buildRcString(shiftKmer << 2, shiftMask << 2, a, i - 1)
}
}
buildRcString(kmer, mask, new Array[Char](16))
}
override def toString: String = toCanonicalString
def lastBase: Char = {
getBase(kmer, mask)
}
def originalLastBase: Char = {
if (isOriginal) {
lastBase
} else {
getRcBase(kmer, mask)
}
}
}
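// Usage sketch (illustrative sequence): a 16-base k-mer packs into a single Int, and
// construction canonicalizes between the k-mer and its reverse complement.
object IntMerExample extends App {
  val kmer = IntMer("ACGTACGTACGTACGT")
  println(kmer.toCanonicalString)     // the canonical orientation
  println(kmer.toAntiCanonicalString) // its reverse complement
}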
|
fnothaft/ananas
|
src/main/scala/net/fnothaft/ananas/models/IntMer.scala
|
Scala
|
apache-2.0
| 7,152 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.Date
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.TaskContext
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.{SerializableConfiguration, Utils}
import org.apache.carbondata.common.CarbonIterator
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.dictionary.server.DictionaryServer
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.stats.QueryStatistic
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
import org.apache.carbondata.processing.loading.constants.DataLoadProcessorConstants
import org.apache.carbondata.processing.loading.events.LoadEvents.{LoadTablePostExecutionEvent, LoadTablePreExecutionEvent}
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
import org.apache.carbondata.spark.rdd.StreamHandoffRDD
import org.apache.carbondata.streaming.{CarbonStreamException, CarbonStreamOutputFormat}
import org.apache.carbondata.streaming.parser.CarbonStreamParser
import org.apache.carbondata.streaming.segment.StreamSegment
/**
 * An implementation of a stream sink. It persists each batch to disk by appending the
 * batch data to data files.
*/
class CarbonAppendableStreamSink(
sparkSession: SparkSession,
val carbonTable: CarbonTable,
var currentSegmentId: String,
parameters: Map[String, String],
carbonLoadModel: CarbonLoadModel,
server: Option[DictionaryServer],
operationContext: OperationContext) extends Sink {
private val fileLogPath = CarbonTablePath.getStreamingLogDir(carbonTable.getTablePath)
private val fileLog = new FileStreamSinkLog(FileStreamSinkLog.VERSION, sparkSession, fileLogPath)
// prepare configuration
private val hadoopConf = {
val conf = sparkSession.sessionState.newHadoopConf()
// put all parameters into hadoopConf
parameters.foreach { entry =>
conf.set(entry._1, entry._2)
}
// properties below will be used for default CarbonStreamParser
conf.set("carbon_complex_delimiter_level_1",
carbonLoadModel.getComplexDelimiterLevel1)
conf.set("carbon_complex_delimiter_level_2",
carbonLoadModel.getComplexDelimiterLevel2)
conf.set(
DataLoadProcessorConstants.SERIALIZATION_NULL_FORMAT,
carbonLoadModel.getSerializationNullFormat().split(",")(1))
conf.set(
CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
carbonLoadModel.getTimestampformat())
conf.set(
CarbonCommonConstants.CARBON_DATE_FORMAT,
carbonLoadModel.getDateFormat())
conf
}
  // segment max size (bytes)
private val segmentMaxSize = hadoopConf.getLong(
CarbonCommonConstants.HANDOFF_SIZE,
CarbonProperties.getInstance().getHandoffSize
)
// auto handoff
private val enableAutoHandoff = hadoopConf.getBoolean(
CarbonCommonConstants.ENABLE_AUTO_HANDOFF,
CarbonProperties.getInstance().isEnableAutoHandoff
)
override def addBatch(batchId: Long, data: DataFrame): Unit = {
if (batchId <= fileLog.getLatest().map(_._1).getOrElse(-1L)) {
CarbonAppendableStreamSink.LOGGER.info(s"Skipping already committed batch $batchId")
} else {
val statistic = new QueryStatistic()
// fire pre event on every batch add
// in case of streaming options and optionsFinal can be same
val loadTablePreExecutionEvent = new LoadTablePreExecutionEvent(
carbonTable.getCarbonTableIdentifier,
carbonLoadModel)
OperationListenerBus.getInstance().fireEvent(loadTablePreExecutionEvent, operationContext)
checkOrHandOffSegment()
// committer will record how this spark job commit its output
val committer = FileCommitProtocol.instantiate(
className = sparkSession.sessionState.conf.streamingFileCommitProtocolClass,
jobId = batchId.toString,
outputPath = fileLogPath,
isAppend = false)
committer match {
case manifestCommitter: ManifestFileCommitProtocol =>
manifestCommitter.setupManifestOptions(fileLog, batchId)
case _ => // Do nothing
}
CarbonAppendableStreamSink.writeDataFileJob(
sparkSession,
carbonTable,
parameters,
batchId,
currentSegmentId,
data.queryExecution,
committer,
hadoopConf,
carbonLoadModel,
server)
// fire post event on every batch add
val loadTablePostExecutionEvent = new LoadTablePostExecutionEvent(
carbonTable.getCarbonTableIdentifier,
carbonLoadModel
)
OperationListenerBus.getInstance().fireEvent(loadTablePostExecutionEvent, operationContext)
statistic.addStatistics(s"add batch: $batchId", System.currentTimeMillis())
CarbonAppendableStreamSink.LOGGER.info(
s"${statistic.getMessage}, taken time(ms): ${statistic.getTimeTaken}")
}
}
/**
   * If the directory size of the current segment exceeds the threshold, hand off to a new segment.
*/
private def checkOrHandOffSegment(): Unit = {
// get streaming segment, if not exists, create new streaming segment
val segmentId = StreamSegment.open(carbonTable)
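    // Either the open segment is still the one we are writing to (roll over to
    // a new segment once its size exceeds the handoff threshold), or another
    // writer already opened a new segment (switch to it and create its directory).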
if (segmentId.equals(currentSegmentId)) {
val segmentDir = CarbonTablePath.getSegmentPath(carbonTable.getTablePath, currentSegmentId)
val fileType = FileFactory.getFileType(segmentDir)
if (segmentMaxSize <= StreamSegment.size(segmentDir)) {
val newSegmentId = StreamSegment.close(carbonTable, currentSegmentId)
currentSegmentId = newSegmentId
val newSegmentDir =
CarbonTablePath.getSegmentPath(carbonTable.getTablePath, currentSegmentId)
FileFactory.mkdirs(newSegmentDir, fileType)
// trigger hand off operation
if (enableAutoHandoff) {
StreamHandoffRDD.startStreamingHandoffThread(
carbonLoadModel,
operationContext,
sparkSession,
false)
}
}
} else {
currentSegmentId = segmentId
val newSegmentDir =
CarbonTablePath.getSegmentPath(carbonTable.getTablePath, currentSegmentId)
val fileType = FileFactory.getFileType(newSegmentDir)
FileFactory.mkdirs(newSegmentDir, fileType)
}
}
}
object CarbonAppendableStreamSink {
private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
   * Packages the Hadoop configuration so that it can be passed from the driver side to the executor side.
*/
case class WriteDataFileJobDescription(
serializableHadoopConf: SerializableConfiguration,
batchId: Long,
segmentId: String)
/**
   * Runs a Spark job to append the newly arrived data directly to the existing
   * row-format files.
   * If a task fails, Spark retries the task and Carbon recovers by truncating
   * the HDFS file (see StreamSegment.tryRecoverFromTaskFault).
   * If the job fails, every file in the stream segment is truncated
   * if necessary (see StreamSegment.tryRecoverFromJobFault).
*/
def writeDataFileJob(
sparkSession: SparkSession,
carbonTable: CarbonTable,
parameters: Map[String, String],
batchId: Long,
segmentId: String,
queryExecution: QueryExecution,
committer: FileCommitProtocol,
hadoopConf: Configuration,
carbonLoadModel: CarbonLoadModel,
server: Option[DictionaryServer]): Unit = {
// create job
val job = Job.getInstance(hadoopConf)
job.setOutputKeyClass(classOf[Void])
job.setOutputValueClass(classOf[InternalRow])
val jobId = CarbonInputFormatUtil.getJobId(new Date, batchId.toInt)
job.setJobID(jobId)
val description = WriteDataFileJobDescription(
serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
batchId,
segmentId
)
// run write data file job
SQLExecution.withNewExecutionId(sparkSession, queryExecution) {
var result: Array[TaskCommitMessage] = null
try {
committer.setupJob(job)
// initialize dictionary server
if (server.isDefined) {
server.get.initializeDictionaryGenerator(carbonTable)
}
val rowSchema = queryExecution.analyzed.schema
// write data file
result = sparkSession.sparkContext.runJob(queryExecution.toRdd,
(taskContext: TaskContext, iterator: Iterator[InternalRow]) => {
writeDataFileTask(
description,
carbonLoadModel,
sparkStageId = taskContext.stageId(),
sparkPartitionId = taskContext.partitionId(),
sparkAttemptNumber = taskContext.attemptNumber(),
committer,
iterator,
rowSchema
)
})
// write dictionary
if (server.isDefined) {
try {
server.get.writeTableDictionary(carbonTable.getCarbonTableIdentifier.getTableId)
} catch {
case _: Exception =>
LOGGER.error(
s"Error while writing dictionary file for ${carbonTable.getTableUniqueName}")
throw new Exception(
"Streaming ingest failed due to error while writing dictionary file")
}
}
// update data file info in index file
StreamSegment.updateIndexFile(
CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segmentId))
} catch {
// catch fault of executor side
case t: Throwable =>
val segmentDir = CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segmentId)
StreamSegment.recoverSegmentIfRequired(segmentDir)
LOGGER.error(t, s"Aborting job ${ job.getJobID }.")
committer.abortJob(job)
throw new CarbonStreamException("Job failed to write data file", t)
}
committer.commitJob(job, result)
LOGGER.info(s"Job ${ job.getJobID } committed.")
}
}
/**
   * Executes a task for each partition to write a data file.
*/
def writeDataFileTask(
description: WriteDataFileJobDescription,
carbonLoadModel: CarbonLoadModel,
sparkStageId: Int,
sparkPartitionId: Int,
sparkAttemptNumber: Int,
committer: FileCommitProtocol,
iterator: Iterator[InternalRow],
rowSchema: StructType
): TaskCommitMessage = {
val jobId = CarbonInputFormatUtil.getJobId(new Date, sparkStageId)
val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)
// Set up the attempt context required to use in the output committer.
val taskAttemptContext: TaskAttemptContext = {
// Set up the configuration object
val hadoopConf = description.serializableHadoopConf.value
CarbonStreamOutputFormat.setSegmentId(hadoopConf, description.segmentId)
hadoopConf.set("mapred.job.id", jobId.toString)
hadoopConf.set("mapred.tip.id", taskAttemptId.getTaskID.toString)
hadoopConf.set("mapred.task.id", taskAttemptId.toString)
hadoopConf.setBoolean("mapred.task.is.map", true)
hadoopConf.setInt("mapred.task.partition", 0)
new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
}
committer.setupTask(taskAttemptContext)
try {
Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
val parserName = taskAttemptContext.getConfiguration.get(
CarbonStreamParser.CARBON_STREAM_PARSER,
CarbonStreamParser.CARBON_STREAM_PARSER_DEFAULT)
val streamParser =
Class.forName(parserName).newInstance.asInstanceOf[CarbonStreamParser]
streamParser.initialize(taskAttemptContext.getConfiguration, rowSchema)
StreamSegment.appendBatchData(new InputIterator(iterator, streamParser),
taskAttemptContext, carbonLoadModel)
})(catchBlock = {
committer.abortTask(taskAttemptContext)
LOGGER.error(s"Job $jobId aborted.")
})
committer.commitTask(taskAttemptContext)
} catch {
case t: Throwable =>
throw new CarbonStreamException("Task failed while writing rows", t)
}
}
/**
   * Converts a Spark iterator to a Carbon iterator so that the Java module can use it.
*/
class InputIterator(rddIter: Iterator[InternalRow], streamParser: CarbonStreamParser)
extends CarbonIterator[Array[Object]] {
override def hasNext: Boolean = rddIter.hasNext
override def next: Array[Object] = {
streamParser.parserRow(rddIter.next())
}
override def close(): Unit = {
streamParser.close()
}
}
}
|
jatin9896/incubator-carbondata
|
integration/spark-common/src/main/scala/org/apache/spark/sql/execution/streaming/CarbonAppendableStreamSink.scala
|
Scala
|
apache-2.0
| 14,217 |
package coursier.util
import coursier.util.Monad.ops._
final case class EitherT[F[_], L, R](run: F[Either[L, R]]) {
def map[S](f: R => S)(implicit M: Monad[F]): EitherT[F, L, S] =
EitherT(
run.map(_.map(f))
)
def flatMap[S](f: R => EitherT[F, L, S])(implicit M: Monad[F]): EitherT[F, L, S] =
EitherT(
run.flatMap {
case Left(l) =>
M.point(Left(l))
case Right(r) =>
f(r).run
}
)
def leftMap[M](f: L => M)(implicit M: Monad[F]): EitherT[F, M, R] =
EitherT(
run.map(_.left.map(f))
)
def leftFlatMap[S](f: L => EitherT[F, S, R])(implicit M: Monad[F]): EitherT[F, S, R] =
EitherT(
run.flatMap {
case Left(l) =>
f(l).run
case Right(r) =>
M.point(Right(r))
}
)
def orElse(other: => EitherT[F, L, R])(implicit M: Monad[F]): EitherT[F, L, R] =
EitherT(
run.flatMap {
case Left(_) =>
other.run
case Right(r) =>
M.point(Right(r))
}
)
}
object EitherT {
def point[F[_], L, R](r: R)(implicit M: Monad[F]): EitherT[F, L, R] =
EitherT(M.point(Right(r)))
def fromEither[F[_]]: FromEither[F] =
new FromEither[F]
final class FromEither[F[_]] {
def apply[L, R](either: Either[L, R])(implicit M: Monad[F]): EitherT[F, L, R] =
EitherT(M.point(either))
}
}
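// Hedged usage sketch (not part of the original file). It assumes
// coursier.util.Monad exposes `point` and `bind`; the Option instance below is
// defined only for this demo.
object EitherTExample {
  implicit val optionMonad: Monad[Option] = new Monad[Option] {
    def point[A](a: A): Option[A] = Some(a)
    def bind[A, B](fa: Option[A])(f: A => Option[B]): Option[B] = fa.flatMap(f)
  }
  def parse(s: String): EitherT[Option, String, Int] = {
    val either: Either[String, Int] =
      try Right(s.toInt)
      catch { case _: NumberFormatException => Left(s"not a number: $s") }
    EitherT.fromEither[Option](either)
  }
  // short-circuits on the first Left; sum.run == Some(Right(3))
  val sum: EitherT[Option, String, Int] =
    for {
      a <- parse("1")
      b <- parse("2")
    } yield a + b
}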
|
alexarchambault/coursier
|
modules/util/shared/src/main/scala/coursier/util/EitherT.scala
|
Scala
|
apache-2.0
| 1,380 |
package com.overviewdocs.clustering
/** Debugging app, so we can check why clustering is failing.
*/
object DumpDocuments extends App {
if (args.length != 1) {
System.err.println("Usage: DumpDocuments [docset-id]")
System.exit(1)
}
val documents = new CatDocuments(args(0).toLong, None)
documents.foreach { document =>
System.out.write(s"${document.id}\\t${document.tokens.mkString(" ")}\\n".getBytes("utf-8"))
}
}
|
overview/overview-server
|
worker/src/main/scala/com/overviewdocs/clustering/DumpDocuments.scala
|
Scala
|
agpl-3.0
| 440 |
package uScheduler.core.reader
import uScheduler.model.Group
import uScheduler.model.Lecture
import uScheduler.model.Student
import uScheduler.model.Event
case class Data(s: Seq[Student], l: Seq[Lecture], g: Seq[Group]) {
private lazy val event: Seq[Event] = g ++ l
lazy val conflicts = { for (e <- event.combinations(2) if (e(0).collidesWith(e(1)))) yield Seq(e(0), e(1)) }.toSeq
lazy val lectureOfStudent = s.map(st => (st, st.lecture.toSeq)).toMap
lazy val groupOfLecture = l.map(le => (le, le.group.toSeq)).toMap
lazy val groupOfStudent = s.map(st => (st, st.lecture.foldLeft(Seq[Group]())((l1, l2) => l1 ++ l2.group))).toMap
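  // heuristic group-size bounds derived from the lecture's average students
  // per group; the 1.75 / 0.25 factors look like tolerance constants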
lazy val grpMaxSize = (grp: Group) => (grp.lecture.avgStdPerGroup * 1.75 + 1).toInt
lazy val grpMinSize = (grp: Group) => (grp.lecture.avgStdPerGroup * 0.25).toInt
}
|
LarsHadidi/uScheduler
|
src/main/scala/uScheduler/core/reader/Data.scala
|
Scala
|
apache-2.0
| 811 |
package autolift.cats
import autolift.cats.applicative._
import cats.implicits._
class LiftMergeTest extends BaseSpec{
"liftMerge on a Option[Int] from a Option[Int]" should "work" in{
val in = Option(1)
val out = in liftMerge in
same[Option[(Int,Int)]](out, Option(1 -> 1))
}
"liftMerge on a Either[Option] from an Option" should "work" in{
val in = Either.right(Option(1))
val out = in liftMerge Option(1)
same[Either[Nothing,Option[(Int,Int)]]](out, Either.right(Option(1 -> 1)))
}
"liftMerge on a Option[List[Int]] on a List[Int]" should "work" in{
val in = Option(List(1, 2))
val out = in liftMerge List(1, 2)
same[Option[List[(Int,Int)]]](out, Option(List((1, 1), (2, 1), (1, 2), (2, 2))))
}
"liftMerge on an List[Either[?,Option]]" should "work" in{
val in = List(Either.right(Option(1)))
val out = in liftMerge Option(1)
same[List[Either[Nothing,Option[(Int,Int)]]]](out, List(Either.right(Option(1 -> 1))))
}
}
|
wheaties/AutoLifts
|
autolift-cats/src/test/scala/autolift/cats/LiftMergeTest.scala
|
Scala
|
apache-2.0
| 960 |
package com.flurdy.socialcrowd.model
import scala.collection.mutable.{MutableList,HashSet => MutableHashSet}
case class SocialMember(memberName: String){
val posts = new MutableList[SocialMessage]
val friends = new MutableHashSet[SocialMember]
def getPosts: List[SocialMessage] = posts.toList.sorted
def showWall: List[SocialMessage] = {
( for{
member <- this +: friends.toList
post <- member.posts
} yield post
).sorted
}
def post(message: String) = postMessage( new SocialMessage(memberName,message) )
def postMessage(message: SocialMessage) = posts += message
def follows(friend: SocialMember) {
if( friend != this ) friends += friend
}
}
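// Hedged usage sketch (not part of the original file): a member's wall merges
// their own posts with those of the members they follow; SocialMessage and its
// ordering are defined elsewhere in this project.
object SocialMemberExample extends App {
  val alice = SocialMember("alice")
  val bob = SocialMember("bob")
  alice.follows(bob) // alice now sees bob's posts
  alice.follows(alice) // ignored: a member cannot follow themselves
  bob.post("hello")
  alice.post("hi there")
  alice.showWall.foreach(println) // both messages, in sorted order
}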
|
flurdy/socialcrowd
|
src/main/scala/model/SocialMember.scala
|
Scala
|
mit
| 740 |
package slack
import akka.actor._
import slack.rtm.SlackRtmClient
import scala.concurrent.duration._
object Main extends App {
val token = "..."
implicit val system = ActorSystem("slack")
implicit val ec = system.dispatcher
val client = SlackRtmClient(token)
val selfId = client.state.self.id
client.onEvent { event =>
system.log.info("Received new event: {}", event)
/*
val mentionedIds = SlackUtil.extractMentionedIds(message.text)
if (mentionedIds.contains(selfId)) {
client.sendMessage(message.channel, s"<@${message.user}>: Hey!")
}
*/
}
}
|
weirded/slack-scala-client
|
src/main/scala/slack/Main.scala
|
Scala
|
mit
| 594 |
package juju.messages
import org.scalatest.{FunSuiteLike, Matchers}
case class DoSomething(foo: String) extends Command
case class SomethingDone(foo: String) extends DomainEvent
class MessageSpec extends FunSuiteLike with Matchers {
test("Created Command has a timestamp with default value") {
DoSomething("fake").timestamp.before(new java.util.Date)
}
test("Created Event has a timestamp with default value") {
SomethingDone("fake").timestamp.before(new java.util.Date)
}
}
|
brokersquare/juju
|
core/src/test/scala/juju/messages/MessagesSpec.scala
|
Scala
|
apache-2.0
| 495 |
package org.scalaide.ui.internal.editor.decorators.implicits
import org.scalaide.core.extensions.SemanticHighlightingParticipant
/** This class is referenced through plugin.xml. */
class ImplicitHighlightingParticipant extends SemanticHighlightingParticipant(
viewer => new ImplicitHighlightingPresenter(viewer))
|
Kwestor/scala-ide
|
org.scala-ide.sdt.core/src/org/scalaide/ui/internal/editor/decorators/implicits/ImplicitHighlightingParticipant.scala
|
Scala
|
bsd-3-clause
| 318 |
import leon.lang._
object Arithmetic {
/* VSTTE 2008 - Dafny paper */
def mult(x : BigInt, y : BigInt): BigInt = ({
var r: BigInt = 0
if(y < 0) {
var n = y
(while(n != 0) {
r = r - x
n = n + 1
}) invariant(r == x * (y - n) && 0 <= -n)
} else {
var n = y
(while(n != 0) {
r = r + x
n = n - 1
}) invariant(r == x * (y - n) && 0 <= n)
}
r
}) ensuring(_ == x*y)
/* VSTTE 2008 - Dafny paper */
def add(x : BigInt, y : BigInt): BigInt = ({
var r = x
if(y < 0) {
var n = y
(while(n != 0) {
r = r - 1
n = n + 1
}) invariant(r == x + y - n && 0 <= -n)
} else {
var n = y
(while(n != 0) {
r = r + 1
n = n - 1
}) invariant(r == x + y - n && 0 <= n)
}
r
}) ensuring(_ == x+y)
/* VSTTE 2008 - Dafny paper */
def addBuggy(x : BigInt, y : BigInt): BigInt = ({
var r = x
if(y < 0) {
var n = y
(while(n != 0) {
r = r + 1
n = n + 1
}) invariant(r == x + y - n && 0 <= -n)
} else {
var n = y
(while(n != 0) {
r = r + 1
n = n - 1
}) invariant(r == x + y - n && 0 <= n)
}
r
}) ensuring(_ == x+y)
def sum(n: BigInt): BigInt = {
require(n >= 0)
var r: BigInt = 0
var i: BigInt = 0
(while(i < n) {
i = i + 1
r = r + i
}) invariant(r >= i && i >= 0 && r >= 0)
r
} ensuring(_ >= n)
def divide(x: BigInt, y: BigInt): (BigInt, BigInt) = {
require(x >= 0 && y > 0)
var r = x
var q = BigInt(0)
(while(r >= y) {
r = r - y
q = q + 1
}) invariant(x == y*q + r && r >= 0)
(q, r)
} ensuring(res => x == y*res._1 + res._2 && res._2 >= 0 && res._2 < y)
}
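// Hedged usage sketch (not part of the original testcases): concrete runs of
// the verified functions.
object ArithmeticExample {
  def demo(): Unit = {
    val product = Arithmetic.mult(6, -7) // -42
    val total = Arithmetic.add(40, 2) // 42
    val (q, r) = Arithmetic.divide(17, 5) // (3, 2), since 17 == 5 * 3 + 2
  }
}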
|
regb/leon
|
testcases/verification/xlang/math/Arithmetic.scala
|
Scala
|
gpl-3.0
| 1,787 |
/**
* Copyright 2013, 2016 Gianluca Amato
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.utils.numberext
import org.scalatest.FunSuite
import org.scalatest.prop.PropertyChecks
import org.scalacheck.Gen
/**
* Test suite for extended rationals.
* @author Gianluca Amato <[email protected]>
*
*/
class RationalExtSuite extends FunSuite with PropertyChecks {
import RationalExt._
test("equality on infinities") {
assert(PositiveInfinity == PositiveInfinity, "+Inf = +Inf")
assert(NegativeInfinity == NegativeInfinity, "-Inf = -Inf")
assert(PositiveInfinity != NegativeInfinity, "+Inf != -Inf")
assert(NegativeInfinity != PositiveInfinity, "-Inf != +Inf")
assert(NegativeInfinity != NaN, "-Inf != NaN")
assert(PositiveInfinity != NaN, "+Inf != NaN")
assert(NaN != NaN, "NaN != NaN")
assert(NaN != NegativeInfinity, "NaN != +Inf")
assert(NaN != PositiveInfinity, "NaN != +Inf")
assert(NaN != RationalExt(2), "NaN != 2")
}
test("equality on regular numbers") {
val n1 = RationalExt(2)
val n2 = RationalExt(2)
val n3 = RationalExt(3)
assert(n1 === n1)
assert(n1 === n2)
assert(n1 != n3)
assert(n2 == n1)
assert(n2 == n2)
assert(n2 != n3)
assert(n3 != n1)
assert(n3 != n2)
assert(n3 == n3)
}
test("inequalities with infinities") {
assert(PositiveInfinity <= PositiveInfinity, "+Inf <= +Inf")
assert(! (PositiveInfinity <= NegativeInfinity), "not +Inf <= -Inf")
assert(NegativeInfinity <= PositiveInfinity, "-Inf <= +Inf")
assert(NegativeInfinity <= NegativeInfinity, "-Inf <= -Inf")
assert(! (PositiveInfinity < PositiveInfinity), "not +Inf < +Inf")
assert(! (PositiveInfinity < NegativeInfinity), "not +Inf < -Inf")
assert(NegativeInfinity < PositiveInfinity, "-Inf < +Inf")
assert(! (NegativeInfinity < NegativeInfinity), "not -Inf < -Inf")
assert(! (NaN <= PositiveInfinity))
assert(! (NaN < PositiveInfinity))
assert(! (NaN <= NegativeInfinity))
assert(! (NaN < NegativeInfinity))
assert(! (PositiveInfinity <= NaN))
assert(! (PositiveInfinity < NaN))
assert(! (NegativeInfinity <= NaN))
assert(! (NegativeInfinity < NaN))
forAll { (w: Int) =>
val rw = RationalExt(w)
assert(rw <= PositiveInfinity)
assert(rw < PositiveInfinity)
assert(NegativeInfinity <= rw)
assert(NegativeInfinity < rw)
}
}
test("binary operations on regular numbers") {
forAll(Gen.choose(-10000, 10000), Gen.choose(-10000, 10000)) { (w: Int, h: Int) =>
val rw = RationalExt(w)
val rh = RationalExt(h)
assertResult(RationalExt(w + h))(rw + rh)
assertResult(RationalExt(w - h))(rw - rh)
assertResult(RationalExt(w * h))(rw * rh)
}
}
test("unary operations in regular numbers") {
forAll(Gen.choose(Int.MinValue + 1, Int.MaxValue)) { (w: Int) =>
val rw = RationalExt(w)
assertResult(rw)(+rw)
assertResult(RationalExt(-w))(-rw)
assertResult(RationalExt(Math.abs(w)))(rw.abs)
}
}
test("sum on infinities") {
forAll { (x: Int) =>
val rx = RationalExt(x)
assertResult(PositiveInfinity) { rx + PositiveInfinity }
assertResult(PositiveInfinity) { PositiveInfinity + rx }
assertResult(NegativeInfinity) { rx + NegativeInfinity }
assertResult(NegativeInfinity) { NegativeInfinity + rx }
assertResult(NaN) { NaN + rx}
assertResult(NaN) { rx + NaN }
}
assertResult(PositiveInfinity) { PositiveInfinity + PositiveInfinity }
assertResult(NegativeInfinity) { NegativeInfinity + NegativeInfinity }
assertResult(NaN) { PositiveInfinity + NegativeInfinity }
assertResult(NaN) { NegativeInfinity + PositiveInfinity }
assertResult(NaN) { NaN + NaN }
}
test("differences on infinities") {
forAll { (x: Int) =>
val rx = RationalExt(x)
assertResult(NegativeInfinity) { rx - PositiveInfinity }
assertResult(PositiveInfinity) { PositiveInfinity - rx }
assertResult(PositiveInfinity) { rx - NegativeInfinity }
assertResult(NegativeInfinity) { NegativeInfinity - rx }
assertResult(NaN) { NaN - rx }
assertResult(NaN) { rx - NaN }
}
assertResult(NaN) { PositiveInfinity - PositiveInfinity }
assertResult(NaN) { NegativeInfinity - NegativeInfinity }
assertResult(PositiveInfinity) { PositiveInfinity - NegativeInfinity }
assertResult(NegativeInfinity) { NegativeInfinity - PositiveInfinity }
assertResult(NaN) { NaN - NaN }
}
test("mkString method") {
assertResult("12.34") { RationalExt(1234,100).mkString(2,java.math.RoundingMode.FLOOR) }
assertResult("12.3") { RationalExt(1234,100).mkString(1,java.math.RoundingMode.FLOOR) }
assertResult("12.4") { RationalExt(1234,100).mkString(1,java.math.RoundingMode.CEILING) }
}
test("toString method") {
forAll { (w: Int) =>
assertResult(w.toString)(RationalExt(w).toString)
}
assertResult("Infinity") { PositiveInfinity.toString }
assertResult("-Infinity") { NegativeInfinity.toString }
assertResult("NaN") { NaN.toString }
}
}
|
amato-gianluca/Jandom
|
core/src/test/scala/it/unich/jandom/utils/numberext/RationalExtSuite.scala
|
Scala
|
lgpl-3.0
| 5,790 |
/**
* Illustrates filtering and union to extract lines with "error" or "warning"
*/
package com.oreilly.learningsparkexamples.scala
import org.apache.spark._
import org.apache.spark.SparkContext._
object BasicFilterUnionCombo {
def main(args: Array[String]) {
val conf = new SparkConf
conf.setMaster(args(0))
val sc = new SparkContext(conf)
val inputRDD = sc.textFile(args(1))
val errorsRDD = inputRDD.filter(_.contains("error"))
    val warningsRDD = inputRDD.filter(_.contains("warning"))
val badLinesRDD = errorsRDD.union(warningsRDD)
println(badLinesRDD.collect().mkString("\\n"))
}
}
|
holdenk/learning-spark-examples
|
src/main/scala/com/oreilly/learningsparkexamples/scala/BasicFilterUnionCombo.scala
|
Scala
|
mit
| 642 |
package com.github.jeroenr.tepkin.protocol.command
import com.github.jeroenr.bson.BsonDsl._
import com.github.jeroenr.bson.{BsonDocument, BsonDsl}
/**
* The findAndModify command modifies and returns a single document. By default, the returned document does not include
* the modifications made on the update. To return the document with the modifications made on the update,
* use the new option.
*
* @param collectionName The collection against which to run the command.
* @param query Optional. The selection criteria for the modification. Although the query may match multiple documents,
* findAndModify will only select one document to modify.
* @param sort Optional. Determines which document the operation modifies if the query selects multiple documents.
* findAndModify modifies the first document in the sort order specified by this argument.
* @param removeOrUpdate Must specify either the remove or the update field. Remove removes the document specified
* in the query field. Set this to true to remove the selected document. The default is false.
* Update performs an update of the selected document. The update field employs the same update
* operators or field: value specifications to modify the selected document.
*
* @param returnNew Optional. When true, returns the modified document rather than the original. The findAndModify
* method ignores the new option for remove operations. The default is false.
* @param fields Optional. A subset of fields to return. The fields document specifies an inclusion of a field with 1.
* @param upsert Optional. Used in conjunction with the update field. When true, findAndModify creates a new document
* if no document matches the query, or if documents match the query, findAndModify performs an update.
* To avoid multiple upserts, ensure that the query fields are uniquely indexed. The default is false.
*/
case class FindAndModify(databaseName: String,
collectionName: String,
query: Option[BsonDocument] = None,
sort: Option[BsonDocument] = None,
removeOrUpdate: Either[Boolean, BsonDocument],
returnNew: Boolean = false,
fields: Option[Seq[String]] = None,
upsert: Boolean = false) extends Command {
override val command: BsonDocument = {
("findAndModify" := collectionName) ~
("query" := query) ~
("sort" := sort) ~
(removeOrUpdate match {
case Left(remove) => "remove" := remove
case Right(update) => "update" := update
}) ~
("new" := returnNew) ~
fields.map(fields => "fields" := $document(fields.map(_ := 1): _*)) ~
("upsert" := upsert)
}
}
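// Hedged usage sketch (not part of the original file): builds an update-style
// findAndModify command; the "shop"/"orders" names are made up for illustration,
// and $document / := come from the BsonDsl already imported above.
object FindAndModifyExample {
  val markProcessed = FindAndModify(
    databaseName = "shop",
    collectionName = "orders",
    query = Some($document("status" := "pending")),
    removeOrUpdate = Right($document("status" := "processed")),
    returnNew = true
  )
  // markProcessed.command is the BsonDocument sent with the command
}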
|
jeroenr/tepkin
|
tepkin/src/main/scala/com/github/jeroenr/tepkin/protocol/command/FindAndModify.scala
|
Scala
|
apache-2.0
| 2,910 |
package slamdata.engine.physical.mongodb
sealed trait SortType {
def bson: Bson = this match {
case Ascending => Bson.Int32(1)
case Descending => Bson.Int32(-1)
}
}
case object Ascending extends SortType
case object Descending extends SortType
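// Hedged usage sketch (not part of the original file): the two sort directions
// render to the 1 / -1 integers that MongoDB expects in a sort specification.
object SortTypeExample {
  val ascending: Bson = Ascending.bson // Bson.Int32(1)
  val descending: Bson = Descending.bson // Bson.Int32(-1)
}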
|
mossprescott/slamengine
|
src/main/scala/slamdata/engine/physical/mongodb/sort.scala
|
Scala
|
agpl-3.0
| 256 |
package io.udash.demos.rest
import io.udash._
import io.udash.demos.rest.model.{ContactId, PhoneBookId}
sealed abstract class RoutingState(val parentState: Option[ContainerRoutingState]) extends State {
type HierarchyRoot = RoutingState
def url(implicit application: Application[RoutingState]): String =
s"${application.matchState(this).value}"
}
sealed abstract class ContainerRoutingState(parentState: Option[ContainerRoutingState]) extends RoutingState(parentState) with ContainerState
sealed abstract class FinalRoutingState(parentState: Option[ContainerRoutingState]) extends RoutingState(parentState) with FinalState
object RootState extends ContainerRoutingState(None)
object ErrorState extends FinalRoutingState(Some(RootState))
case object IndexState extends FinalRoutingState(Some(RootState))
case class ContactFormState(id: Option[ContactId] = None) extends FinalRoutingState(Some(RootState))
case class PhoneBookFormState(id: Option[PhoneBookId] = None) extends FinalRoutingState(Some(RootState))
|
UdashFramework/udash-demos
|
rest-akka-http/frontend/src/main/scala/io/udash/demos/rest/states.scala
|
Scala
|
gpl-3.0
| 1,022 |
/*
The MIT License (MIT)
Copyright (c) 2013 Marco Rico Gomez <http://mrico.eu>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package asyncdbx
import akka.actor.Actor
import akka.actor.ActorRef
import akka.io.IO
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import spray.http._
import spray.can.Http
import HttpMethods._
import HttpHeaders._
object OAuthHandler {
case class Subscribe(state: String, actor: ActorRef)
case class Authorized(code: String, state: String)
}
class OAuthHandler extends Actor {
import OAuthHandler._
import context.system
val settings = Settings(context.system)
var subscriptions = Map.empty[String, ActorRef]
def receive = {
case Subscribe(state, actor) =>
subscriptions += state -> actor
case _: Http.Connected =>
sender ! Http.Register(self)
case req@HttpRequest(GET, settings.OAuth2RedirectUri.path, _, _, _) =>
val query = req.uri.query
sender ! HttpResponse(status = 200, entity = "Ok")
for {
code <- query.get("code")
state <- query.get("state")
} yield {
        subscriptions.get(state) foreach { subscriber =>
          subscriber ! Authorized(code, state)
subscriptions -= state
}
}
case _: HttpRequest =>
sender ! HttpResponse(status = 404, entity = "Unknown resource!")
}
override def preStart() {
IO(Http) ! Http.Bind(self, interface = "localhost", port = 8082)
}
override def postStop() {
IO(Http) ! Http.Unbind
}
}
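// Hedged usage sketch (not part of the original file): an actor subscribes with
// the OAuth `state` value it embedded in the authorize URL and is notified once
// the redirect endpoint receives the matching code.
class OAuthClientExample(oauthHandler: ActorRef) extends Actor {
  import OAuthHandler._
  override def preStart(): Unit =
    oauthHandler ! Subscribe("my-csrf-state", self)
  def receive = {
    case Authorized(code, state) =>
      // exchange `code` for an access token here
      context.stop(self)
  }
}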
|
mrico/async-dbx-client
|
src/main/scala/OAuthHandler.scala
|
Scala
|
mit
| 2,491 |
/*
* Copyright 2015 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.svaractor.semantictrait.example
/**
*
*
* Created by dennis on 27.09.15.
*/
import simplex3d.math.floatx.Vec3f
import simx.core.entity.Entity
import simx.core.entity.description.SValSet
import simx.core.ontology.types._
import simx.core.svaractor.SVarActor
import simx.core.svaractor.handlersupport.Types.CPSRet
import simx.core.svaractor.semantictrait.base._
import simx.core.svaractor.semantictrait.example.traits.Vehicle
import simx.core.svaractor.semantictrait.example.relations.{affectedBy, has}
import simx.core.svaractor.semantictrait.example.types.{SemanticEntity, Shape, Anything, Location, SteeringBehavior}
import simx.core.svaractor.unifiedaccess.EntityUpdateHandling
import scala.language.reflectiveCalls
import scala.util.continuations
import Iterate.wrapIt
/**
* Created by dwiebusch on 27.11.14
*/
object Test2{
def test(v : Radius): Unit ={
}
val r = Radius(1)
val a = Angle(1)
test(r)
def main(args: Array[java.lang.String]) {
SVarActor.createActor(new SVarActor with EntityUpdateHandling {
override protected def removeFromLocalRep(e: Entity){}
/**
* called when the actor is started
*/
override protected def startUp() = continuations.reset {
simx.core.ontology.types.init()
val e = SemanticEntity(new Entity())
val wheels = for (i <- 1 to 4) yield SemanticEntity(new Entity())
Iterate over wheels foreach { wheel =>
wheel modify has(Shape("round")) set Position set Scale apply()
e set has(wheel)
}
val carEntity = e modify
has(SteeringBehavior) set
Gravity set
Scale set
Anything set
Position2D set
affectedBy(Gravity(Vec3f.Zero)) set
Container(SValSet()) apply()
if ( e isA Vehicle ) {
//println((Vehicle(carEntity) attain has(Radius(2))).get.get(Radius))
Vehicle(carEntity) moveTo Location("Wuerzburg")
}
}
})
}
}
case class X(in : Semantic.Entity[_ <: Thing]){
def isA(a : SpecificSemanticTrait[_ <: Thing])(implicit context : EntityUpdateHandling) : scala.Boolean@CPSRet =
a.tryApply(in).isDefined
}
object Iterate{
implicit def wrapIt[L <: Thing](in : Semantic.Entity[L]) : X = X(in)
def over[T](i : Iterable[T]) = Iterate(i)
}
case class Iterate[T](i : Iterable[T]) {
def foreach[U](handler: T => U@CPSRet): Unit@CPSRet = if (i.nonEmpty) {
handler(i.head)
Iterate(i.tail).foreach(handler)
}
}
object Test3{
def test(v : Radius): Unit ={
// println(v)
}
val r = Radius(1)
val a = Angle(1)
test(r)
import simplex3d.math.float.{Vec3, Mat4}
def main(args: Array[java.lang.String]) {
simx.core.ontology.types.init()
// SetLocationAction -> SetRadiusAction
SVarActor.createActor(new SVarActor with EntityUpdateHandling {
override protected def removeFromLocalRep(e: Entity){}
/**
* called when the actor is started
*/
override protected def startUp() = continuations.reset {
val e1 = SemanticEntity(new Entity())
val walls = for (i <- 1 to 4) yield SemanticEntity(new Entity())
Iterate over walls foreach { wall =>
wall modify has(Material("Concrete")) set Position apply()
wall set Scale(Mat4.Identity)
e1 set has(wall)
}
val house = e1.modify(Position(Vec3.Zero)).
set(Scale(Mat4.Identity)).apply
        if (e1 isA House)
House(house) highlight types.Location("Door")
}
})
}
}
object ConcreteWall extends SemanticTrait(Position :: Scale, has(Material("Concrete")))
object House extends SpecificSemanticTrait( Position :: Scale, has(ConcreteWall) ){
final type SpecificEntityType = SemanticEntity{ def highlight(s : Location.ValueType) }
protected def createEntity(e: SemanticEntity.ValueType)(implicit creator : SVarActor) = new SemanticEntity(e) {
def highlight(s : Location.ValueType) =
println(s + " of entity " + entity + " is now highlighted")
}
}
|
simulator-x/core
|
src/simx/core/svaractor/semantictrait/example/Test2.scala
|
Scala
|
apache-2.0
| 4,946 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis
import org.orbeon.oxf.xforms.analysis.controls.{AttributeControl, ValueTrait, RepeatControl}
import model.Model
import org.orbeon.oxf.xforms.XFormsConstants
import org.orbeon.oxf.xforms.XFormsUtils.{getElementId, maybeAVT}
import org.dom4j.{QName, Element}
import org.orbeon.oxf.xml.{NamespaceMapping, ContentHandlerHelper}
import org.orbeon.oxf.xml.XMLConstants.XML_LANG_QNAME
import org.orbeon.oxf.xml.dom4j.{Dom4jUtils, LocationData, ExtendedLocationData}
import org.orbeon.oxf.xforms.xbl.Scope
import org.orbeon.oxf.util.ScalaUtils.stringOptionToSet
import org.orbeon.oxf.xforms.event.XFormsEvent.{Bubbling, Target, Capture, Phase}
import org.orbeon.oxf.xforms.event.EventHandler
import collection.mutable
import util.control.Breaks
import org.orbeon.oxf.xforms.XFormsConstants._
// xml:lang reference
sealed abstract class LangRef
case class LiteralLangRef(lang: String) extends LangRef
case class AVTLangRef(att: AttributeControl) extends LangRef
/**
* Abstract representation of a common XForms element supporting optional context, binding and value.
*/
abstract class ElementAnalysis(
val part: PartAnalysisImpl,
val element: Element,
val parent: Option[ElementAnalysis],
val preceding: Option[ElementAnalysis])
extends ElementEventHandlers
with ElementRepeats {
self ⇒
import ElementAnalysis._
require(element ne null)
implicit def logger = part.getIndentedLogger
// xml:lang, inherited from parent unless overridden locally
lazy val lang: Option[LangRef] = {
val v = element.attributeValue(XML_LANG_QNAME)
if (v ne null) {
if (! v.startsWith("#"))
Some(LiteralLangRef(v))
else {
val staticId = v.substring(1)
val prefixedId = scope.prefixedIdForStaticId(staticId)
Some(AVTLangRef(part.getAttributeControl(prefixedId, "xml:lang")))
}
} else
parent flatMap (_.lang)
}
val namespaceMapping: NamespaceMapping
// Element local name
def localName = element.getName
// Scope and model
val scope: Scope
val model: Option[Model]
// In-scope variables (for XPath analysis)
val inScopeVariables: Map[String, VariableTrait]
def removeFromParent() =
parent foreach
{ case parent: ChildrenBuilderTrait ⇒ parent.removeChild(self); case _ ⇒ }
lazy val treeInScopeVariables: Map[String, VariableTrait] = {
def findPreceding(element: ElementAnalysis): Option[ElementAnalysis] = element.preceding match {
case Some(preceding) if preceding.scope == self.scope ⇒ Some(preceding)
case Some(preceding) ⇒ findPreceding(preceding)
case None ⇒ element.parent match {
case Some(parent: Model) ⇒
None // models are not allowed to see outside variables for now (could lift this restriction later)
case Some(parent) ⇒ findPreceding(parent)
case _ ⇒ None
}
}
findPreceding(self) match {
case Some(preceding: VariableAnalysisTrait) ⇒ preceding.treeInScopeVariables + (preceding.name → preceding)
case Some(preceding) ⇒ preceding.treeInScopeVariables
case None ⇒ Map.empty
}
}
// Definition of the various scopes:
//
// - Container scope: scope defined by the closest ancestor XBL binding. This scope is directly related to the
// prefix of the prefixed id. E.g. <fr:foo id="my-foo"> defines a new scope `my-foo`. All children of `my-foo`,
// including directly nested handlers, models, shadow trees, have the `my-foo` prefix.
//
// - Inner scope: this is the scope given this control if this control has `xxbl:scope='inner'`. It is usually the
// same as the container scope, except for directly nested handlers.
//
// - Outer scope: this is the scope given this control if this control has `xxbl:scope='outer'`. It is usually the
// actual scope of the closest ancestor XBL bound element, except for directly nested handlers.
def containerScope: Scope
// Ids
val staticId = getElementId(element)
val prefixedId = scope.prefixedIdForStaticId(staticId) // NOTE: we could also pass the prefixed id during construction
// Location
val locationData = ElementAnalysis.createLocationData(element)
// Element attributes: @context, @ref, @bind, @value
val context = Option(element.attributeValue(XFormsConstants.CONTEXT_QNAME))
val ref = ElementAnalysis.getBindingExpression(element)
val bind = Option(element.attributeValue(XFormsConstants.BIND_QNAME))
val value = Option(element.attributeValue(XFormsConstants.VALUE_QNAME))
def modelJava = model map (_.staticId) orNull
def contextJava = context.orNull
def refJava = ref.orNull
def bindJava = bind.orNull
// Other
def hasBinding = ref.isDefined || bind.isDefined
    // 0, 1, or 2: number of XPath evaluations used to resolve the binding if no optimization is taking place
    val bindingXPathEvaluations = (if (context.isDefined) 1 else 0) + (if (ref.isDefined) 1 else 0)
// Classes (not used at this time)
val classes = ""
// Extension attributes
protected def allowedExtensionAttributes = Set[QName]()
final lazy val extensionAttributes = Map() ++ (CommonExtensionAttributes ++ allowedExtensionAttributes map (qName ⇒ (qName, element.attributeValue(qName))) filter (_._2 ne null))
final lazy val nonRelevantExtensionAttributes = extensionAttributes map { case (k, v) ⇒ k → (if (maybeAVT(v)) "" else v) } // all blank values for AVTs
// XPath analysis
private var contextAnalysis: Option[XPathAnalysis] = None
private var _contextAnalyzed = false
private var bindingAnalysis: Option[XPathAnalysis] = None
private var _bindingAnalyzed = false
private var valueAnalysis: Option[XPathAnalysis] = None
private var _valueAnalyzed = false
def valueAnalyzed = _valueAnalyzed
final def getContextAnalysis = { assert(_contextAnalyzed); contextAnalysis }
final def getBindingAnalysis = { assert(_bindingAnalyzed); bindingAnalysis }
final def getValueAnalysis = { assert(_valueAnalyzed) ; valueAnalysis }
def analyzeXPath(): Unit = {
contextAnalysis = computeContextAnalysis
_contextAnalyzed = true
bindingAnalysis = computeBindingAnalysis
_bindingAnalyzed = true
valueAnalysis = computeValueAnalysis
_valueAnalyzed = true
}
// To implement in subclasses
protected def computeContextAnalysis: Option[XPathAnalysis]
protected def computeBindingAnalysis: Option[XPathAnalysis]
protected def computeValueAnalysis: Option[XPathAnalysis]
/**
* Return the context within which children elements or values evaluate. This is the element binding if any, or the
* element context if there is no binding.
*/
def getChildrenContext: Option[XPathAnalysis] = if (hasBinding) getBindingAnalysis else getContextAnalysis
val closestAncestorInScope = ElementAnalysis.getClosestAncestorInScope(self, scope)
def toXMLAttributes: Seq[(String, String)] = Seq(
"scope" → scope.scopeId,
"prefixed-id" → prefixedId,
"model-prefixed-id" → (model map (_.prefixedId) orNull),
"binding" → hasBinding.toString,
"value" → self.isInstanceOf[ValueTrait].toString,
"name" → element.attributeValue("name")
)
def toXMLContent(helper: ContentHandlerHelper): Unit = {
// Control binding and value analysis
if (_bindingAnalyzed)
getBindingAnalysis match {
case Some(bindingAnalysis) if hasBinding ⇒ // NOTE: for now there can be a binding analysis even if there is no binding on the control (hack to simplify determining which controls to update)
helper.startElement("binding")
bindingAnalysis.toXML(helper)
helper.endElement()
case _ ⇒ // NOP
}
if (_valueAnalyzed)
getValueAnalysis match {
case Some(valueAnalysis) ⇒
helper.startElement("value")
valueAnalysis.toXML(helper)
helper.endElement()
case _ ⇒ // NOP
}
}
final def toXML(helper: ContentHandlerHelper): Unit = {
helper.startElement(localName, toXMLAttributes flatMap (t ⇒ Seq(t._1, t._2)) toArray)
toXMLContent(helper)
helper.endElement()
}
def freeTransientState() {
if (_contextAnalyzed && getContextAnalysis.isDefined)
getContextAnalysis.get.freeTransientState()
if (_bindingAnalyzed && getBindingAnalysis.isDefined)
getBindingAnalysis.get.freeTransientState()
if (_valueAnalyzed && getValueAnalysis.isDefined)
getValueAnalysis.get.freeTransientState()
}
}
trait ElementEventHandlers {
element: ElementAnalysis ⇒
import ElementAnalysis._
import propagateBreaks.{break, breakable}
// Event handler information as a tuple:
// - whether the default action needs to run
// - all event handlers grouped by phase and observer prefixed id
private type HandlerAnalysis = (Boolean, Map[Phase, Map[String, List[EventHandler]]])
// Cache for event handlers
// Use an immutable map and @volatile so that update are published to other threads accessing this static state.
// NOTE: We could use `AtomicReference` but we just get/set so there is no benefit to it.
@volatile private var handlersCache: Map[String, HandlerAnalysis] = Map()
// Return event handler information for the given event name
// We check the cache first, and if not found we compute the result and cache it.
//
// There is a chance that concurrent writers could overwrite each other's latest cache addition, but
// `handlersForEventImpl` is idempotent so this should not be an issue, especially since a document usually has many
// `ElementAnalysis` which means the likelihood of writing to the same `ElementAnalysis` concurrently is low. Also,
// after a while, most handlers will be memoized, which means no more concurrent writes, only concurrent reads.
// Finally, `handlersForEventImpl` is not quick but also not very costly.
//
// Other options include something like `Memoizer` from "Java Concurrency in Practice" (5.6), possibly modified to
// use Scala 2.10 `TrieMap` and `Future`. However a plain immutable `Map` might be more memory-efficient.
//
// Reasoning is great but the only way to know for sure what's best would be to run a solid performance test of the
// options.
def handlersForEvent(eventName: String): HandlerAnalysis =
handlersCache.get(eventName) getOrElse {
val result = handlersForEventImpl(eventName)
handlersCache += eventName → result
result
}
private def handlersForObserver(observer: ElementAnalysis) =
observer.part.getEventHandlers(observer.prefixedId)
private def hasPhantomHandler(observer: ElementAnalysis) =
handlersForObserver(observer) exists (_.isPhantom)
// Find all observers (including in ancestor parts) which either match the current scope or have a phantom handler
private def relevantObservers: List[ElementAnalysis] = {
def observersInAncestorParts =
part.elementInParent.toList flatMap (_.relevantObservers)
def relevant(observer: ElementAnalysis) =
observer.scope == element.scope || hasPhantomHandler(observer)
(ancestorOrSelfIterator(element) filter relevant) ++: observersInAncestorParts
}
// Find all the handlers for the given event name
// For all relevant observers, find the handlers which match by phase
private def handlersForEventImpl(eventName: String): HandlerAnalysis = {
def relevantHandlersForObserverByPhaseAndName(observer: ElementAnalysis, phase: Phase) = {
val isPhantom = observer.scope != element.scope
def matchesPhaseNameTarget(eventHandler: EventHandler) =
(eventHandler.isCapturePhaseOnly && phase == Capture ||
eventHandler.isTargetPhase && phase == Target ||
eventHandler.isBubblingPhase && phase == Bubbling) && eventHandler.isMatchByNameAndTarget(eventName, element.prefixedId)
def matches(eventHandler: EventHandler) =
if (isPhantom)
eventHandler.isPhantom && matchesPhaseNameTarget(eventHandler)
else
matchesPhaseNameTarget(eventHandler)
val relevantHandlers = handlersForObserver(observer) filter matches
// DOM 3:
//
// - stopPropagation: "Prevents other event listeners from being triggered but its effect must be deferred
// until all event listeners attached on the Event.currentTarget have been triggered."
// - preventDefault: "the event must be canceled, meaning any default actions normally taken by the
// implementation as a result of the event must not occur"
// - NOTE: DOM 3 introduces also stopImmediatePropagation
val propagate = relevantHandlers forall (_.isPropagate)
val performDefaultAction = relevantHandlers forall (_.isPerformDefaultAction)
(propagate, performDefaultAction, relevantHandlers)
}
var propagate = true
var performDefaultAction = true
def handlersForPhase(observers: List[ElementAnalysis], phase: Phase) = {
val result = mutable.Map[String, List[EventHandler]]()
breakable {
for (observer ← observers) {
val (localPropagate, localPerformDefaultAction, handlersToRun) =
relevantHandlersForObserverByPhaseAndName(observer, phase)
propagate &= localPropagate
performDefaultAction &= localPerformDefaultAction
if (handlersToRun.nonEmpty)
result += observer.prefixedId → handlersToRun
// Cancel propagation if requested
if (! propagate)
break()
}
}
if (result.nonEmpty)
Some(phase → result.toMap)
else
None
}
val observers = relevantObservers
val captureHandlers =
handlersForPhase(observers.reverse.init, Capture)
val targetHandlers =
if (propagate)
handlersForPhase(List(observers.head), Target)
else
None
val bubblingHandlers =
if (propagate)
handlersForPhase(observers.tail, Bubbling)
else
None
(performDefaultAction, Map() ++ captureHandlers ++ targetHandlers ++ bubblingHandlers)
}
}
trait ElementRepeats {
element: ElementAnalysis ⇒
// This control's ancestor repeats, computed on demand
lazy val ancestorRepeats: List[RepeatControl] =
parent match {
case Some(parentRepeat: RepeatControl) ⇒ parentRepeat :: parentRepeat.ancestorRepeats
case Some(parentElement) ⇒ parentElement.ancestorRepeats
case None ⇒ Nil
}
// Same as ancestorRepeats but across parts
lazy val ancestorRepeatsAcrossParts: List[RepeatControl] =
part.elementInParent match {
case Some(elementInParentPart) ⇒ ancestorRepeats ::: elementInParentPart.ancestorRepeatsAcrossParts
case None ⇒ ancestorRepeats
}
// This control's closest ancestor in the same scope
// NOTE: This doesn't need to go across parts, because parts don't share scopes at this time.
lazy val ancestorRepeatInScope = ancestorRepeats find (_.scope == scope)
// Whether this is within a repeat
def isWithinRepeat = ancestorRepeatsAcrossParts.nonEmpty
}
object ElementAnalysis {
val CommonExtensionAttributes = Set(STYLE_QNAME, CLASS_QNAME)
val propagateBreaks = new Breaks
/**
* Return the closest preceding element in the same scope.
*
* NOTE: As in XPath, this does not include ancestors of the element.
*/
def getClosestPrecedingInScope(element: ElementAnalysis)(scope: Scope = element.scope): Option[ElementAnalysis] = element.preceding match {
case Some(preceding) if preceding.scope == scope ⇒ Some(preceding)
case Some(preceding) ⇒ getClosestPrecedingInScope(preceding)(scope)
case None ⇒ element.parent match {
case Some(parent) ⇒ getClosestPrecedingInScope(parent)(scope)
case _ ⇒ None
}
}
abstract class IteratorBase(start: ElementAnalysis) extends Iterator[ElementAnalysis] {
def initialNext: Option[ElementAnalysis]
def subsequentNext(e: ElementAnalysis): Option[ElementAnalysis]
private[this] var theNext = initialNext
def hasNext = theNext.isDefined
def next() = {
val newResult = theNext.get
theNext = subsequentNext(newResult)
newResult
}
}
/**
* Return an iterator over all the element's ancestors.
*/
def ancestorIterator(start: ElementAnalysis) = new IteratorBase(start) {
def initialNext = start.parent
def subsequentNext(e: ElementAnalysis) = e.parent
}
/**
* Iterator over the element and all its ancestors.
*/
def ancestorOrSelfIterator(start: ElementAnalysis) = new IteratorBase(start) {
def initialNext = Option(start)
def subsequentNext(e: ElementAnalysis) = e.parent
}
/**
* Iterator over the element's preceding siblings.
*/
def precedingSiblingIterator(start: ElementAnalysis) = new IteratorBase(start) {
def initialNext = start.preceding
def subsequentNext(e: ElementAnalysis) = e.preceding
}
/**
* Return a list of ancestors in the same scope from leaf to root.
*/
def getAllAncestorsInScope(start: ElementAnalysis, scope: Scope): List[ElementAnalysis] =
ancestorIterator(start) filter (_.scope == scope) toList
/**
* Return a list of ancestor-or-self in the same scope from leaf to root.
*/
def getAllAncestorsOrSelfInScope(start: ElementAnalysis): List[ElementAnalysis] =
start :: getAllAncestorsInScope(start, start.scope)
/**
* Get the closest ancestor in the same scope.
*/
def getClosestAncestorInScope(start: ElementAnalysis, scope: Scope) =
ancestorIterator(start) find (_.scope == scope)
/**
* Return the first ancestor with a binding analysis that is in the same scope/model.
*/
def getClosestAncestorInScopeModel(start: ElementAnalysis, scopeModel: ScopeModel) =
ancestorIterator(start) find (e ⇒ ScopeModel(e.scope, e.model) == scopeModel)
/**
* Get the binding XPath expression from the @ref or (deprecated) @nodeset attribute.
*/
def getBindingExpression(element: Element): Option[String] =
Option(element.attributeValue(XFormsConstants.REF_QNAME)) orElse
Option(element.attributeValue(XFormsConstants.NODESET_QNAME))
def createLocationData(element: Element): ExtendedLocationData =
if (element ne null) new ExtendedLocationData(element.getData.asInstanceOf[LocationData], "gathering static information", element) else null
/**
* Get the value of an attribute containing a space-separated list of tokens as a set.
*/
def attSet(element: Element, qName: QName) =
stringOptionToSet(Option(element.attributeValue(qName)))
def attSet(element: Element, name: String) =
stringOptionToSet(Option(element.attributeValue(name)))
/**
* Get the value of an attribute containing a space-separated list of QNames as a set.
*/
def attQNameSet(element: Element, qName: QName, namespaces: NamespaceMapping) =
attSet(element, qName) map (Dom4jUtils.extractTextValueQName(namespaces.mapping, _, true))
}
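// Hedged usage note (not part of the original file): given a dom4j element for
// <xf:input class="a b b"/>, ElementAnalysis.attSet(element, "class") returns
// Set("a", "b"); tokens are whitespace-split and duplicates collapse.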
|
evlist/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/analysis/ElementAnalysis.scala
|
Scala
|
lgpl-2.1
| 21,215 |
package ch.hepia.abaplans.server
//#user-registry-actor
import akka.actor.{ Actor, ActorLogging, Props }
import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }
//#user-case-classes
final case class User(name: String, age: Int, countryOfResidence: String)
final case class Users(users: Seq[User])
//#user-case-classes
import slick.jdbc.MySQLProfile.api._
import org.joda.time.DateTime
import com.github.tototoshi.slick.MySQLJodaSupport._
final case class ArcgisMaps(seq: Seq[ArcgisMap])
final case class ArcgisMap(uid: Option[Int], public: Boolean, title: String, height: Int, width: Int, extent: String, graphics: Option[String], city: Boolean, creation: Option[DateTime])
final case class ArcgisMapRow(tag: Tag) extends Table[(Option[Int], Boolean, String, Int, Int, String, Option[String], Boolean, Option[DateTime])](tag, "Map") {
def uid = column[Option[Int]]("uid", O.PrimaryKey, O.AutoInc)
def public = column[Boolean]("public", O.Default(true))
def title = column[String]("title")
def height = column[Int]("height")
def width = column[Int]("width")
def extent = column[String]("extent")
def graphics = column[Option[String]]("graphics")
def city = column[Boolean]("city")
def creationDate = column[Option[DateTime]]("creationDate", O.SqlType("DATETIME"))
def * = (uid, public, title, height, width, extent, graphics, city, creationDate)
}
/*
case class User(id: Option[Int], first: String, last: String)
class Users(tag: Tag) extends Table[User](tag, "users") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def first = column[String]("first")
def last = column[String]("last")
def * = (id.?, first, last) <> (User.tupled, User.unapply)
}
val users = TableQuery[Users]
*/
object MapRegistryActor {
// Events
final case class ActionPerformed(description: String)
final case class MapCreated(id: Int)
// Commands
final case object GetMaps
final case class CreateMap(arcgisMap: ArcgisMap)
final case class GetMap(id: String)
final case class DeleteMap(id: String)
def props: Props = Props[MapRegistryActor]
}
class MapRegistryActor extends Actor with ActorLogging {
import MapRegistryActor._
import akka.pattern.pipe
import scala.concurrent.ExecutionContext.Implicits.global
val maps = TableQuery[ArcgisMapRow]
/*
val setup = DBIO.seq(
maps.schema.create,
maps += (None, true, "test", 333, 444, "{id: 33}", Some("<graphics>"), true, DateTime.now()),
maps += (None, true, "test", 333, 444, "{id: 33}", Some("<graphics>"), true, DateTime.now()),
maps += (None, true, "test", 333, 444, "{id: 33}", Some("<graphics>"), true, DateTime.now())
)
*/
val db = Database.forConfig("mysql")
// try {
//val setupFuture = db.run(setup)
//setupFuture.onComplete(println)
// } finally db.close()
var users = Set.empty[User]
def receive: Receive = {
case GetMaps =>
val res = db.run(maps.result).map(_.map(record => (ArcgisMap.apply _).tupled(record)))
res.map(ArcgisMaps).pipeTo(sender)
case CreateMap(map) =>
// Convert map to tuple
//maps += (None, true, "test", 333, 444, "{id: 33}", Some("<graphics>"), true, Some(DateTime.now()))
val newMap = map.copy(creation = Some(DateTime.now()))
val insertAction = (maps returning maps.map(_.uid) into ((map, id) => map.copy(_1 = id))) += ArcgisMap.unapply(newMap).get
val futureMap = db.run(insertAction)
futureMap map (m => MapCreated(m._1.getOrElse(0))) pipeTo sender()
case GetMap(id) =>
Try(id.toInt).toOption match {
case None => sender() ! None
case Some(i) =>
val q = maps.filter(_.uid === i)
val resultSeq: Future[Seq[ArcgisMap]] = db.run(q.result).map(_.map(record => (ArcgisMap.apply _).tupled(record)))
val result: Future[Option[ArcgisMap]] = resultSeq.map(_.headOption)
result.pipeTo(sender)
}
case DeleteMap(idMap) =>
Try(idMap.toInt) match {
case Failure(_) => sender() ! ActionPerformed(s"Map id $idMap must be an integer")
        case Success(id) =>
          // capture the sender before the Future completes: sender() is only
          // valid synchronously, while the current message is being processed
          val replyTo = sender()
          val q = maps.filter(_.uid === id)
          val affectedRows: Future[Int] = db.run(q.delete)
          affectedRows.onComplete {
            case Success(_) => replyTo ! ActionPerformed(s"Map $id deleted")
            case Failure(e) => replyTo ! ActionPerformed(e.getMessage)
          }
}
}
}
//#user-registry-actor
|
ABAPlan/abaplan-restapi
|
src/main/scala/ch/hepia/abaplans/server/MapRegistryActor.scala
|
Scala
|
mit
| 4,427 |
package weaponmark
final case class BenchmarkResults(
usesPerTurn: Int,
turns: Int,
private var hits: Int = 0,
private var misses: Int = 0,
private var zeroDamage: Int = 0,
private var botches: Int = 0,
private var damage: Int = 0) {
// validate constructor parameters
require(usesPerTurn > 0)
require(turns > 0)
require(hits >= 0)
require(misses >= 0)
require(zeroDamage >= 0)
require(botches >= 0)
require(damage >= 0)
// mutators to increment counts
def incZeroDamage(): Unit = zeroDamage += 1
def incBotches(): Unit = botches += 1
def incMisses(): Unit = misses += 1
def incHitsBy(h : Int): Unit = {
require(h >= 0) // accumulators don't go backwards
hits += h
}
def incDamageBy(d : Int): Unit = {
require(d >= 0) // accumulators don't go backwards
damage += d
}
// accessors for counts
def totalHits: Int = hits
def totalMisses: Int = misses
def totalZeroDamage: Int = zeroDamage
def totalBotches: Int = botches
def totalDamage: Int = damage
private val attemptedUses = usesPerTurn * turns
// statistic accessors
def hitsPerTurn: Double = hits.toDouble / turns
def damPerTurn: Double = damage.toDouble / turns
def hitsPerUse: Double = hits.toDouble / attemptedUses
def damPerUse: Double = damage.toDouble / attemptedUses
def pctMisses: Double = (misses.toDouble / attemptedUses) * 100
def pctBotches: Double = (botches.toDouble / attemptedUses) * 100
def pctZeroDam: Double = (zeroDamage.toDouble / attemptedUses) * 100
def pctIneffective: Double = pctMisses + pctBotches + pctZeroDam
}
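// Usage sketch (illustrative only; the numbers are arbitrary): accumulate a few
// results for a 3-uses-per-turn, 10-turn run and read back the derived statistics.
object BenchmarkResultsSketch extends App {
  val r = BenchmarkResults(usesPerTurn = 3, turns = 10)
  r.incHitsBy(2)
  r.incDamageBy(7)
  r.incMisses()
  println(f"hits/turn = ${r.hitsPerTurn}%.2f  damage/use = ${r.damPerUse}%.2f  misses = ${r.pctMisses}%.1f%%")
}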
|
locke8/weaponmark
|
src/main/scala/weaponmark/BenchmarkResults.scala
|
Scala
|
mit
| 1,701 |
package de.aderenbach.gatlingdsl.parser.simpledsl
import de.aderenbach.gatlingdsl.parser.{RampUp, Scenario, DslParser}
import io.gatling.core.Predef._
import io.gatling.core.structure.{ScenarioBuilder, PopulationBuilder}
import io.gatling.http.Predef._
import io.gatling.http.request.builder.HttpRequestBuilder
/**
* Created by Alexander Derenbach <[email protected]> on 09.10.15.
*/
case class SimpleDslSimlationBuilder(dslparser: DslParser) {
val simulation = dslparser.parse
lazy val name = simulation.name
lazy val baseUrl = simulation.baseUrl
implicit class Regex(sc: StringContext) {
def r = new util.matching.Regex(sc.parts.mkString, sc.parts.tail.map(_ => "x"): _*)
}
def addActions(scnBuilder: ScenarioBuilder, actions: List[String]): ScenarioBuilder = {
actions match {
case action :: rest => addActions(SimpleDsl.action(scnBuilder, action), rest)
case Nil => scnBuilder
}
}
def addUsers(userCount: Integer, rampUp: RampUp) = {
SimpleDsl.rampUp(userCount, rampUp.rampUp)
}
def populationBuilder: List[PopulationBuilder] = {
for (
scnConfig <- simulation.scenarioConfigList
) yield {
val scenarioBuilder = scenario(scnConfig.scenario.name)
addActions(scenarioBuilder, scnConfig.scenario.actions).inject(addUsers(scnConfig.userCount, scnConfig.rampUp))
}
}
}
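// Illustrative wiring sketch (not part of the original): feeding the builder's
// populations into a Gatling Simulation. The concrete DslParser instance is
// assumed to be supplied by the caller.
class SimpleDslSimulationSketch(parser: DslParser) extends Simulation {
  private val builder = SimpleDslSimlationBuilder(parser)
  setUp(builder.populationBuilder)
}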
|
aderenbach/gatlingDslDriver
|
src/main/scala/de/aderenbach/gatlingdsl/parser/simpledsl/SimpleDslSimlationBuilder.scala
|
Scala
|
mit
| 1,368 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn.security
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.Credentials
import org.apache.spark.{SparkConf, SparkFunSuite}
class YARNHadoopDelegationTokenManagerSuite extends SparkFunSuite {
private var credentialManager: YARNHadoopDelegationTokenManager = null
private var sparkConf: SparkConf = null
private var hadoopConf: Configuration = null
override def beforeAll(): Unit = {
super.beforeAll()
sparkConf = new SparkConf()
hadoopConf = new Configuration()
}
test("Correctly loads credential providers") {
credentialManager = new YARNHadoopDelegationTokenManager(sparkConf, hadoopConf, null)
assert(credentialManager.isProviderLoaded("yarn-test"))
}
}
class YARNTestCredentialProvider extends ServiceCredentialProvider {
override def serviceName: String = "yarn-test"
override def credentialsRequired(conf: Configuration): Boolean = true
override def obtainCredentials(
hadoopConf: Configuration,
sparkConf: SparkConf,
creds: Credentials): Option[Long] = None
}
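// Note (added for context): ServiceCredentialProvider implementations are
// discovered through java.util.ServiceLoader, so the test classpath is expected
// to contain a resource file
//   META-INF/services/org.apache.spark.deploy.yarn.security.ServiceCredentialProvider
// listing YARNTestCredentialProvider; that registration is what makes
// isProviderLoaded("yarn-test") pass above.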
|
hhbyyh/spark
|
resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/security/YARNHadoopDelegationTokenManagerSuite.scala
|
Scala
|
apache-2.0
| 1,898 |
package org.ferrit.dao.cassandra
import com.typesafe.config.Config
import com.datastax.driver.core.{Cluster, Session}
import com.datastax.driver.core.policies.Policies
import org.ferrit.core.model._
class CassandraPersistenceManager(config: CassandraConfig) {
val cluster: Cluster = CassandraPersistenceManager.initCluster(config)
val session: Session = cluster.connect(config.keyspace)
  def shutdown(): Unit = {
    cluster.shutdown()
  }
  def getColumnTTL(config: Config): CassandraColumnTTL =
    CassandraColumnTTL(
      CassandraTables.AllTables.map { t =>
        t -> config.getInt(s"persistence.cassandra.tableColumnTTL.$t")
      }.toMap
    )
}
object CassandraPersistenceManager {
  /**
   * Kept in the companion object so it can be reused, e.g. from tests.
   */
def initCluster(config: CassandraConfig):Cluster =
Cluster.builder()
.addContactPoints(config.nodes.toArray: _*)
.withPort(config.port)
.withRetryPolicy(Policies.defaultRetryPolicy())
.build()
}
object CassandraTables {
val Crawler = "crawler"
val CrawlJobByCrawler = "crawl_job_by_crawler"
val CrawlJobByDate = "crawl_job_by_date"
val FetchLog = "fetch_log"
val Document = "document"
val DocumentMetaData = "document_metadata"
val AllTables = Seq(
//Crawler,
CrawlJobByCrawler,
CrawlJobByDate,
FetchLog,
Document,
DocumentMetaData
)
}
case class CassandraConfig(
keyspace: String,
nodes: Seq[String],
port: Int
)
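// Illustrative bootstrap sketch (not in the original): building the manager from
// Typesafe Config. The exact config keys are assumptions, modelled on the
// "persistence.cassandra.*" prefix already used by getColumnTTL above.
object CassandraBootSketch {
  import com.typesafe.config.ConfigFactory
  import scala.collection.JavaConverters._

  def boot(): CassandraPersistenceManager = {
    val conf = ConfigFactory.load()
    val cc = CassandraConfig(
      keyspace = conf.getString("persistence.cassandra.keyspace"),             // assumed key
      nodes    = conf.getStringList("persistence.cassandra.nodes").asScala,    // assumed key
      port     = conf.getInt("persistence.cassandra.port")                     // assumed key
    )
    new CassandraPersistenceManager(cc)
  }
}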
|
reggoodwin/ferrit
|
src/main/scala/org/ferrit/dao/cassandra/CassandraPersistenceManager.scala
|
Scala
|
mit
| 1,496 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import scala.annotation.tailrec
class GenContainBase {
def translateLine(line: String, mapping: (String, String)*): String = {
@tailrec
def translate(current: String, itr: Iterator[(String, String)]): String =
if (itr.hasNext) {
        val next = itr.next()
translate(current.replaceAll(next._1, next._2), itr)
}
else
current
translate(line, mapping.toIterator)
}
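  // Example (illustrative): the mapping keys are regular expressions, applied
  // left to right via String.replaceAll, e.g.
  //   translateLine("List(1, 2)", "List" -> "Vector")   // yields "Vector(1, 2)"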
val stringLowerCased = "val lowerCased: Uniformity[Char] = new Uniformity[Char] {\\n" +
" def normalized(c: Char): Char = c.toString.toLowerCase.toCharArray()(0)\\n" +
" def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[Char]\\n" +
" def normalizedOrSame(b: Any) =\\n" +
" b match {\\n" +
" case c: Char => normalized(c)\\n" +
" case _ => b\\n" +
" }\\n" +
" }"
val mapLowerCased = "val lowerCased: Uniformity[(String, String)] = new Uniformity[(String, String)] {\\n" +
" def normalized(s: (String, String)): (String, String) = (s._1.toLowerCase, s._2.toLowerCase)\\n" +
" def normalizedCanHandle(b: Any): Boolean =\\n" +
" b match {\\n" +
" case (_: String, _: String) => true\\n" +
" case _ => false\\n" +
" }\\n" +
" def normalizedOrSame(b: Any) =\\n" +
" b match {\\n" +
" case (s1: String, s2: String) => normalized((s1, s2))\\n" +
" case _ => b\\n" +
" }\\n" +
" }"
val mapUpperCasedAreEqual = "def areEqual(a: (String, String), b: Any): Boolean = b match {\\n" +
" case (b1: String, b2: String) => a._1.toUpperCase == b1 && a._2.toUpperCase == b2\\n" +
" case _ => (a._1.toUpperCase, a._2.toUpperCase) == b\\n" +
" }"
val mapTrimmed = "val trimmed: Uniformity[(String, String)] = new Uniformity[(String, String)] {\\n" +
" def normalized(s: (String, String)): (String, String) = (s._1.trim, s._2.trim)\\n" +
" def normalizedCanHandle(b: Any): Boolean =\\n" +
" b match {\\n" +
" case (_: String, _: String) => true\\n" +
" case _ => false\\n" +
" }\\n" +
" def normalizedOrSame(b: Any) =\\n" +
" b match {\\n" +
" case (s1: String, s2: String) => normalized((s1, s2))\\n" +
" case _ => b\\n" +
" }\\n" +
" }"
val javaMapLowerCased = "val lowerCased: Uniformity[java.util.Map.Entry[String, String]] = new Uniformity[java.util.Map.Entry[String, String]] {\\n" +
" def normalized(s: java.util.Map.Entry[String, String]): java.util.Map.Entry[String, String] = org.scalatest.Entry(s.getKey.toLowerCase, s.getValue.toLowerCase)\\n" +
" def normalizedCanHandle(b: Any): Boolean =\\n" +
" b match {\\n" +
" case entry: java.util.Map.Entry[_, _] => \\n" +
" (entry.getKey, entry.getValue) match {\\n" +
" case (k: String, v: String) => true\\n" +
" case _ => false\\n" +
" }\\n" +
" case _ => false\\n" +
" }\\n" +
" def normalizedOrSame(b: Any) =\\n" +
" b match {\\n" +
" case entry: java.util.Map.Entry[_, _] => \\n" +
" (entry.getKey, entry.getValue) match {\\n" +
" case (k: String, v: String) => normalized(org.scalatest.Entry(k, v))\\n" +
" case _ => b\\n" +
" }\\n" +
" case _ => b\\n" +
" }\\n" +
" }"
val javaMapUpperCasedAreEqual = "def areEqual(a: java.util.Map.Entry[String, String], b: Any): Boolean = b match {\\n" +
" case entry: java.util.Map.Entry[_, _] => \\n" +
" (entry.getKey, entry.getValue) match {\\n" +
" case (k: String, v: String) => a.getKey.toUpperCase == k && a.getValue.toUpperCase == v\\n" +
" case _ => (a.getKey.toUpperCase, a.getValue.toUpperCase) == b\\n" +
" }\\n" +
" case _ => (a.getKey.toUpperCase, a.getValue.toUpperCase) == b\\n" +
" }"
val javaMapTrimmed = "val trimmed: Uniformity[java.util.Map.Entry[String, String]] = new Uniformity[java.util.Map.Entry[String, String]] {\\n" +
" def normalized(s: java.util.Map.Entry[String, String]): java.util.Map.Entry[String, String] = org.scalatest.Entry(s.getKey.trim, s.getValue.trim)\\n" +
" def normalizedCanHandle(b: Any): Boolean =\\n" +
" b match {\\n" +
" case entry: java.util.Map.Entry[_, _] => \\n" +
" (entry.getKey, entry.getValue) match {\\n " +
" case (_: String, _: String) => true\\n" +
" case _ => false\\n" +
" }\\n" +
" case _ => false\\n" +
" }\\n" +
" def normalizedOrSame(b: Any) =\\n" +
" b match {\\n" +
" case entry: java.util.Map.Entry[_, _] => \\n" +
" (entry.getKey, entry.getValue) match {\\n " +
" case (k: String, v: String) => normalized(Entry(k, v))\\n" +
" case _ => b\\n" +
" }\\n" +
" case _ => b\\n" +
" }\\n" +
" }"
val optionMapping =
List(
"List\\\\[String\\\\]" -> "Option[String]",
"List\\\\[Int\\\\]" -> "Option[Int]",
"List" -> "Option",
"listsNil" -> "listsOption",
"Nil" -> "scala.None"
)
val arrayMapping =
List(
"List\\\\[String\\\\]" -> "Array[String]",
"List\\\\[Int\\\\]" -> "Array[Int]",
"List" -> "Array",
"listsNil" -> "listsArray",
"Nil" -> "Array()",
"LinkedArray" -> "LinkedList"
)
val sortedSetMapping =
List(
"ListShould" -> "SetShould",
"List\\\\[String\\\\]" -> "SortedSet[String]",
"List\\\\[Int\\\\]" -> "SortedSet[Int]",
"List\\\\(" -> "sortedSet(",
"listsNil" -> "listsSortedSet",
"Nil" -> "SortedSet.empty",
"LinkedSortedSet" -> "List",
"//ADDITIONAL//" -> "import scala.collection.SortedSet",
"LinkedsortedSet" -> "LinkedList"
)
val javaSortedSetMapping =
List(
"ListShould" -> "JavaSetShould",
"List\\\\[String\\\\]" -> "java.util.SortedSet[String]",
"List\\\\[Int\\\\]" -> "java.util.SortedSet[Int]",
"List\\\\(" -> "javaSortedSet(",
"listsNil" -> "listsJavaSortedSet",
"Nil" -> "javaSortedSet()",
"LinkedSortedSet" -> "List",
"//ADDITIONAL//" -> "import java.util.SortedSet",
"LinkedjavaSortedSet" -> "LinkedList"
)
val javaColMapping =
List(
"ListShould" -> "JavaColShould",
"List\\\\[String\\\\]" -> "java.util.List[String]",
"def areEqual\\\\(a: java.util.List\\\\[String\\\\], b: Any\\\\): Boolean = a.map\\\\(\\\\_.toUpperCase\\\\) == b" -> "def areEqual(a: java.util.List[String], b: Any): Boolean = a.asScala.map(_.toUpperCase) == b",
"List\\\\[Int\\\\]" -> "java.util.List[Int]",
"List\\\\(" -> "javaList(",
"listsNil" -> "listsJavaCol",
"Nil" -> "new java.util.ArrayList",
"LinkedjavaList" -> "LinkedList"
)
val mapMapping =
List(
"ListShould" -> "MapShould",
"new Equality\\\\[String\\\\]" -> "new Equality[(String, String)]",
"//ADDITIONAL//" -> (mapLowerCased + "\\n" + mapTrimmed),
"def areEqual\\\\(a: String, b: Any\\\\): Boolean = a.toUpperCase == b" -> mapUpperCasedAreEqual,
"def areEqual\\\\(a: List\\\\[String\\\\], b: Any\\\\): Boolean = a.map\\\\(\\\\_.toUpperCase\\\\) == b" -> "def areEqual(a: scala.collection.mutable.LinkedHashMap[String, String], b: Any): Boolean = a.map(e => (e._1.toUpperCase, e._2.toUpperCase)) == b",
"def areEqual\\\\(a: String, b: Any\\\\)" -> "def areEqual(a: (String, String), b: Any)",
"case s: String => a.toUpperCase == s.toUpperCase" -> "case (s1: String, s2: String) => a._1.toUpperCase == s1.toUpperCase && a._2.toUpperCase == s2.toUpperCase",
"case _ => a.toUpperCase == b" -> "case _ => (a._1.toUpperCase, a._2.toUpperCase) == b",
"case l: List\\\\[_\\\\] => l.map\\\\(upperCase\\\\(_\\\\)\\\\)" -> "case l: Map[_, _] => l.map(upperCase(_))",
"defaultEquality\\\\[String\\\\]" -> "defaultEquality[(String, String)]",
"List\\\\[String\\\\]" -> "scala.collection.mutable.LinkedHashMap[String, String]",
"List\\\\[Int\\\\]" -> "scala.collection.mutable.LinkedHashMap[Int, Int]",
"List\\\\(\\\\\\"fum\\\\\\"\\\\)" -> "scala.collection.mutable.LinkedHashMap(\\"fum\\" -> \\"fum\\")",
"List\\\\(\\\\\\"to\\\\\\"\\\\)" -> "scala.collection.mutable.LinkedHashMap(\\"to\\" -> \\"to\\")",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"fu\\\\\\"\\\\)" -> "scala.collection.mutable.LinkedHashMap(\\"fum\\" -> \\"fum\\", \\"fu\\" -> \\"fu\\")",
"List\\\\(1\\\\)" -> "scala.collection.mutable.LinkedHashMap(1 -> 1)",
"List\\\\(2\\\\)" -> "scala.collection.mutable.LinkedHashMap(2 -> 2)",
"List\\\\(3\\\\)" -> "scala.collection.mutable.LinkedHashMap(3 -> 3)",
"List\\\\(8\\\\)" -> "scala.collection.mutable.LinkedHashMap(8 -> 8)",
"List\\\\(1, 2\\\\)" -> "scala.collection.mutable.LinkedHashMap(1 -> 1, 2 -> 2)",
"List\\\\(2, 3\\\\)" -> "scala.collection.mutable.LinkedHashMap(2 -> 2, 3 -> 3)",
"List\\\\(\\\\\\"hi\\\\\\"\\\\)" -> "scala.collection.mutable.LinkedHashMap(\\"hi\\" -> \\"hi\\")",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "scala.collection.mutable.LinkedHashMap(\\"hi\\" -> \\"hi\\", \\"ho\\" -> \\"ho\\")",
"List\\\\(\\\\\\"hey\\\\\\"\\\\)" -> "scala.collection.mutable.LinkedHashMap(\\"hey\\" -> \\"hey\\")",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(\\"fee\\" -> \\"fee\\", \\"fie\\" -> \\"fie\\", \\"foe\\" -> \\"foe\\")",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(\\"fee\\" -> \\"fee\\", \\"fie\\" -> \\"fie\\", \\"foe\\" -> \\"foe\\", \\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(\\"fee\\" -> \\"fee\\", \\"fie\\" -> \\"fie\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(\\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\")",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(\\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"foe\\" -> \\"foe\\", \\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fu\\\\\\"\\\\)" -> "(\\"fee\\" -> \\"fee\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fu\\" -> \\"fu\\")",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fam\\\\\\"\\\\)" -> "(\\"fee\\" -> \\"fee\\", \\"fie\\" -> \\"fie\\", \\"foe\\" -> \\"foe\\", \\"fam\\" -> \\"fam\\")",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(\\"fee\\" -> \\"fee\\", \\"fie\\" -> \\"fie\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\")",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fam\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(\\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"fam\\" -> \\"fam\\", \\"foe\\" -> \\"foe\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fex\\" -> \\"fex\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fex\\" -> \\"fex\\", \\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fex\\" -> \\"fex\\", \\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"fee\\" -> \\"fee\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fex\\" -> \\"fex\\", \\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fie\\" -> \\"fie\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"fee\\" -> \\"fee\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\", \\"fie\\" -> \\"fie\\", \\"fie\\" -> \\"fie\\", \\"fie\\" -> \\"fie\\", \\"fee\\" -> \\"fee\\", \\"fee\\" -> \\"fee\\")",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hey\\\\\\", \\\\\\"howdy\\\\\\"\\\\)" -> "(\\"ho\\" -> \\"ho\\", \\"hey\\" -> \\"hey\\", \\"howdy\\" -> \\"howdy\\")",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(\\"ho\\" -> \\"ho\\", \\"hello\\" -> \\"hello\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hey\\\\\\", \\\\\\"howdy\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"hey\\" -> \\"hey\\", \\"howdy\\" -> \\"howdy\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"hello\\" -> \\"hello\\")",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(\\"howdy\\" -> \\"howdy\\", \\"hi\\" -> \\"hi\\", \\"hello\\" -> \\"hello\\")",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"howdy\\" -> \\"howdy\\", \\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(\\"howdy\\" -> \\"howdy\\", \\"hello\\" -> \\"hello\\", \\"hi\\" -> \\"hi\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"foe\\" -> \\"foe\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\"fum\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\"hi\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"ho\\" -> \\"ho\\")",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"ho\\" -> \\"ho\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"HE\\" -> \\"HE\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\", \\"he\\" -> \\"he\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"he\\\\\\"\\\\)" -> "(\\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(\\"he\\" -> \\"he\\", \\"hi\\" -> \\"hi\\")",
"\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(\\"he\\" -> \\"he\\", \\"hi\\" -> \\"hi\\", \\"hello\\" -> \\"hello\\")",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(\\"hello\\" -> \\"hello\\", \\"hi\\" -> \\"hi\\")",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"hello\\" -> \\"hello\\", \\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"hello\\" -> \\"hello\\", \\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\", \\"he\\" -> \\"he\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(\\"hello\\" -> \\"hello\\", \\"hi\\" -> \\"hi\\", \\"hi\\" -> \\"hi\\", \\"he\\" -> \\"he\\")",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "(\\"hello\\" -> \\"hello\\", \\"ho\\" -> \\"ho\\")",
"\\\\(\\\\\\"ho\\\\\\"\\\\)" -> "(\\"ho\\" -> \\"ho\\")",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(\\"ho\\" -> \\"ho\\", \\"hi\\" -> \\"hi\\")",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HEY\\\\\\", \\\\\\"HOWDY\\\\\\"\\\\)" -> "(\\"HO\\" -> \\"HO\\", \\"HEY\\" -> \\"HEY\\", \\"HOWDY\\" -> \\"HOWDY\\")",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HEY\\\\\\", \\\\\\"HOWDY\\\\\\"\\\\)" -> "(\\"HE\\" -> \\"HE\\", \\"HEY\\" -> \\"HEY\\", \\"HOWDY\\" -> \\"HOWDY\\")",
"\\\\(\\\\\\"HI\\\\\\"\\\\)" -> "(\\"HI\\" -> \\"HI\\")",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HELLO\\\\\\"\\\\)" -> "(\\"HI\\" -> \\"HI\\", \\"HELLO\\" -> \\"HELLO\\")",
"\\\\(\\\\\\"HELLO\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "(\\"HELLO\\" -> \\"HELLO\\", \\"HI\\" -> \\"HI\\")",
"\\\\(\\\\\\"HELLO\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "(\\"HELLO\\" -> \\"HELLO\\", \\"HO\\" -> \\"HO\\")",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HELLO\\\\\\"\\\\)" -> "(\\"HO\\" -> \\"HO\\", \\"HELLO\\" -> \\"HELLO\\")",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "(\\"HE\\" -> \\"HE\\", \\"HI\\" -> \\"HI\\")",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "(\\"HI\\" -> \\"HI\\", \\"HE\\" -> \\"HE\\")",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "(\\"HI\\" -> \\"HI\\", \\"HO\\" -> \\"HO\\")",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "(\\"HO\\" -> \\"HO\\", \\"HE\\" -> \\"HE\\")",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "(\\"HE\\" -> \\"HE\\", \\"HO\\" -> \\"HO\\")",
"\\\\(\\\\\\"HO\\\\\\"\\\\)" -> "(\\"HO\\" -> \\"HO\\")",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "(\\"HO\\" -> \\"HO\\", \\"HI\\" -> \\"HI\\")",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"dear\\\\\\"\\\\)" -> "(\\"happy\\" -> \\"happy\\", \\"birthday\\" -> \\"birthday\\", \\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\", \\"dear\\" -> \\"dear\\")",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(\\"happy\\" -> \\"happy\\", \\"birthday\\" -> \\"birthday\\", \\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\")",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(\\"happy\\" -> \\"happy\\", \\"birthday\\" -> \\"birthday\\", \\"to\\" -> \\"to\\")",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"too\\\\\\"\\\\)" -> "(\\"happy\\" -> \\"happy\\", \\"happy\\" -> \\"happy\\", \\"happy\\" -> \\"happy\\", \\"birthday\\" -> \\"birthday\\", \\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\", \\"too\\" -> \\"too\\")",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(\\"happy\\" -> \\"happy\\", \\"happy\\" -> \\"happy\\", \\"happy\\" -> \\"happy\\", \\"birthday\\" -> \\"birthday\\", \\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\")",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(\\"happy\\" -> \\"happy\\", \\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\")",
"\\\\(\\\\\\"have\\\\\\", \\\\\\"a\\\\\\", \\\\\\"nice\\\\\\", \\\\\\"day\\\\\\"\\\\)" -> "(\\"have\\" -> \\"have\\", \\"a\\" -> \\"a\\", \\"nice\\" -> \\"nice\\", \\"day\\" -> \\"day\\")",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(\\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\", \\"fum\\" -> \\"fum\\")",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FUM \\\\\\"\\\\)" -> "(\\" FEE \\" -> \\" FEE \\", \\" FIE \\" -> \\" FIE \\", \\" FOE \\" -> \\" FOE \\", \\" FUM \\" -> \\" FUM \\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FU\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FUM\\" -> \\"FUM\\", \\"FOE\\" -> \\"FOE\\", \\"FU\\" -> \\"FU\\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUM\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FIE\\" -> \\"FIE\\", \\"FOE\\" -> \\"FOE\\", \\"FUM\\" -> \\"FUM\\")",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(\\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\", \\"FUM\\" -> \\"FUM\\", \\"FOE\\" -> \\"FOE\\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FIE\\" -> \\"FIE\\", \\"FUM\\" -> \\"FUM\\", \\"FOE\\" -> \\"FOE\\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FU\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FAM\\" -> \\"FAM\\", \\"FOE\\" -> \\"FOE\\", \\"FU\\" -> \\"FU\\")",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FAM \\\\\\"\\\\)" -> "(\\" FEE \\" -> \\" FEE \\", \\" FIE \\" -> \\" FIE \\", \\" FOE \\" -> \\" FOE \\", \\" FAM \\" -> \\" FAM \\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FAM\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FIE\\" -> \\"FIE\\", \\"FOE\\" -> \\"FOE\\", \\"FAM\\" -> \\"FAM\\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FIE\\" -> \\"FIE\\", \\"FAM\\" -> \\"FAM\\", \\"FOE\\" -> \\"FOE\\")",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FUU \\\\\\"\\\\)" -> "(\\" FEE \\" -> \\" FEE \\", \\" FIE \\" -> \\" FIE \\", \\" FOE \\" -> \\" FOE \\", \\" FUU \\" -> \\" FUU \\")",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUU\\\\\\"\\\\)" -> "(\\"FEE\\" -> \\"FEE\\", \\"FIE\\" -> \\"FIE\\", \\"FOE\\" -> \\"FOE\\", \\"FUU\\" -> \\"FUU\\")",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(\\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\", \\"FAM\\" -> \\"FAM\\", \\"FOE\\" -> \\"FOE\\")",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(\\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\", \\"FUM\\" -> \\"FUM\\", \\"FOE\\" -> \\"FOE\\")",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FAM\\\\\\"\\\\)" -> "(\\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\", \\"FOE\\" -> \\"FOE\\", \\"FAM\\" -> \\"FAM\\")",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUM\\\\\\"\\\\)" -> "(\\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\", \\"FOE\\" -> \\"FOE\\", \\"FUM\\" -> \\"FUM\\")",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUU\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(\\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\", \\"FUU\\" -> \\"FUU\\", \\"FOE\\" -> \\"FOE\\")",
"\\\\(\\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\"\\\\)" -> "(\\"FUM\\" -> \\"FUM\\", \\"FOE\\" -> \\"FOE\\", \\"FIE\\" -> \\"FIE\\", \\"FEE\\" -> \\"FEE\\")",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FaM \\\\\\"\\\\)" -> "(\\" FEE \\" -> \\" FEE \\", \\" FIE \\" -> \\" FIE \\", \\" FOE \\" -> \\" FOE \\", \\" FaM \\" -> \\" FaM \\")",
"\\\\(\\\\\\" FUM \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FEE \\\\\\"\\\\)" -> "(\\" FUM \\" -> \\" FUM \\", \\" FOE \\" -> \\" FOE \\", \\" FIE \\" -> \\" FIE \\", \\" FEE \\" -> \\" FEE \\")",
"\\\\(\\\\\\"HAPPY\\\\\\", \\\\\\"BIRTHDAY\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(\\"HAPPY\\" -> \\"HAPPY\\", \\"BIRTHDAY\\" -> \\"BIRTHDAY\\", \\"TO\\" -> \\"TO\\", \\"YOU\\" -> \\"YOU\\")",
"\\\\(\\\\\\" HAPPY \\\\\\", \\\\\\" BIRTHDAY \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" YOU \\\\\\"\\\\)" -> "(\\" HAPPY \\" -> \\" HAPPY \\", \\" BIRTHDAY \\" -> \\" BIRTHDAY \\", \\" TO \\" -> \\" TO \\", \\" YOU \\" -> \\" YOU \\")",
"\\\\(\\\\\\"YOU\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"BIRTHDAY\\\\\\", \\\\\\"HAPPY\\\\\\"\\\\)" -> "(\\"YOU\\" -> \\"YOU\\", \\"TO\\" -> \\"TO\\", \\"BIRTHDAY\\" -> \\"BIRTHDAY\\", \\"HAPPY\\" -> \\"HAPPY\\")",
"\\\\(\\\\\\"nice\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(\\"nice\\" -> \\"nice\\", \\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\")",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(\\"NICE\\" -> \\"NICE\\", \\"TO\\" -> \\"TO\\", \\"MEET\\" -> \\"MEET\\", \\"YOU\\" -> \\"YOU\\")",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\", \\\\\\"TOO\\\\\\"\\\\)" -> "(\\"NICE\\" -> \\"NICE\\", \\"TO\\" -> \\"TO\\", \\"MEET\\" -> \\"MEET\\", \\"YOU\\" -> \\"YOU\\", \\"TOO\\" -> \\"TOO\\")",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(\\"NICE\\" -> \\"NICE\\", \\"TO\\" -> \\"TO\\", \\"MEET\\" -> \\"MEET\\", \\"YOU\\" -> \\"YOU\\")",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(\\"NICE\\" -> \\"NICE\\", \\"MEET\\" -> \\"MEET\\", \\"YOU\\" -> \\"YOU\\")",
"\\\\(\\\\\\"nice\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(\\"nice\\" -> \\"nice\\", \\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\")",
"\\\\(\\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(\\"to\\" -> \\"to\\", \\"you\\" -> \\"you\\")",
"\\\\(\\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(\\"to\\" -> \\"to\\", \\"to\\" -> \\"to\\", \\"to\\" -> \\"to\\", \\"to\\" -> \\"to\\")",
"\\\\(\\\\\\"to\\\\\\"\\\\)" -> "(\\"to\\" -> \\"to\\")",
"\\\\(\\\\\\"TO\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(\\"TO\\" -> \\"TO\\", \\"YOU\\" -> \\"YOU\\")",
"\\\\(\\\\\\" TO \\\\\\", \\\\\\" YOU \\\\\\"\\\\)" -> "(\\" TO \\" -> \\" TO \\", \\" YOU \\" -> \\" YOU \\")",
"\\\\(\\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\"\\\\)" -> "(\\" TO \\" -> \\" TO \\", \\" TO \\" -> \\" TO \\", \\" TO \\" -> \\" TO \\", \\" TO \\" -> \\" TO \\")",
"\\\\(\\\\\\" TO \\\\\\"\\\\)" -> "(\\" TO \\" -> \\" TO \\")",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(\\"too\\" -> \\"too\\", \\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\", \\"birthday\\" -> \\"birthday\\", \\"happy\\" -> \\"happy\\")",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(\\"too\\" -> \\"too\\", \\"you\\" -> \\"you\\", \\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\", \\"birthday\\" -> \\"birthday\\", \\"happy\\" -> \\"happy\\", \\"happy\\" -> \\"happy\\")",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(\\"too\\" -> \\"too\\", \\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\", \\"to\\" -> \\"to\\", \\"birthday\\" -> \\"birthday\\", \\"birthday\\" -> \\"birthday\\", \\"happy\\" -> \\"happy\\")",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(\\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\", \\"to\\" -> \\"to\\", \\"birthday\\" -> \\"birthday\\", \\"birthday\\" -> \\"birthday\\", \\"happy\\" -> \\"happy\\")",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(\\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\", \\"birthday\\" -> \\"birthday\\", \\"happy\\" -> \\"happy\\")",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(\\"you\\" -> \\"you\\", \\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\", \\"birthday\\" -> \\"birthday\\", \\"happy\\" -> \\"happy\\", \\"happy\\" -> \\"happy\\")",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(\\"you\\" -> \\"you\\", \\"to\\" -> \\"to\\")",
"\\\\(\\\\\\"YOU\\\\\\", \\\\\\"TO\\\\\\"\\\\)" -> "(\\"YOU\\" -> \\"YOU\\", \\"TO\\" -> \\"TO\\")",
"\\\\(\\\\\\" YOU \\\\\\", \\\\\\" TO \\\\\\"\\\\)" -> "(\\" YOU \\" -> \\" YOU \\", \\" TO \\" -> \\" TO \\")",
"\\\\\\"\\\\\\\\\\"happy\\\\\\\\\\", \\\\\\\\\\"birthday\\\\\\\\\\", \\\\\\\\\\"to\\\\\\\\\\", \\\\\\\\\\"you\\\\\\\\\\\\\\"\\\\\\"" -> "\\"(happy,happy), (birthday,birthday), (to,to), (you,you)\\"",
"\\\\\\"\\\\\\\\\\"HAPPY\\\\\\\\\\", \\\\\\\\\\"BIRTHDAY\\\\\\\\\\", \\\\\\\\\\"TO\\\\\\\\\\", \\\\\\\\\\"YOU\\\\\\\\\\\\\\"\\\\\\"" -> "\\"(HAPPY,HAPPY), (BIRTHDAY,BIRTHDAY), (TO,TO), (YOU,YOU)\\"",
"\\\\\\\\\\"ho\\\\\\\\\\", \\\\\\\\\\"hey\\\\\\\\\\", \\\\\\\\\\"howdy\\\\\\\\\\"" -> "(ho,ho), (hey,hey), (howdy,howdy)",
"\\\\\\\\\\"ho\\\\\\\\\\", \\\\\\\\\\"hello\\\\\\\\\\"" -> "(ho,ho), (hello,hello)",
"\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"hey\\\\\\\\\\", \\\\\\\\\\"howdy\\\\\\\\\\"" -> "(hi,hi), (hey,hey), (howdy,howdy)",
"\\\\\\\\\\"hello\\\\\\\\\\", \\\\\\\\\\"hi\\\\\\\\\\"" -> "(hello,hello), (hi,hi)",
"\\\\\\\\\\"happy\\\\\\\\\\", \\\\\\\\\\"birthday\\\\\\\\\\", \\\\\\\\\\"to\\\\\\\\\\", \\\\\\\\\\"you\\\\\\\\\\"" -> "(happy,happy), (birthday,birthday), (to,to), (you,you)",
"\\\\\\\\\\"have\\\\\\\\\\", \\\\\\\\\\"a\\\\\\\\\\", \\\\\\\\\\"nice\\\\\\\\\\", \\\\\\\\\\"day\\\\\\\\\\"" -> "(have,have), (a,a), (nice,nice), (day,day)",
"\\\\\\\\\\"HELLO\\\\\\\\\\", \\\\\\\\\\"HI\\\\\\\\\\"" -> "(HELLO,HELLO), (HI,HI)",
"\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"he\\\\\\\\\\"" -> "(hi,hi), (he,he)",
"\\\\\\\\\\"hi\\\\\\\\\\"" -> "(hi,hi)",
"\\\\\\\\\\"ho\\\\\\\\\\"" -> "(ho,ho)",
"\\\\\\\\\\"hello\\\\\\\\\\"" -> "(hello,hello)",
"\\\\\\\\\\"HO\\\\\\\\\\", \\\\\\\\\\"HEY\\\\\\\\\\", \\\\\\\\\\"HOWDY\\\\\\\\\\"" -> "(HO,HO), (HEY,HEY), (HOWDY,HOWDY)",
"\\\\\\\\\\"HE\\\\\\\\\\", \\\\\\\\\\"HEY\\\\\\\\\\", \\\\\\\\\\"HOWDY\\\\\\\\\\"" -> "(HE,HE), (HEY,HEY), (HOWDY,HOWDY)",
"\\\\\\\\\\"HE\\\\\\\\\\", \\\\\\\\\\"HI\\\\\\\\\\"" -> "(HE,HE), (HI,HI)",
"\\\\\\\\\\"HI\\\\\\\\\\", \\\\\\\\\\"HE\\\\\\\\\\"" -> "(HI,HI), (HE,HE)",
"\\\\\\\\\\"HI\\\\\\\\\\"" -> "(HI,HI)",
"\\\\\\\\\\"HO\\\\\\\\\\", \\\\\\\\\\"HE\\\\\\\\\\"" -> "(HO,HO), (HE,HE)",
"\\\\\\\\\\"HO\\\\\\\\\\"" -> "(HO,HO)",
"\\\\\\\\\\"HELLO\\\\\\\\\\"" -> "(HELLO,HELLO)",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "(fee,fee), (fie,fie), (foe,foe), (fum,fum)",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fu\\\\\\\\\\"" -> "(fee,fee), (fum,fum), (foe,foe), (fu,fu)",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fam\\\\\\\\\\"" -> "(fee,fee), (fie,fie), (foe,foe), (fam,fam)",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "(fee,fee), (fie,fie), (fum,fum), (foe,foe)",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "(fie,fie), (fee,fee), (fum,fum), (foe,foe)",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "(fie,fie), (fee,fee), (foe,foe), (fum,fum)",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fam\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "(fie,fie), (fee,fee), (fam,fam), (foe,foe)",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\"" -> "(fum,fum), (foe,foe), (fie,fie), (fee,fee)",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "(fum,fum), (fum,fum), (fum,fum), (fum,fum)",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "(fum,fum), (fum,fum), (fum,fum)",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "(fum,fum), (fum,fum)",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "(fum,fum), (foe,foe)",
"\\\\\\\\\\"fum\\\\\\\\\\"" -> "(fum,fum)",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\"" -> "(FEE,FEE), (FIE,FIE), (FOE,FOE), (FUM,FUM)",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUU\\\\\\\\\\"" -> "(FEE,FEE), (FIE,FIE), (FOE,FOE), (FUU,FUU)",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\"" -> "(FEE,FEE), (FIE,FIE), (FOE,FOE), (FAM,FAM)",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "(FEE,FEE), (FIE,FIE), (FAM,FAM), (FOE,FOE)",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "(FEE,FEE), (FIE,FIE), (FUM,FUM), (FOE,FOE)",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "(FIE,FIE), (FEE,FEE), (FUM,FUM), (FOE,FOE)",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "(FIE,FIE), (FEE,FEE), (FAM,FAM), (FOE,FOE)",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\"" -> "(FIE,FIE), (FEE,FEE), (FOE,FOE), (FUM,FUM)",
"\\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\"" -> "(FUM,FUM), (FOE,FOE), (FIE,FIE), (FEE,FEE)",
"\\\\\\\\\\"you\\\\\\\\\\"" -> "(you,you)",
"\\\\\\\\\\"to\\\\\\\\\\"" -> "(to,to)",
"of \\\\(1, 2, 3\\\\)" -> "of ((1,1), (2,2), (3,3))",
"of \\\\(1, 2, 8\\\\)" -> "of ((1,1), (2,2), (8,8))",
"of \\\\(1, 3, 4\\\\)" -> "of ((1,1), (3,3), (4,4))",
"of \\\\(1, 6, 8\\\\)" -> "of ((1,1), (6,6), (8,8))",
"of \\\\(2, 3, 1\\\\)" -> "of ((2,2), (3,3), (1,1))",
"of \\\\(2, 3, 4\\\\)" -> "of ((2,2), (3,3), (4,4))",
"of \\\\(2, 3, 5\\\\)" -> "of ((2,2), (3,3), (5,5))",
"of \\\\(2, 3, 8\\\\)" -> "of ((2,2), (3,3), (8,8))",
"of \\\\(2, 6, 8\\\\)" -> "of ((2,2), (6,6), (8,8))",
"of \\\\(3, 6, 8\\\\)" -> "of ((3,3), (6,6), (8,8))",
"of \\\\(3, 6, 9\\\\)" -> "of ((3,3), (6,6), (9,9))",
"of \\\\(4, 3, 2\\\\)" -> "of ((4,4), (3,3), (2,2))",
"of \\\\(6, 7, 8\\\\)" -> "of ((6,6), (7,7), (8,8))",
"of \\\\(\\\\\\\\\\"ho\\\\\\\\\\"\\\\)" -> "of ((ho,ho))",
"of \\\\(\\\\\\\\\\"hi\\\\\\\\\\"\\\\)" -> "of ((hi,hi))",
"of \\\\(\\\\\\\\\\"he\\\\\\\\\\"\\\\)" -> "of ((he,he))",
"of \\\\(\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"hello\\\\\\\\\\"\\\\)" -> "of ((hi,hi), (hello,hello))",
"of \\\\(\\\\\\\\\\"HI\\\\\\\\\\"\\\\)" -> "of ((HI,HI))",
"\\\\\\"\\\\(1, 2, 3\\\\)\\\\\\"" -> "\\"((1,1), (2,2), (3,3))\\"",
"\\\\\\"\\\\(1, 3, 2\\\\)\\\\\\"" -> "\\"((1,1), (3,3), (2,2))\\"",
"\\\\\\"\\\\(1, 3, 4\\\\)\\\\\\"" -> "\\"((1,1), (3,3), (4,4))\\"",
"\\\\\\"\\\\(2, 3, 1\\\\)\\\\\\"" -> "\\"((2,2), (3,3), (1,1))\\"",
"\\\\\\"\\\\(2, 3, 4\\\\)\\\\\\"" -> "\\"((2,2), (3,3), (4,4))\\"",
"\\\\\\"\\\\(2, 3, 8\\\\)\\\\\\"" -> "\\"((2,2), (3,3), (8,8))\\"",
"\\\\\\"\\\\(3, 1, 2\\\\)\\\\\\"" -> "\\"((3,3), (1,1), (2,2))\\"",
"\\\\\\"\\\\(3, 6, 8\\\\)\\\\\\"" -> "\\"((3,3), (6,6), (8,8))\\"",
"\\\\\\"\\\\(4, 2, 3\\\\)\\\\\\"" -> "\\"((4,4), (2,2), (3,3))\\"",
"\\\\(\\\\\\"1, 2, 3\\\\\\"\\\\)" -> "(\\"(1,1), (2,2), (3,3)\\")",
"\\\\(\\\\\\"1, 2, 8\\\\\\"\\\\)" -> "(\\"(1,1), (2,2), (8,8)\\")",
"\\\\(\\\\\\"1, 3, 4\\\\\\"\\\\)" -> "(\\"(1,1), (3,3), (4,4)\\")",
"\\\\(\\\\\\"1, 6, 8\\\\\\"\\\\)" -> "(\\"(1,1), (6,6), (8,8)\\")",
"\\\\(\\\\\\"2, 3, 1\\\\\\"\\\\)" -> "(\\"(2,2), (3,3), (1,1)\\")",
"\\\\(\\\\\\"2, 3, 4\\\\\\"\\\\)" -> "(\\"(2,2), (3,3), (4,4)\\")",
"\\\\(\\\\\\"2, 3, 5\\\\\\"\\\\)" -> "(\\"(2,2), (3,3), (5,5)\\")",
"\\\\(\\\\\\"2, 3, 8\\\\\\"\\\\)" -> "(\\"(2,2), (3,3), (8,8)\\")",
"\\\\(\\\\\\"2, 6, 8\\\\\\"\\\\)" -> "(\\"(2,2), (6,6), (8,8)\\")",
"\\\\(\\\\\\"3, 6, 8\\\\\\"\\\\)" -> "(\\"(3,3), (6,6), (8,8)\\")",
"\\\\(\\\\\\"3, 6, 9\\\\\\"\\\\)" -> "(\\"(3,3), (6,6), (9,9)\\")",
"\\\\(\\\\\\"6, 7, 8\\\\\\"\\\\)" -> "(\\"(6,6), (7,7), (8,8)\\")",
"List\\\\(to\\\\)" -> "scala.collection.mutable.LinkedHashMap(to -> to)",
"List\\\\(ho\\\\)" -> "scala.collection.mutable.LinkedHashMap(ho -> ho)",
"List\\\\(hi\\\\)" -> "scala.collection.mutable.LinkedHashMap(hi -> hi)",
"List\\\\(hey\\\\)" -> "scala.collection.mutable.LinkedHashMap(hey -> hey)",
"\\\\(0, 1, 1, 1, 2, 3\\\\)" -> "(0 -> 0, 1 -> 1, 1 -> 1, 1 -> 1, 2 -> 2, 3 -> 3)",
"\\\\(0, 1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "(0 -> 0, 1 -> 1, 1 -> 1, 1 -> 1, 2 -> 2, 2 -> 2, 2 -> 2, 3 -> 3, 3 -> 3, 3 -> 3)",
"\\\\(0, 1, 1, 2, 3, 3\\\\)" -> "(0 -> 0, 1 -> 1, 1 -> 1, 2 -> 2, 3 -> 3, 3 -> 3)",
"\\\\(0, 1, 1, 2, 3, 3, 3\\\\)" -> "(0 -> 0, 1 -> 1, 1 -> 1, 2 -> 2, 3 -> 3, 3 -> 3, 3 -> 3)",
"\\\\(0, 1, 2, 2, 3\\\\)" -> "(0 -> 0, 1 -> 1, 2 -> 2, 2 -> 2, 3 -> 3)",
"\\\\(0, 1, 2, 2, 3, 3, 3\\\\)" -> "(0 -> 0, 1 -> 1, 2 -> 2, 2 -> 2, 3 -> 3, 3 -> 3, 3 -> 3)",
"\\\\(0, 1, 2, 3\\\\)" -> "(0 -> 0, 1 -> 1, 2 -> 2, 3 -> 3)",
"\\\\(0, 1, 2, 3, 3\\\\)" -> "(0 -> 0, 1 -> 1, 2 -> 2, 3 -> 3, 3 -> 3)",
"\\\\(1, 1, 1, 2, 3\\\\)" -> "(1 -> 1, 1 -> 1, 1 -> 1, 2 -> 2, 3 -> 3)",
"\\\\(1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "(1 -> 1, 1 -> 1, 1 -> 1, 2 -> 2, 2 -> 2, 2 -> 2, 3 -> 3, 3 -> 3, 3 -> 3)",
"\\\\(1, 1, 2, 3, 3\\\\)" -> "(1 -> 1, 1 -> 1, 2 -> 2, 3 -> 3, 3 -> 3)",
"\\\\(1, 1, 2, 3, 3, 3\\\\)" -> "(1 -> 1, 1 -> 1, 2 -> 2, 3 -> 3, 3 -> 3, 3 -> 3)",
"\\\\(1, 2, 2, 3\\\\)" -> "(1 -> 1, 2 -> 2, 2 -> 2, 3 -> 3)",
"\\\\(1, 2, 2, 3, 3, 3\\\\)" -> "(1 -> 1, 2 -> 2, 2 -> 2, 3 -> 3, 3 -> 3, 3 -> 3)",
"\\\\(1, 2, 3\\\\)" -> "(1 -> 1, 2 -> 2, 3 -> 3)",
"\\\\(1, 2, 3, 3\\\\)" -> "(1 -> 1, 2 -> 2, 3 -> 3, 3 -> 3)",
"\\\\(1, 2, 5\\\\)" -> "(1 -> 1, 2 -> 2, 5 -> 5)",
"\\\\(1, 2, 8\\\\)" -> "(1 -> 1, 2 -> 2, 8 -> 8)",
"\\\\(1, 2, 9\\\\)" -> "(1 -> 1, 2 -> 2, 9 -> 9)",
"\\\\(1, 3, 2\\\\)" -> "(1 -> 1, 3 -> 3, 2 -> 2)",
"\\\\(1, 3, 4\\\\)" -> "(1 -> 1, 3 -> 3, 4 -> 4)",
"\\\\(1, 3, 8\\\\)" -> "(1 -> 1, 3 -> 3, 8 -> 8)",
"\\\\(1, 6, 8\\\\)" -> "(1 -> 1, 6 -> 6, 8 -> 8)",
"\\\\(2, 1, 5\\\\)" -> "(2 -> 2, 1 -> 1, 5 -> 5)",
"\\\\(2, 2, 3, 4\\\\)" -> "(2 -> 2, 2 -> 2, 3 -> 3, 4 -> 4)",
"\\\\(2, 3, 1\\\\)" -> "(2 -> 2, 3 -> 3, 1 -> 1)",
"\\\\(2, 3, 4\\\\)" -> "(2 -> 2, 3 -> 3, 4 -> 4)",
"\\\\(2, 3, 5\\\\)" -> "(2 -> 2, 3 -> 3, 5 -> 5)",
"\\\\(2, 3, 8\\\\)" -> "(2 -> 2, 3 -> 3, 8 -> 8)",
"\\\\(2, 4, 3\\\\)" -> "(2 -> 2, 4 -> 4, 3 -> 3)",
"\\\\(2, 6, 8\\\\)" -> "(2 -> 2, 6 -> 6, 8 -> 8)",
"\\\\(3, 1, 2\\\\)" -> "(3 -> 3, 1 -> 1, 2 -> 2)",
"\\\\(3, 1, 5\\\\)" -> "(3 -> 3, 1 -> 1, 5 -> 5)",
"\\\\(3, 2, 1\\\\)" -> "(3 -> 3, 2 -> 2, 1 -> 1)",
"\\\\(3, 2, 1, 0\\\\)" -> "(3 -> 3, 2 -> 2, 1 -> 1, 0 -> 0)",
"\\\\(3, 2, 5\\\\)" -> "(3 -> 3, 2 -> 2, 5 -> 5)",
"\\\\(3, 2, 8\\\\)" -> "(3 -> 3, 2 -> 2, 8 -> 8)",
"\\\\(3, 4, 2\\\\)" -> "(3 -> 3, 4 -> 4, 2 -> 2)",
"\\\\(3, 4, 5\\\\)" -> "(3 -> 3, 4 -> 4, 5 -> 5)",
"\\\\(3, 6, 5\\\\)" -> "(3 -> 3, 6 -> 6, 5 -> 5)",
"\\\\(3, 6, 8\\\\)" -> "(3 -> 3, 6 -> 6, 8 -> 8)",
"\\\\(3, 6, 9\\\\)" -> "(3 -> 3, 6 -> 6, 9 -> 9)",
"\\\\(3, 8, 5\\\\)" -> "(3 -> 3, 8 -> 8, 5 -> 5)",
"\\\\(4, 2, 3\\\\)" -> "(4 -> 4, 2 -> 2, 3 -> 3)",
"\\\\(4, 3, 2\\\\)" -> "(4 -> 4, 3 -> 3, 2 -> 2)",
"\\\\(5, 3, 4\\\\)" -> "(5 -> 5, 3 -> 3, 4 -> 4)",
"\\\\(5, 7, 9\\\\)" -> "(5 -> 5, 7 -> 7, 9 -> 9)",
"\\\\(6, 7, 8\\\\)" -> "(6 -> 6, 7 -> 7, 8 -> 8)",
"\\\\(8, 2, 3, 4\\\\)" -> "(8 -> 8, 2 -> 2, 3 -> 3, 4 -> 4)",
"\\\\(8, 2, 2, 3, 4\\\\)" -> "(8 -> 8, 2 -> 2, 2 -> 2, 3 -> 3, 4 -> 4)",
"\\\\(8, 3, 1\\\\)" -> "(8 -> 8, 3 -> 3, 1 -> 1)",
"\\\\(8, 3, 4\\\\)" -> "(8 -> 8, 3 -> 3, 4 -> 4)",
"\\\\(8, 4, 3, 2\\\\)" -> "(8 -> 8, 4 -> 4, 3 -> 3, 2 -> 2)",
"\\\\(1, 3, Nil\\\\)" -> "(1 -> 1, 3 -> 3, Map())",
"List\\\\(" -> "scala.collection.mutable.LinkedHashMap(",
//"List" -> "Map",
"Linkedscala.collection.mutable.LinkedHashMap" -> "scala.collection.mutable.LinkedList",
//"LinkedMap" -> "LinkedHashMap",
"listsNil" -> "listsMap",
"Nil" -> "scala.collection.mutable.LinkedHashMap()"
)
val sortedMapMapping =
(mapMapping map { case (key, value) =>
(key,
value.replace("scala.collection.mutable.LinkedHashMap(", "sortedMap(")
.replace("scala.collection.mutable.LinkedHashMap[", "scala.collection.SortedMap["))
}) ++
List(
"LinkedsortedMap" -> "scala.collection.mutable.LinkedHashMap"
)
val javaMapMapping =
List(
"ListShould" -> "JavaMapShould",
//"Entry\\\\(" -> "org.scalatest.Entry(",
"new Equality\\\\[String\\\\]" -> "new Equality[java.util.Map.Entry[String, String]]",
"//ADDITIONAL//" -> (javaMapLowerCased + "\\n" + javaMapTrimmed + "\\n" + "import org.scalatest.Entry"),
"def areEqual\\\\(a: String, b: Any\\\\): Boolean = a.toUpperCase == b" -> javaMapUpperCasedAreEqual,
"def areEqual\\\\(a: List\\\\[String\\\\], b: Any\\\\): Boolean = a.map\\\\(\\\\_.toUpperCase\\\\) == b" -> "def areEqual(a: java.util.Map[String, String], b: Any): Boolean = a.asScala.map(e => upperCase(e)) == b",
"def areEqual\\\\(a: String, b: Any\\\\)" -> "def areEqual(a: java.util.Map.Entry[String, String], b: Any)",
"case s: String => a.toUpperCase == s.toUpperCase" -> "case java.util.Map.Entry[_, _] => toUpperCase(a) == toUpperCase(s)",
"case _ => a.toUpperCase == b" -> "case _ => (a.getKey.toUpperCase, a.getValue.toUpperCase) == b",
"case l: List\\\\[_\\\\] => l.map\\\\(upperCase\\\\(_\\\\)\\\\)" -> "case l: Map[_, _] => l.map(upperCase(_))",
"defaultEquality\\\\[String\\\\]" -> "defaultEquality[java.util.Map.Entry[String, String]]",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"))",
"List\\\\[String\\\\]" -> "java.util.Map[String, String]",
"List\\\\[Int\\\\]" -> "java.util.Map[Int, Int]",
"List\\\\(\\\\\\"fum\\\\\\"\\\\)" -> "javaMap(Entry(\\"fum\\", \\"fum\\"))",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"fu\\\\\\"\\\\)" -> "javaMap(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fu\\", \\"fu\\"))",
"List\\\\(\\\\\\"to\\\\\\"\\\\)" -> "javaMap(Entry(\\"to\\", \\"to\\"))",
"List\\\\(1\\\\)" -> "javaMap(Entry(1, 1))",
"List\\\\(2\\\\)" -> "javaMap(Entry(2, 2))",
"List\\\\(3\\\\)" -> "javaMap(Entry(3, 3))",
"List\\\\(8\\\\)" -> "javaMap(Entry(8, 8))",
"List\\\\(1, 2\\\\)" -> "javaMap(Entry(1, 1), Entry(2, 2))",
"List\\\\(2, 3\\\\)" -> "javaMap(Entry(2, 2), Entry(3, 3))",
"List\\\\(\\\\\\"hi\\\\\\"\\\\)" -> "javaMap(Entry(\\"hi\\", \\"hi\\"))",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "javaMap(Entry(\\"hi\\", \\"hi\\"), Entry(\\"ho\\", \\"ho\\"))",
"List\\\\(\\\\\\"hey\\\\\\"\\\\)" -> "javaMap(Entry(\\"hey\\", \\"hey\\"))",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fee\\", \\"fee\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fum\\", \\"fum\\"))",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fee\\", \\"fee\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fum\\", \\"fum\\"))",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"))",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fum\\", \\"fum\\"))",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fu\\\\\\"\\\\)" -> "(Entry(\\"fee\\", \\"fee\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fu\\", \\"fu\\"))",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fam\\\\\\"\\\\)" -> "(Entry(\\"fee\\", \\"fee\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fam\\", \\"fam\\"))",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(Entry(\\"fee\\", \\"fee\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"foe\\", \\"foe\\"))",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(Entry(\\"fee\\", \\"fee\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"))",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fam\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fam\\", \\"fam\\"), Entry(\\"foe\\", \\"foe\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fex\\", \\"fex\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fex\\", \\"fex\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fex\\", \\"fex\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fex\\", \\"fex\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fie\\", \\"fie\\"), Entry(\\"fee\\", \\"fee\\"), Entry(\\"fee\\", \\"fee\\"))",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hey\\\\\\", \\\\\\"howdy\\\\\\"\\\\)" -> "(Entry(\\"ho\\", \\"ho\\"), Entry(\\"hey\\", \\"hey\\"), Entry(\\"howdy\\", \\"howdy\\"))",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(Entry(\\"ho\\", \\"ho\\"), Entry(\\"hello\\", \\"hello\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hey\\\\\\", \\\\\\"howdy\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"hey\\", \\"hey\\"), Entry(\\"howdy\\", \\"howdy\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"hello\\", \\"hello\\"))",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(Entry(\\"howdy\\", \\"howdy\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"hello\\", \\"hello\\"))",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"howdy\\", \\"howdy\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(Entry(\\"howdy\\", \\"howdy\\"), Entry(\\"hello\\", \\"hello\\"), Entry(\\"hi\\", \\"hi\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"foe\\", \\"foe\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"))",
"\\\\(\\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"))",
"\\\\(\\\\\\"hi\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"ho\\", \\"ho\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"HE\\", \\"HE\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"ho\\", \\"ho\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"), Entry(\\"he\\", \\"he\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(Entry(\\"he\\", \\"he\\"), Entry(\\"hi\\", \\"hi\\"))",
"\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "(Entry(\\"he\\", \\"he\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"hello\\", \\"hello\\"))",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(Entry(\\"hello\\", \\"hello\\"), Entry(\\"hi\\", \\"hi\\"))",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"hello\\", \\"hello\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"hello\\", \\"hello\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"), Entry(\\"he\\", \\"he\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "(Entry(\\"hello\\", \\"hello\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"hi\\", \\"hi\\"), Entry(\\"he\\", \\"he\\"))",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "(Entry(\\"hello\\", \\"hello\\"), Entry(\\"ho\\", \\"ho\\"))",
"\\\\(\\\\\\"ho\\\\\\"\\\\)" -> "(Entry(\\"ho\\", \\"ho\\"))",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "(Entry(\\"ho\\", \\"ho\\"), Entry(\\"hi\\", \\"hi\\"))",
"\\\\(\\\\\\"HI\\\\\\"\\\\)" -> "(Entry(\\"HI\\", \\"HI\\"))",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HEY\\\\\\", \\\\\\"HOWDY\\\\\\"\\\\)" -> "(Entry(\\"HO\\", \\"HO\\"), Entry(\\"HEY\\", \\"HEY\\"), Entry(\\"HOWDY\\", \\"HOWDY\\"))",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HEY\\\\\\", \\\\\\"HOWDY\\\\\\"\\\\)" -> "(Entry(\\"HE\\", \\"HE\\"), Entry(\\"HEY\\", \\"HEY\\"), Entry(\\"HOWDY\\", \\"HOWDY\\"))",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HELLO\\\\\\"\\\\)" -> "(Entry(\\"HI\\", \\"HI\\"), Entry(\\"HELLO\\", \\"HELLO\\"))",
"\\\\(\\\\\\"HELLO\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "(Entry(\\"HELLO\\", \\"HELLO\\"), Entry(\\"HI\\", \\"HI\\"))",
"\\\\(\\\\\\"HELLO\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "(Entry(\\"HELLO\\", \\"HELLO\\"), Entry(\\"HO\\", \\"HO\\"))",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "(Entry(\\"HE\\", \\"HE\\"), Entry(\\"HI\\", \\"HI\\"))",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "(Entry(\\"HE\\", \\"HE\\"), Entry(\\"HO\\", \\"HO\\"))",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "(Entry(\\"HI\\", \\"HI\\"), Entry(\\"HE\\", \\"HE\\"))",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "(Entry(\\"HI\\", \\"HI\\"), Entry(\\"HO\\", \\"HO\\"))",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "(Entry(\\"HO\\", \\"HO\\"), Entry(\\"HE\\", \\"HE\\"))",
"\\\\(\\\\\\"HO\\\\\\"\\\\)" -> "(Entry(\\"HO\\", \\"HO\\"))",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "(Entry(\\"HO\\", \\"HO\\"), Entry(\\"HI\\", \\"HI\\"))",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HELLO\\\\\\"\\\\)" -> "(Entry(\\"HO\\", \\"HO\\"), Entry(\\"HELLO\\", \\"HELLO\\"))",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(Entry(\\"happy\\", \\"happy\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"))",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(Entry(\\"happy\\", \\"happy\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"to\\", \\"to\\"))",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"dear\\\\\\"\\\\)" -> "(Entry(\\"happy\\", \\"happy\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"dear\\", \\"dear\\"))",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"too\\\\\\"\\\\)" -> "(Entry(\\"happy\\", \\"happy\\"), Entry(\\"happy\\", \\"happy\\"), Entry(\\"happy\\", \\"happy\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"too\\", \\"too\\"))",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(Entry(\\"happy\\", \\"happy\\"), Entry(\\"happy\\", \\"happy\\"), Entry(\\"happy\\", \\"happy\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"))",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(Entry(\\"happy\\", \\"happy\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"))",
"\\\\(\\\\\\"YOU\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"BIRTHDAY\\\\\\", \\\\\\"HAPPY\\\\\\"\\\\)" -> "(Entry(\\"YOU\\", \\"YOU\\"), Entry(\\"TO\\", \\"TO\\"), Entry(\\"BIRTHDAY\\", \\"BIRTHDAY\\"), Entry(\\"HAPPY\\", \\"HAPPY\\"))",
"\\\\(\\\\\\"have\\\\\\", \\\\\\"a\\\\\\", \\\\\\"nice\\\\\\", \\\\\\"day\\\\\\"\\\\)" -> "(Entry(\\"have\\", \\"have\\"), Entry(\\"a\\", \\"a\\"), Entry(\\"nice\\", \\"nice\\"), Entry(\\"day\\", \\"day\\"))",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "(Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"), Entry(\\"fum\\", \\"fum\\"))",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FUM \\\\\\"\\\\)" -> "(Entry(\\" FEE \\", \\" FEE \\"), Entry(\\" FIE \\", \\" FIE \\"), Entry(\\" FOE \\", \\" FOE \\"), Entry(\\" FUM \\", \\" FUM \\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FU\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FUM\\", \\"FUM\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FU\\", \\"FU\\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUM\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FUM\\", \\"FUM\\"))",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FUM\\", \\"FUM\\"), Entry(\\"FOE\\", \\"FOE\\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FUM\\", \\"FUM\\"), Entry(\\"FOE\\", \\"FOE\\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FU\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FAM\\", \\"FAM\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FU\\", \\"FU\\"))",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FAM \\\\\\"\\\\)" -> "(Entry(\\" FEE \\", \\" FEE \\"), Entry(\\" FIE \\", \\" FIE \\"), Entry(\\" FOE \\", \\" FOE \\"), Entry(\\" FAM \\", \\" FAM \\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FAM\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FAM\\", \\"FAM\\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FAM\\", \\"FAM\\"), Entry(\\"FOE\\", \\"FOE\\"))",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FUU \\\\\\"\\\\)" -> "(Entry(\\" FEE \\", \\" FEE \\"), Entry(\\" FIE \\", \\" FIE \\"), Entry(\\" FOE \\", \\" FOE \\"), Entry(\\" FUU \\", \\" FUU \\"))",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUU\\\\\\"\\\\)" -> "(Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FUU\\", \\"FUU\\"))",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FAM\\", \\"FAM\\"), Entry(\\"FOE\\", \\"FOE\\"))",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FUM\\", \\"FUM\\"), Entry(\\"FOE\\", \\"FOE\\"))",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FAM\\\\\\"\\\\)" -> "(Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FAM\\", \\"FAM\\"))",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUM\\\\\\"\\\\)" -> "(Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FUM\\", \\"FUM\\"))",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUU\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "(Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"), Entry(\\"FUU\\", \\"FUU\\"), Entry(\\"FOE\\", \\"FOE\\"))",
"\\\\(\\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\"\\\\)" -> "(Entry(\\"FUM\\", \\"FUM\\"), Entry(\\"FOE\\", \\"FOE\\"), Entry(\\"FIE\\", \\"FIE\\"), Entry(\\"FEE\\", \\"FEE\\"))",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FaM \\\\\\"\\\\)" -> "(Entry(\\" FEE \\", \\" FEE \\"), Entry(\\" FIE \\", \\" FIE \\"), Entry(\\" FOE \\", \\" FOE \\"), Entry(\\" FaM \\", \\" FaM \\"))",
"\\\\(\\\\\\" FUM \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FEE \\\\\\"\\\\)" -> "(Entry(\\" FUM \\", \\" FUM \\"), Entry(\\" FOE \\", \\" FOE \\"), Entry(\\" FIE \\", \\" FIE \\"), Entry(\\" FEE \\", \\" FEE \\"))",
"\\\\(\\\\\\"HAPPY\\\\\\", \\\\\\"BIRTHDAY\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(Entry(\\"HAPPY\\", \\"HAPPY\\"), Entry(\\"BIRTHDAY\\", \\"BIRTHDAY\\"), Entry(\\"TO\\", \\"TO\\"), Entry(\\"YOU\\", \\"YOU\\"))",
"\\\\(\\\\\\" HAPPY \\\\\\", \\\\\\" BIRTHDAY \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" YOU \\\\\\"\\\\)" -> "(Entry(\\" HAPPY \\", \\" HAPPY \\"), Entry(\\" BIRTHDAY \\", \\" BIRTHDAY \\"), Entry(\\" TO \\", \\" TO \\"), Entry(\\" YOU \\", \\" YOU \\"))",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(Entry(\\"NICE\\", \\"NICE\\"), Entry(\\"TO\\", \\"TO\\"), Entry(\\"MEET\\", \\"MEET\\"), Entry(\\"YOU\\", \\"YOU\\"))",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\", \\\\\\"TOO\\\\\\"\\\\)" -> "(Entry(\\"NICE\\", \\"NICE\\"), Entry(\\"TO\\", \\"TO\\"), Entry(\\"MEET\\", \\"MEET\\"), Entry(\\"YOU\\", \\"YOU\\"), Entry(\\"TOO\\", \\"TOO\\"))",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(Entry(\\"NICE\\", \\"NICE\\"), Entry(\\"MEET\\", \\"MEET\\"), Entry(\\"YOU\\", \\"YOU\\"))",
"\\\\(\\\\\\"nice\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(Entry(\\"nice\\", \\"nice\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"))",
"\\\\(\\\\\\"nice\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(Entry(\\"nice\\", \\"nice\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"))",
"\\\\(\\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "(Entry(\\"to\\", \\"to\\"), Entry(\\"you\\", \\"you\\"))",
"\\\\(\\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(Entry(\\"to\\", \\"to\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"to\\", \\"to\\"))",
"\\\\(\\\\\\"to\\\\\\"\\\\)" -> "(Entry(\\"to\\", \\"to\\"))",
"\\\\(\\\\\\"TO\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "(Entry(\\"TO\\", \\"TO\\"), Entry(\\"YOU\\", \\"YOU\\"))",
"\\\\(\\\\\\" TO \\\\\\", \\\\\\" YOU \\\\\\"\\\\)" -> "(Entry(\\" TO \\", \\" TO \\"), Entry(\\" YOU \\", \\" YOU \\"))",
"\\\\(\\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\"\\\\)" -> "(Entry(\\" TO \\", \\" TO \\"), Entry(\\" TO \\", \\" TO \\"), Entry(\\" TO \\", \\" TO \\"), Entry(\\" TO \\", \\" TO \\"))",
"\\\\(\\\\\\" TO \\\\\\"\\\\)" -> "(Entry(\\" TO \\", \\" TO \\"))",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(Entry(\\"too\\", \\"too\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"happy\\", \\"happy\\"))",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(Entry(\\"too\\", \\"too\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"happy\\", \\"happy\\"), Entry(\\"happy\\", \\"happy\\"))",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(Entry(\\"too\\", \\"too\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"happy\\", \\"happy\\"))",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"happy\\", \\"happy\\"))",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"happy\\", \\"happy\\"))",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "(Entry(\\"you\\", \\"you\\"), Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"), Entry(\\"birthday\\", \\"birthday\\"), Entry(\\"happy\\", \\"happy\\"), Entry(\\"happy\\", \\"happy\\"))",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "(Entry(\\"you\\", \\"you\\"), Entry(\\"to\\", \\"to\\"))",
"\\\\(\\\\\\"YOU\\\\\\", \\\\\\"TO\\\\\\"\\\\)" -> "(Entry(\\"YOU\\", \\"YOU\\"), Entry(\\"TO\\", \\"TO\\"))",
"\\\\(\\\\\\" YOU \\\\\\", \\\\\\" TO \\\\\\"\\\\)" -> "(Entry(\\" YOU \\", \\" YOU \\"), Entry(\\" TO \\", \\" TO \\"))",
"\\\\\\"\\\\\\\\\\"happy\\\\\\\\\\", \\\\\\\\\\"birthday\\\\\\\\\\", \\\\\\\\\\"to\\\\\\\\\\", \\\\\\\\\\"you\\\\\\\\\\\\\\"\\\\\\"" -> "\\"happy=happy, birthday=birthday, to=to, you=you\\"",
"\\\\\\"\\\\\\\\\\"HAPPY\\\\\\\\\\", \\\\\\\\\\"BIRTHDAY\\\\\\\\\\", \\\\\\\\\\"TO\\\\\\\\\\", \\\\\\\\\\"YOU\\\\\\\\\\\\\\"\\\\\\"" -> "\\"HAPPY=HAPPY, BIRTHDAY=BIRTHDAY, TO=TO, YOU=YOU\\"",
"\\\\\\\\\\"ho\\\\\\\\\\", \\\\\\\\\\"hey\\\\\\\\\\", \\\\\\\\\\"howdy\\\\\\\\\\"" -> "ho=ho, hey=hey, howdy=howdy",
"\\\\\\\\\\"ho\\\\\\\\\\", \\\\\\\\\\"hello\\\\\\\\\\"" -> "ho=ho, hello=hello",
"\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"hey\\\\\\\\\\", \\\\\\\\\\"howdy\\\\\\\\\\"" -> "hi=hi, hey=hey, howdy=howdy",
"\\\\\\\\\\"hello\\\\\\\\\\", \\\\\\\\\\"hi\\\\\\\\\\"" -> "hello=hello, hi=hi",
"\\\\\\\\\\"happy\\\\\\\\\\", \\\\\\\\\\"birthday\\\\\\\\\\", \\\\\\\\\\"to\\\\\\\\\\", \\\\\\\\\\"you\\\\\\\\\\"" -> "happy=happy, birthday=birthday, to=to, you=you",
"\\\\\\\\\\"have\\\\\\\\\\", \\\\\\\\\\"a\\\\\\\\\\", \\\\\\\\\\"nice\\\\\\\\\\", \\\\\\\\\\"day\\\\\\\\\\"" -> "have=have, a=a, nice=nice, day=day",
"\\\\\\\\\\"HELLO\\\\\\\\\\", \\\\\\\\\\"HI\\\\\\\\\\"" -> "HELLO=HELLO, HI=HI",
"\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"he\\\\\\\\\\"" -> "hi=hi, he=he",
"\\\\\\\\\\"hi\\\\\\\\\\"" -> "hi=hi",
"\\\\\\\\\\"ho\\\\\\\\\\"" -> "ho=ho",
"\\\\\\\\\\"hello\\\\\\\\\\"" -> "hello=hello",
"\\\\\\\\\\"HO\\\\\\\\\\", \\\\\\\\\\"HEY\\\\\\\\\\", \\\\\\\\\\"HOWDY\\\\\\\\\\"" -> "HO=HO, HEY=HEY, HOWDY=HOWDY",
"\\\\\\\\\\"HE\\\\\\\\\\", \\\\\\\\\\"HEY\\\\\\\\\\", \\\\\\\\\\"HOWDY\\\\\\\\\\"" -> "HE=HE, HEY=HEY, HOWDY=HOWDY",
"\\\\\\\\\\"HE\\\\\\\\\\", \\\\\\\\\\"HI\\\\\\\\\\"" -> "HE=HE, HI=HI",
"\\\\\\\\\\"HI\\\\\\\\\\", \\\\\\\\\\"HE\\\\\\\\\\"" -> "HI=HI, HE=HE",
"\\\\\\\\\\"HO\\\\\\\\\\", \\\\\\\\\\"HE\\\\\\\\\\"" -> "HO=HO, HE=HE",
"\\\\\\\\\\"HI\\\\\\\\\\"" -> "HI=HI",
"\\\\\\\\\\"HO\\\\\\\\\\"" -> "HO=HO",
"\\\\\\\\\\"HELLO\\\\\\\\\\"" -> "HELLO=HELLO",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "fee=fee, fie=fie, foe=foe, fum=fum",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fu\\\\\\\\\\"" -> "fee=fee, fum=fum, foe=foe, fu=fu",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fam\\\\\\\\\\"" -> "fee=fee, fie=fie, foe=foe, fam=fam",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "fee=fee, fie=fie, fum=fum, foe=foe",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "fie=fie, fee=fee, fum=fum, foe=foe",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "fie=fie, fee=fee, foe=foe, fum=fum",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fam\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "fie=fie, fee=fee, fam=fam, foe=foe",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\"" -> "fum=fum, foe=foe, fie=fie, fee=fee",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "fum=fum, fum=fum, fum=fum, fum=fum",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "fum=fum, fum=fum, fum=fum",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "fum=fum, fum=fum",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "fum=fum, foe=foe",
"\\\\\\\\\\"fum\\\\\\\\\\"" -> "fum=fum",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\"" -> "FEE=FEE, FIE=FIE, FOE=FOE, FUM=FUM",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUU\\\\\\\\\\"" -> "FEE=FEE, FIE=FIE, FOE=FOE, FUU=FUU",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\"" -> "FEE=FEE, FIE=FIE, FOE=FOE, FAM=FAM",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "FEE=FEE, FIE=FIE, FAM=FAM, FOE=FOE",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "FEE=FEE, FIE=FIE, FUM=FUM, FOE=FOE",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "FIE=FIE, FEE=FEE, FUM=FUM, FOE=FOE",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "FIE=FIE, FEE=FEE, FAM=FAM, FOE=FOE",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\"" -> "FIE=FIE, FEE=FEE, FOE=FOE, FUM=FUM",
"\\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\"" -> "FUM=FUM, FOE=FOE, FIE=FIE, FEE=FEE",
"\\\\\\\\\\"you\\\\\\\\\\"" -> "you=you",
"\\\\\\\\\\"to\\\\\\\\\\"" -> "to=to",
"of \\\\(1, 2, 3\\\\)" -> "of (1=1, 2=2, 3=3)",
"of \\\\(1, 2, 8\\\\)" -> "of (1=1, 2=2, 8=8)",
"of \\\\(1, 3, 4\\\\)" -> "of (1=1, 3=3, 4=4)",
"of \\\\(1, 6, 8\\\\)" -> "of (1=1, 6=6, 8=8)",
"of \\\\(2, 3, 1\\\\)" -> "of (2=2, 3=3, 1=1)",
"of \\\\(2, 3, 4\\\\)" -> "of (2=2, 3=3, 4=4)",
"of \\\\(2, 3, 5\\\\)" -> "of (2=2, 3=3, 5=5)",
"of \\\\(2, 3, 8\\\\)" -> "of (2=2, 3=3, 8=8)",
"of \\\\(2, 6, 8\\\\)" -> "of (2=2, 6=6, 8=8)",
"of \\\\(3, 6, 8\\\\)" -> "of (3=3, 6=6, 8=8)",
"of \\\\(3, 6, 9\\\\)" -> "of (3=3, 6=6, 9=9)",
"of \\\\(4, 3, 2\\\\)" -> "of (4=4, 3=3, 2=2)",
"of \\\\(6, 7, 8\\\\)" -> "of (6=6, 7=7, 8=8)",
"of \\\\(\\\\\\\\\\"ho\\\\\\\\\\"\\\\)" -> "of (ho=ho)",
"of \\\\(\\\\\\\\\\"hi\\\\\\\\\\"\\\\)" -> "of (hi=hi)",
"of \\\\(\\\\\\\\\\"he\\\\\\\\\\"\\\\)" -> "of (he=he)",
"of \\\\(\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"hello\\\\\\\\\\"\\\\)" -> "of (hi=hi, hello=hello)",
"of \\\\(\\\\\\\\\\"HI\\\\\\\\\\"\\\\)" -> "of (HI=HI)",
"\\\\\\"\\\\(1, 2, 3\\\\)\\\\\\"" -> "\\"(1=1, 2=2, 3=3)\\"",
"\\\\\\"\\\\(1, 2, 8\\\\)\\\\\\"" -> "\\"(1=1, 2=2, 8=8)\\"",
"\\\\\\"\\\\(1, 3, 2\\\\)\\\\\\"" -> "\\"(1=1, 3=3, 2=2)\\"",
"\\\\\\"\\\\(1, 3, 4\\\\)\\\\\\"" -> "\\"(1=1, 3=3, 4=4)\\"",
"\\\\\\"\\\\(1, 6, 8\\\\)\\\\\\"" -> "\\"(1=1, 6=6, 8=8)\\"",
"\\\\\\"\\\\(2, 3, 1\\\\)\\\\\\"" -> "\\"(2=2, 3=3, 1=1)\\"",
"\\\\\\"\\\\(2, 3, 4\\\\)\\\\\\"" -> "\\"(2=2, 3=3, 4=4)\\"",
"\\\\\\"\\\\(2, 3, 8\\\\)\\\\\\"" -> "\\"(2=2, 3=3, 8=8)\\"",
"\\\\\\"\\\\(3, 1, 2\\\\)\\\\\\"" -> "\\"(3=3, 1=1, 2=2)\\"",
"\\\\\\"\\\\(3, 6, 8\\\\)\\\\\\"" -> "\\"(3=3, 6=6, 8=8)\\"",
"\\\\\\"\\\\(4, 2, 3\\\\)\\\\\\"" -> "\\"(4=4, 2=2, 3=3)\\"",
"\\\\(\\\\\\"1, 2, 3\\\\\\"\\\\)" -> "(\\"1=1, 2=2, 3=3\\")",
"\\\\(\\\\\\"1, 2, 8\\\\\\"\\\\)" -> "(\\"1=1, 2=2, 8=8\\")",
"\\\\(\\\\\\"1, 3, 4\\\\\\"\\\\)" -> "(\\"1=1, 3=3, 4=4\\")",
"\\\\(\\\\\\"1, 6, 8\\\\\\"\\\\)" -> "(\\"1=1, 6=6, 8=8\\")",
"\\\\(\\\\\\"2, 3, 1\\\\\\"\\\\)" -> "(\\"2=2, 3=3, 1=1\\")",
"\\\\(\\\\\\"2, 3, 4\\\\\\"\\\\)" -> "(\\"2=2, 3=3, 4=4\\")",
"\\\\(\\\\\\"2, 3, 5\\\\\\"\\\\)" -> "(\\"2=2, 3=3, 5=5\\")",
"\\\\(\\\\\\"2, 3, 8\\\\\\"\\\\)" -> "(\\"2=2, 3=3, 8=8\\")",
"\\\\(\\\\\\"2, 6, 8\\\\\\"\\\\)" -> "(\\"2=2, 6=6, 8=8\\")",
"\\\\(\\\\\\"3, 6, 8\\\\\\"\\\\)" -> "(\\"3=3, 6=6, 8=8\\")",
"\\\\(\\\\\\"3, 6, 9\\\\\\"\\\\)" -> "(\\"3=3, 6=6, 9=9\\")",
"\\\\(\\\\\\"6, 7, 8\\\\\\"\\\\)" -> "(\\"6=6, 7=7, 8=8\\")",
"List\\\\(to\\\\)" -> "javaMap(Entry(to, to))",
"List\\\\(ho\\\\)" -> "javaMap(Entry(ho, ho))",
"List\\\\(hi\\\\)" -> "javaMap(Entry(hi, hi))",
"List\\\\(hey\\\\)" -> "javaMap(Entry(hey, hey))",
"\\\\(0, 1, 1, 1, 2, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(3, 3))",
"\\\\(0, 1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(2, 2), Entry(2, 2), Entry(3, 3), Entry(3, 3), Entry(3, 3))",
"\\\\(0, 1, 1, 2, 3, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(3, 3), Entry(3, 3))",
"\\\\(0, 1, 1, 2, 3, 3, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(3, 3), Entry(3, 3), Entry(3, 3))",
"\\\\(0, 1, 2, 2, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(2, 2), Entry(2, 2), Entry(3, 3))",
"\\\\(0, 1, 2, 2, 3, 3, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(2, 2), Entry(2, 2), Entry(3, 3), Entry(3, 3), Entry(3, 3))",
"\\\\(0, 1, 2, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(2, 2), Entry(3, 3))",
"\\\\(0, 1, 2, 3, 3\\\\)" -> "(Entry(0, 0), Entry(1, 1), Entry(2, 2), Entry(3, 3), Entry(3, 3))",
"\\\\(1, 1, 1, 2, 3\\\\)" -> "(Entry(1, 1), Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(3, 3))",
"\\\\(1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "(Entry(1, 1), Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(2, 2), Entry(2, 2), Entry(3, 3), Entry(3, 3), Entry(3, 3))",
"\\\\(1, 1, 2, 3, 3\\\\)" -> "(Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(3, 3), Entry(3, 3))",
"\\\\(1, 1, 2, 3, 3, 3\\\\)" -> "(Entry(1, 1), Entry(1, 1), Entry(2, 2), Entry(3, 3), Entry(3, 3), Entry(3, 3))",
"\\\\(1, 2, 2, 3\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(2, 2), Entry(3, 3))",
"\\\\(1, 2, 2, 3, 3, 3\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(2, 2), Entry(3, 3), Entry(3, 3), Entry(3, 3))",
"\\\\(1, 2, 3\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(3, 3))",
"\\\\(1, 2, 3, 3\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(3, 3), Entry(3, 3))",
"\\\\(1, 2, 5\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(5, 5))",
"\\\\(1, 2, 8\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(8, 8))",
"\\\\(1, 2, 9\\\\)" -> "(Entry(1, 1), Entry(2, 2), Entry(9, 9))",
"\\\\(1, 3, 2\\\\)" -> "(Entry(1, 1), Entry(3, 3), Entry(2, 2))",
"\\\\(1, 3, 4\\\\)" -> "(Entry(1, 1), Entry(3, 3), Entry(4, 4))",
"\\\\(1, 3, 8\\\\)" -> "(Entry(1, 1), Entry(3, 3), Entry(8, 8))",
"\\\\(1, 6, 8\\\\)" -> "(Entry(1, 1), Entry(6, 6), Entry(8, 8))",
"\\\\(2, 1, 5\\\\)" -> "(Entry(2, 2), Entry(1, 1), Entry(5, 5))",
"\\\\(2, 2, 3, 4\\\\)" -> "(Entry(2, 2), Entry(2, 2), Entry(3, 3), Entry(4, 4))",
"\\\\(2, 3, 1\\\\)" -> "(Entry(2, 2), Entry(3, 3), Entry(1, 1))",
"\\\\(2, 3, 4\\\\)" -> "(Entry(2, 2), Entry(3, 3), Entry(4, 4))",
"\\\\(2, 3, 5\\\\)" -> "(Entry(2, 2), Entry(3, 3), Entry(5, 5))",
"\\\\(2, 3, 8\\\\)" -> "(Entry(2, 2), Entry(3, 3), Entry(8, 8))",
"\\\\(2, 6, 8\\\\)" -> "(Entry(2, 2), Entry(6, 6), Entry(8, 8))",
"\\\\(3, 1, 2\\\\)" -> "(Entry(3, 3), Entry(1, 1), Entry(2, 2))",
"\\\\(3, 1, 5\\\\)" -> "(Entry(3, 3), Entry(1, 1), Entry(5, 5))",
"\\\\(3, 2, 1\\\\)" -> "(Entry(3, 3), Entry(2, 2), Entry(1, 1))",
"\\\\(3, 2, 1, 0\\\\)" -> "(Entry(3, 3), Entry(2, 2), Entry(1, 1), Entry(0, 0))",
"\\\\(3, 2, 8\\\\)" -> "(Entry(3, 3), Entry(2, 2), Entry(8, 8))",
"\\\\(3, 4, 2\\\\)" -> "(Entry(3, 3), Entry(4, 4), Entry(2, 2))",
"\\\\(3, 4, 5\\\\)" -> "(Entry(3, 3), Entry(4, 4), Entry(5, 5))",
"\\\\(3, 6, 5\\\\)" -> "(Entry(3, 3), Entry(6, 6), Entry(5, 5))",
"\\\\(3, 6, 8\\\\)" -> "(Entry(3, 3), Entry(6, 6), Entry(8, 8))",
"\\\\(3, 6, 9\\\\)" -> "(Entry(3, 3), Entry(6, 6), Entry(9, 9))",
"\\\\(3, 8, 5\\\\)" -> "(Entry(3, 3), Entry(8, 8), Entry(5, 5))",
"\\\\(4, 2, 3\\\\)" -> "(Entry(4, 4), Entry(2, 2), Entry(3, 3))",
"\\\\(4, 3, 2\\\\)" -> "(Entry(4, 4), Entry(3, 3), Entry(2, 2))",
"\\\\(5, 3, 4\\\\)" -> "(Entry(5, 5), Entry(3, 3), Entry(4, 4))",
"\\\\(5, 7, 9\\\\)" -> "(Entry(5, 5), Entry(7, 7), Entry(9, 9))",
"\\\\(6, 7, 8\\\\)" -> "(Entry(6, 6), Entry(7, 7), Entry(8, 8))",
"\\\\(8, 2, 3, 4\\\\)" -> "(Entry(8, 8), Entry(2, 2), Entry(3, 3), Entry(4, 4))",
"\\\\(8, 2, 2, 3, 4\\\\)" -> "(Entry(8, 8), Entry(2, 2), Entry(2, 2), Entry(3, 3), Entry(4, 4))",
"\\\\(8, 3, 1\\\\)" -> "(Entry(8, 8), Entry(3, 3), Entry(1, 1))",
"\\\\(8, 3, 4\\\\)" -> "(Entry(8, 8), Entry(3, 3), Entry(4, 4))",
"\\\\(8, 4, 3, 2\\\\)" -> "(Entry(8, 8), Entry(4, 4), Entry(3, 3), Entry(2, 2))",
"\\\\(1, 3, Nil\\\\)" -> "(Entry(1, 1), Entry(3, 3), Map())",
"List" -> "javaMap",
"listsNil" -> "listsMap",
"Nil" -> "javaMap()",
"LinkedjavaMap" -> "LinkedList"
)
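// A minimal sketch of how a mapping table like the one above is typically
// consumed (a hypothetical helper -- the generator's real entry point may
// differ): each (pattern, replacement) pair is a regex substitution applied
// in order via String.replaceAll, so entry order matters. For example, the
// blanket "List" -> "javaMap" rewrite also turns "LinkedList" into
// "LinkedjavaMap", and the final "LinkedjavaMap" -> "LinkedList" entry
// exists to repair exactly that.
def foldMappings(line: String, mapping: List[(String, String)]): String =
  mapping.foldLeft(line) { case (acc, (pattern, replacement)) =>
    acc.replaceAll(pattern, replacement)
  }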
val sortedJavaMapMapping =
(javaMapMapping map { case (key, value) =>
(key,
value.replace("javaMap", "javaSortedMap")
.replace("java.util.Map[", "java.util.SortedMap["))
}) ++
List(
"LinkedjavaSortedMap" -> "LinkedList"
)
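// sortedJavaMapMapping reuses javaMapMapping wholesale: only the replacement
// side is rewritten (javaMap -> javaSortedMap, java.util.Map -> java.util.SortedMap)
// while the regex keys stay untouched. The rewritten "List" -> "javaSortedMap"
// entry now turns "LinkedList" into "LinkedjavaSortedMap", which the old
// "LinkedjavaMap" fixup no longer matches, so a fresh
// "LinkedjavaSortedMap" -> "LinkedList" entry is appended instead.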
val stringMapping =
List(
"ListShould" -> "StringShould",
//"List\\\\[String\\\\]" -> "List[Char]",
//"Vector\\\\[List\\\\[Int\\\\]\\\\]" -> "Vector[List[Char]]",
"new Equality\\\\[String\\\\]" -> "new Equality[Char]",
"def areEqual\\\\(a: String, b: Any\\\\): Boolean = a != b" -> "def areEqual(a: Char, b: Any): Boolean = a != b",
"def areEqual\\\\(a: List\\\\[String\\\\], b: Any\\\\): Boolean = a != b" -> "def areEqual(a: String, b: Any): Boolean = a != b",
"def areEqual\\\\(a: String, b: Any\\\\): Boolean = a.toUpperCase == b" -> "def areEqual(a: Char, b: Any): Boolean = a.toString.toUpperCase.toCharArray()(0) == b",
//"def areEqual\\\\(a: List\\\\[String\\\\], b: Any\\\\): Boolean = a.map\\\\(\\\\_.toUpperCase\\\\) == b" -> "def areEqual(a: String, b: Any): Boolean = a.toUpperCase == b",
//"def areEqual\\\\(a: String, b: Any\\\\)" -> "def areEqual(a: Char, b: Any)",
"def areEqual\\\\(a: String, b: Any\\\\): Boolean = upperCase\\\\(a\\\\) == upperCase\\\\(b\\\\)" -> "def areEqual(a: Char, b: Any): Boolean = upperCase(a) == upperCase(b)",
"def areEqual\\\\(a: List\\\\[String\\\\], b: Any\\\\): Boolean = a.map\\\\(\\\\_\\\\.toUpperCase\\\\) == b" -> "def areEqual(a: String, b: Any): Boolean = a.toUpperCase == b",
"def areEqual\\\\(a: String, b: Any\\\\): Boolean = upperCase\\\\(a\\\\) == b" -> "def areEqual(a: Char, b: Any): Boolean = upperCase(a) == b",
"defaultEquality\\\\[String\\\\]" -> "defaultEquality[Char]",
" and trimmed" -> "",
"//ADDITIONAL//" -> (stringLowerCased),
"LinkedList" -> "TempL",
"List\\\\[String\\\\]" -> "String",
"List\\\\[Int\\\\]" -> "String",
"List\\\\(\\\\\\"fum\\\\\\"\\\\)" -> "\\"u\\"",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "\\"up\\"",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"fu\\\\\\"\\\\)" -> "\\"uf\\"",
"List\\\\(\\\\\\"to\\\\\\"\\\\)" -> "\\"o\\"",
"List\\\\(\\\\\\"ho\\\\\\"\\\\)" -> "\\"o\\"",
"List\\\\(1\\\\)" -> "\\"1\\"",
"List\\\\(2\\\\)" -> "\\"2\\"",
"List\\\\(3\\\\)" -> "\\"3\\"",
"List\\\\(8\\\\)" -> "\\"8\\"",
"List\\\\(1, 2\\\\)" -> "\\"12\\"",
"List\\\\(2, 3\\\\)" -> "\\"23\\"",
"List\\\\(0, 1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "\\"0111222333\\"",
"List\\\\(0, 1, 2, 2, 3\\\\)" -> "\\"01223\\"",
"List\\\\(0, 1, 2, 3, 3\\\\)" -> "\\"01233\\"",
"List\\\\(0, 1, 2, 3\\\\)" -> "\\"0123\\"",
"List\\\\(0, 1, 1, 1, 2, 3\\\\)" -> "\\"011123\\"",
"List\\\\(0, 1, 1, 2, 3, 3\\\\)" -> "\\"011233\\"",
"List\\\\(0, 1, 1, 2, 3, 3, 3\\\\)" -> "\\"0112333\\"",
"List\\\\(0, 1, 2, 2, 3, 3, 3\\\\)" -> "\\"0122333\\"",
"List\\\\(1, 1, 1, 2, 3\\\\)" -> "\\"11123\\"",
"List\\\\(1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "\\"111222333\\"",
"List\\\\(1, 1, 2, 3, 3\\\\)" -> "\\"11233\\"",
"List\\\\(1, 1, 2, 3, 3, 3\\\\)" -> "\\"112333\\"",
"List\\\\(1, 2, 3\\\\)" -> "\\"123\\"",
"List\\\\(1, 2, 3, 3\\\\)" -> "\\"1233\\"",
"List\\\\(1, 2, 2, 3\\\\)" -> "\\"1223\\"",
"List\\\\(1, 2, 2, 3, 3, 3\\\\)" -> "\\"122333\\"",
"List\\\\(2, 2, 3, 4\\\\)" -> "\\"2234\\"",
"List\\\\(2, 3, 4\\\\)" -> "\\"234\\"",
"List\\\\(3, 2, 1\\\\)" -> "\\"321\\"",
"List\\\\(3, 2, 1, 0\\\\)" -> "\\"3210\\"",
"List\\\\(4, 3, 2\\\\)" -> "\\"432\\"",
"List\\\\(8, 2, 3, 4\\\\)" -> "\\"8234\\"",
"List\\\\(8, 2, 2, 3, 4\\\\)" -> "\\"82234\\"",
"List\\\\(8, 4, 3, 2\\\\)" -> "\\"8432\\"",
"List\\\\(\\\\\\"hi\\\\\\"\\\\)" -> "\\"i\\"",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "\\"il\\"",
"List\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "\\"eil\\"",
"List\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"liie\\"",
"List\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"lieee\\"",
"List\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"lie\\"",
"List\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "\\"dil\\"",
"List\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"die\\"",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"ie\\"",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "\\"io\\"",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"iie\\"",
"List\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "\\"ieee\\"",
"List\\\\(\\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "\\"oy\\"",
"List\\\\(\\\\\\"happy\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "\\"hoy\\"",
"List\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "\\"yo\\"",
"List\\\\(\\\\\\"nice\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "\\"nyo\\"",
"List\\\\(\\\\\\"nice\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "\\"noy\\"",
"List\\\\(\\\\\\"hey\\\\\\"\\\\)" -> "\\"e\\"",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"upie\\"",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"uupiie\\"",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"uupieee\\"",
"List\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"uupiiiee\\"",
"List\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"xupie\\"",
"List\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"xuupiie\\"",
"List\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"xuupieee\\"",
"List\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "\\"xuupiiiee\\"",
"List\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "\\"yobh\\"",
"List\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "\\"yoobbh\\"",
"List\\\\(\\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "\\"yyobhh\\"",
"List\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "\\"zyobh\\"",
"List\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "\\"zyoobbh\\"",
"List\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "\\"zyyobhh\\"",
"List\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "\\"hboy\\"",
"List\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"too\\\\\\"\\\\)" -> "\\"hhhboyz\\"",
"List\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "\\"hhhboy\\"",
"TempL" -> "LinkedList",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "('e', 'i', 'p', 'u')",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "('e', 'i', 'p', 'i', 'u')",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "('i', 'e', 'u', 'p')",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "('i', 'e', 'p', 'u')",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fu\\\\\\"\\\\)" -> "('e', 'u', 'p', 'f')",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "('e', 'i', 'u', 'p')",
"\\\\(\\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fam\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "('i', 'e', 'a', 'p')",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fam\\\\\\"\\\\)" -> "('e', 'i', 'p', 'a')",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "('u', 'p', 'i', 'e')",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "('x', 'u', 'p', 'i', 'e')",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "('x', 'u', 'u', 'p', 'i', 'i', 'e')",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "('x', 'u', 'u', 'p', 'i', 'e', 'e', 'e')",
"\\\\(\\\\\\"fex\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "('x', 'u', 'u', 'p', 'i', 'i', 'i', 'e', 'e')",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"fee\\\\\\"\\\\)" -> "('u', 'u', 'p', 'i', 'i', 'e')",
"\\\\(\\\\\\"fee\\\\\\", \\\\\\"fie\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "('e', 'i', 'p')",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hey\\\\\\", \\\\\\"howdy\\\\\\"\\\\)" -> "('o', 'e', 'd')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hey\\\\\\", \\\\\\"howdy\\\\\\"\\\\)" -> "('i', 'e', 'd')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "('i', 'l')",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "('o', 'l')",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "('d', 'i', 'l')",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('d', 'i', 'e')",
"\\\\(\\\\\\"howdy\\\\\\", \\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "('d', 'l', 'i')",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"foe\\\\\\"\\\\)" -> "('u', 'p')",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "('u', 'u', 'u')",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "('u', 'u')",
"\\\\(\\\\\\"fum\\\\\\"\\\\)" -> "('u')",
"\\\\(\\\\\\"hi\\\\\\"\\\\)" -> "('i')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('i', 'e')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "('i', 'E')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "('i', 'o')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('i', 'e', 'e', 'e')",
"\\\\(\\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('i', 'i', 'e')",
"\\\\(\\\\\\"he\\\\\\"\\\\)" -> "('e')",
"\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "('e', 'i')",
"\\\\(\\\\\\"he\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hello\\\\\\"\\\\)" -> "('e', 'i', 'l')",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "('l', 'i')",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('l', 'i', 'e')",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('l', 'i', 'e', 'e', 'e')",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"hi\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('l', 'i', 'i', 'e')",
"\\\\(\\\\\\"hello\\\\\\", \\\\\\"ho\\\\\\"\\\\)" -> "('l', 'o')",
"\\\\(\\\\\\"ho\\\\\\"\\\\)" -> "('o')",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"hi\\\\\\"\\\\)" -> "('o', 'i')",
"\\\\(\\\\\\"ho\\\\\\", \\\\\\"he\\\\\\"\\\\)" -> "('o', 'e')",
"\\\\(\\\\\\"HI\\\\\\"\\\\)" -> "('I')",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HEY\\\\\\", \\\\\\"HOWDY\\\\\\"\\\\)" -> "('O', 'Y', 'D')",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HEY\\\\\\", \\\\\\"HOWDY\\\\\\"\\\\)" -> "('E', 'Y', 'D')",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HELLO\\\\\\"\\\\)" -> "('I', 'L')",
"\\\\(\\\\\\"HELLO\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "('L', 'I')",
"\\\\(\\\\\\"HELLO\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "('L', 'O')",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "('E', 'I')",
"\\\\(\\\\\\"HE\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "('E', 'O')",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "('I', 'E')",
"\\\\(\\\\\\"HI\\\\\\", \\\\\\"HO\\\\\\"\\\\)" -> "('I', 'O')",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HE\\\\\\"\\\\)" -> "('O', 'E')",
"\\\\(\\\\\\"HO\\\\\\"\\\\)" -> "('O')",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HI\\\\\\"\\\\)" -> "('O', 'I')",
"\\\\(\\\\\\"HO\\\\\\", \\\\\\"HELLO\\\\\\"\\\\)" -> "('O', 'L')",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"dear\\\\\\"\\\\)" -> "('h', 'b', 'o', 'y', 'd')",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\", \\\\\\"too\\\\\\"\\\\)" -> "('h', 'h', 'h', 'b', 'o', 'y', 'z')",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "('h', 'b', 'o', 'y')",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "('h', 'b', 'o')",
"\\\\(\\\\\\"happy\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "('h', 'o', 'y')",
"\\\\(\\\\\\"have\\\\\\", \\\\\\"a\\\\\\", \\\\\\"nice\\\\\\", \\\\\\"day\\\\\\"\\\\)" -> "('h', 'a', 'n', 'd')",
"\\\\(\\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\", \\\\\\"fum\\\\\\"\\\\)" -> "('u', 'u', 'u', 'u')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUM\\\\\\"\\\\)" -> "('E', 'I', 'P', 'U')",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "('I', 'E', 'U', 'P')",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FUM \\\\\\"\\\\)" -> "('E', 'I', 'P', 'U')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FU\\\\\\"\\\\)" -> "('E', 'U', 'P', 'F')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "('E', 'I', 'U', 'P')",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FAM \\\\\\"\\\\)" -> "('E', 'I', 'P', 'A')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FAM\\\\\\"\\\\)" -> "('E', 'I', 'P', 'A')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "('E', 'I', 'A', 'P')",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FUU \\\\\\"\\\\)" -> "('E', 'I', 'P', 'D')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUU\\\\\\"\\\\)" -> "('E', 'I', 'P', 'D')",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "('I', 'E', 'A', 'P')",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "('I', 'E', 'U', 'P')",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FAM\\\\\\"\\\\)" -> "('I', 'E', 'P', 'A')",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FUM\\\\\\"\\\\)" -> "('I', 'E', 'P', 'U')",
"\\\\(\\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\", \\\\\\"FUU\\\\\\", \\\\\\"FOE\\\\\\"\\\\)" -> "('I', 'E', 'D', 'P')",
"\\\\(\\\\\\"FUM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FIE\\\\\\", \\\\\\"FEE\\\\\\"\\\\)" -> "('U', 'P', 'I', 'E')",
"\\\\(\\\\\\"FEE\\\\\\", \\\\\\"FAM\\\\\\", \\\\\\"FOE\\\\\\", \\\\\\"FU\\\\\\"\\\\)" -> "('E', 'A', 'P', 'D')",
"\\\\(\\\\\\" FEE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FaM \\\\\\"\\\\)" -> "('E', 'I', 'P', 'a')",
"\\\\(\\\\\\" FUM \\\\\\", \\\\\\" FOE \\\\\\", \\\\\\" FIE \\\\\\", \\\\\\" FEE \\\\\\"\\\\)" -> "('U', 'P', 'I', 'E')",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\", \\\\\\"TOO\\\\\\"\\\\)" -> "('N', 'O', 'M', 'Y', 'Z')",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "('N', 'O', 'M', 'Y')",
"\\\\(\\\\\\"NICE\\\\\\", \\\\\\"MEET\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "('N', 'M', 'Y')",
"\\\\(\\\\\\"nice\\\\\\", \\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "('n', 'o', 'y')",
"\\\\(\\\\\\"nice\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "('n', 'y', 'o')",
"\\\\(\\\\\\"to\\\\\\", \\\\\\"you\\\\\\"\\\\)" -> "('o', 'y')",
"\\\\(\\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "('o', 'o', 'o', 'o')",
"\\\\(\\\\\\"to\\\\\\"\\\\)" -> "('o')",
"\\\\(\\\\\\"TO\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "('O', 'Y')",
"\\\\(\\\\\\" TO \\\\\\", \\\\\\" YOU \\\\\\"\\\\)" -> "('O', 'Y')",
"\\\\(\\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" TO \\\\\\"\\\\)" -> "('O', 'O', 'O', 'O')",
"\\\\(\\\\\\" TO \\\\\\"\\\\)" -> "('O')",
"\\\\(\\\\\\"HAPPY\\\\\\", \\\\\\"BIRTHDAY\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"YOU\\\\\\"\\\\)" -> "('H', 'B', 'O', 'Y')",
"\\\\(\\\\\\" HAPPY \\\\\\", \\\\\\" BIRTHDAY \\\\\\", \\\\\\" TO \\\\\\", \\\\\\" YOU \\\\\\"\\\\)" -> "('H', 'B', 'O', 'Y')",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "('z', 'y', 'o', 'b', 'h')",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "('z', 'y', 'y', 'o', 'b', 'h', 'h')",
"\\\\(\\\\\\"too\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "('z', 'y', 'o', 'o', 'b', 'b', 'h')",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "('y', 'o', 'b', 'h')",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"you\\\\\\", \\\\\\"to\\\\\\", \\\\\\"birthday\\\\\\", \\\\\\"happy\\\\\\", \\\\\\"happy\\\\\\"\\\\)" -> "('y', 'y', 'o', 'b', 'h', 'h')",
"\\\\(\\\\\\"you\\\\\\", \\\\\\"to\\\\\\"\\\\)" -> "('y', 'o')",
"\\\\(\\\\\\"YOU\\\\\\", \\\\\\"TO\\\\\\"\\\\)" -> "('Y', 'O')",
"\\\\(\\\\\\" YOU \\\\\\", \\\\\\" TO \\\\\\"\\\\)" -> "('Y', 'O')",
"\\\\(\\\\\\"YOU\\\\\\", \\\\\\"TO\\\\\\", \\\\\\"BIRTHDAY\\\\\\", \\\\\\"HAPPY\\\\\\"\\\\)" -> "('Y', 'O', 'B', 'H')",
"\\\\\\"\\\\\\\\\\"happy\\\\\\\\\\", \\\\\\\\\\"birthday\\\\\\\\\\", \\\\\\\\\\"to\\\\\\\\\\", \\\\\\\\\\"you\\\\\\\\\\\\\\"\\\\\\"" -> "\\"'h', 'b', 'o', 'y'\\"",
"\\\\\\"\\\\\\\\\\"HAPPY\\\\\\\\\\", \\\\\\\\\\"BIRTHDAY\\\\\\\\\\", \\\\\\\\\\"TO\\\\\\\\\\", \\\\\\\\\\"YOU\\\\\\\\\\\\\\"\\\\\\"" -> "\\"'H', 'B', 'O', 'Y'\\"",
"\\\\\\\\\\"ho\\\\\\\\\\", \\\\\\\\\\"hey\\\\\\\\\\", \\\\\\\\\\"howdy\\\\\\\\\\"" -> "'o', 'e', 'd'",
"\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"hey\\\\\\\\\\", \\\\\\\\\\"howdy\\\\\\\\\\"" -> "'i', 'e', 'd'",
"\\\\\\\\\\"hello\\\\\\\\\\", \\\\\\\\\\"hi\\\\\\\\\\"" -> "'l', 'i'",
"\\\\\\\\\\"happy\\\\\\\\\\", \\\\\\\\\\"birthday\\\\\\\\\\", \\\\\\\\\\"to\\\\\\\\\\", \\\\\\\\\\"you\\\\\\\\\\"" -> "'h', 'b', 'o', 'y'",
"\\\\\\\\\\"have\\\\\\\\\\", \\\\\\\\\\"a\\\\\\\\\\", \\\\\\\\\\"nice\\\\\\\\\\", \\\\\\\\\\"day\\\\\\\\\\"" -> "'h', 'a', 'n', 'd'",
"\\\\\\\\\\"HELLO\\\\\\\\\\", \\\\\\\\\\"HI\\\\\\\\\\"" -> "'L', 'I'",
"\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"he\\\\\\\\\\"" -> "'i', 'e'",
"\\\\\\\\\\"hi\\\\\\\\\\"" -> "'i'",
"\\\\\\\\\\"ho\\\\\\\\\\"" -> "'o'",
"\\\\\\\\\\"hello\\\\\\\\\\"" -> "'l'",
"\\\\\\\\\\"HO\\\\\\\\\\", \\\\\\\\\\"HEY\\\\\\\\\\", \\\\\\\\\\"HOWDY\\\\\\\\\\"" -> "'O', 'Y', 'D'",
"\\\\\\\\\\"HE\\\\\\\\\\", \\\\\\\\\\"HEY\\\\\\\\\\", \\\\\\\\\\"HOWDY\\\\\\\\\\"" -> "'E', 'Y', 'D'",
"\\\\\\\\\\"HE\\\\\\\\\\", \\\\\\\\\\"HI\\\\\\\\\\"" -> "'E', 'I'",
"\\\\\\\\\\"HI\\\\\\\\\\", \\\\\\\\\\"HE\\\\\\\\\\"" -> "'I', 'E'",
"\\\\\\\\\\"HO\\\\\\\\\\", \\\\\\\\\\"HE\\\\\\\\\\"" -> "'O', 'E'",
"\\\\\\\\\\"HI\\\\\\\\\\"" -> "'I'",
"\\\\\\\\\\"HO\\\\\\\\\\"" -> "'O'",
"\\\\\\\\\\"HELLO\\\\\\\\\\"" -> "'L'",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "'e', 'i', 'p', 'u'",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fam\\\\\\\\\\"" -> "'e', 'i', 'p', 'a'",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "'e', 'i', 'u', 'p'",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\"" -> "'u', 'p', 'i', 'e'",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "'u', 'u', 'u', 'u'",
"\\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fu\\\\\\\\\\"" -> "'e', 'u', 'p', 'f'",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "'i', 'e', 'u', 'p'",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "'i', 'e', 'p', 'u'",
"\\\\\\\\\\"fie\\\\\\\\\\", \\\\\\\\\\"fee\\\\\\\\\\", \\\\\\\\\\"fam\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "'i', 'e', 'a', 'p'",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\"" -> "'E', 'I', 'P', 'U'",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUU\\\\\\\\\\"" -> "'E', 'I', 'P', 'D'",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\"" -> "'E', 'I', 'P', 'A'",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "'E', 'I', 'A', 'P'",
"\\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "'E', 'I', 'U', 'P'",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "'I', 'E', 'U', 'P'",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "'I', 'E', 'U', 'P'",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FAM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\"" -> "'I', 'E', 'A', 'P'",
"\\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FUM\\\\\\\\\\"" -> "'I', 'E', 'P', 'U'",
"\\\\\\\\\\"FUM\\\\\\\\\\", \\\\\\\\\\"FOE\\\\\\\\\\", \\\\\\\\\\"FIE\\\\\\\\\\", \\\\\\\\\\"FEE\\\\\\\\\\"" -> "'U', 'P', 'I', 'E'",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "'u', 'u', 'u'",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"fum\\\\\\\\\\"" -> "'u', 'u'",
"\\\\\\\\\\"fum\\\\\\\\\\", \\\\\\\\\\"foe\\\\\\\\\\"" -> "'u', 'p'",
"\\\\\\\\\\"fum\\\\\\\\\\"" -> "'u'",
"\\\\\\\\\\"you\\\\\\\\\\"" -> "'y'",
"\\\\\\\\\\"to\\\\\\\\\\"" -> "'o'",
"of \\\\(1, 2, 8\\\\)" -> "of ('1', '2', '8')",
"of \\\\(1, 3, 4\\\\)" -> "of ('1', '3', '4')",
"of \\\\(1, 6, 8\\\\)" -> "of ('1', '6', '8')",
"of \\\\(2, 3, 1\\\\)" -> "of ('2', '3', '1')",
"of \\\\(2, 3, 4\\\\)" -> "of ('2', '3', '4')",
"of \\\\(2, 3, 5\\\\)" -> "of ('2', '3', '5')",
"of \\\\(2, 3, 8\\\\)" -> "of ('2', '3', '8')",
"of \\\\(2, 6, 8\\\\)" -> "of ('2', '6', '8')",
"of \\\\(3, 6, 8\\\\)" -> "of ('3', '6', '8')",
"of \\\\(4, 3, 2\\\\)" -> "of ('4', '3', '2')",
"of \\\\(\\\\\\\\\\"ho\\\\\\\\\\"\\\\)" -> "of ('o')",
"of \\\\(\\\\\\\\\\"hi\\\\\\\\\\"\\\\)" -> "of ('i')",
"of \\\\(\\\\\\\\\\"he\\\\\\\\\\"\\\\)" -> "of ('e')",
"of \\\\(\\\\\\\\\\"hi\\\\\\\\\\", \\\\\\\\\\"hello\\\\\\\\\\"\\\\)" -> "of ('i', 'l')",
"of \\\\(\\\\\\\\\\"ho\\\\\\\\\\", \\\\\\\\\\"hello\\\\\\\\\\"\\\\)" -> "of ('o', 'l')",
"of \\\\(\\\\\\\\\\"HI\\\\\\\\\\"\\\\)" -> "of ('I')",
"\\\\\\"\\\\(1, 2, 3\\\\)\\\\\\"" -> "\\"('1', '2', '3')\\"",
"\\\\\\"\\\\(1, 2, 8\\\\)\\\\\\"" -> "\\"('1', '2', '8')\\"",
"\\\\\\"\\\\(1, 3, 2\\\\)\\\\\\"" -> "\\"('1', '3', '2')\\"",
"\\\\\\"\\\\(1, 3, 4\\\\)\\\\\\"" -> "\\"('1', '3', '4')\\"",
"\\\\\\"\\\\(1, 6, 8\\\\)\\\\\\"" -> "\\"('1', '6', '8')\\"",
"\\\\\\"\\\\(2, 3, 1\\\\)\\\\\\"" -> "\\"('2', '3', '1')\\"",
"\\\\\\"\\\\(2, 3, 4\\\\)\\\\\\"" -> "\\"('2', '3', '4')\\"",
"\\\\\\"\\\\(2, 3, 8\\\\)\\\\\\"" -> "\\"('2', '3', '8')\\"",
"\\\\\\"\\\\(2, 6, 8\\\\)\\\\\\"" -> "\\"('2', '6', '8')\\"",
"\\\\\\"\\\\(3, 1, 2\\\\)\\\\\\"" -> "\\"('3', '1', '2')\\"",
"\\\\\\"\\\\(3, 6, 8\\\\)\\\\\\"" -> "\\"('3', '6', '8')\\"",
"\\\\\\"\\\\(4, 2, 3\\\\)\\\\\\"" -> "\\"('4', '2', '3')\\"",
"\\\\\\"\\\\(6, 7, 8\\\\)\\\\\\"" -> "\\"('6', '7', '8')\\"",
"\\\\(\\\\\\"1, 2, 3\\\\\\"\\\\)" -> "(\\"'1', '2', '3'\\")",
"\\\\(\\\\\\"1, 2, 8\\\\\\"\\\\)" -> "(\\"'1', '2', '8'\\")",
"\\\\(\\\\\\"1, 3, 4\\\\\\"\\\\)" -> "(\\"'1', '3', '4'\\")",
"\\\\(\\\\\\"1, 6, 8\\\\\\"\\\\)" -> "(\\"'1', '6', '8'\\")",
"\\\\(\\\\\\"2, 3, 1\\\\\\"\\\\)" -> "(\\"'2', '3', '1'\\")",
"\\\\(\\\\\\"2, 3, 4\\\\\\"\\\\)" -> "(\\"'2', '3', '4'\\")",
"\\\\(\\\\\\"2, 3, 5\\\\\\"\\\\)" -> "(\\"'2', '3', '5'\\")",
"\\\\(\\\\\\"2, 3, 8\\\\\\"\\\\)" -> "(\\"'2', '3', '8'\\")",
"\\\\(\\\\\\"2, 6, 8\\\\\\"\\\\)" -> "(\\"'2', '6', '8'\\")",
"\\\\(\\\\\\"3, 6, 8\\\\\\"\\\\)" -> "(\\"'3', '6', '8'\\")",
"\\\\(\\\\\\"3, 6, 9\\\\\\"\\\\)" -> "(\\"'3', '6', '9'\\")",
"List\\\\(to\\\\)" -> "\\\\\\"to\\\\\\"",
"List\\\\(ho\\\\)" -> "\\\\\\"ho\\\\\\"",
"List\\\\(hi\\\\)" -> "\\\\\\"hi\\\\\\"",
"List\\\\(hey\\\\)" -> "\\\\\\"hey\\\\\\"",
"\\\\(0, 1, 1, 1, 2, 3\\\\)" -> "('0', '1', '1', '1', '2', '3')",
"\\\\(0, 1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "('0', '1', '1', '1', '2', '2', '2', '3', '3', '3')",
"\\\\(0, 1, 1, 2, 3, 3\\\\)" -> "('0', '1', '1', '2', '3', '3')",
"\\\\(0, 1, 1, 2, 3, 3, 3\\\\)" -> "('0', '1', '1', '2', '3', '3', '3')",
"\\\\(0, 1, 2, 2, 3\\\\)" -> "('0', '1', '2', '2', '3')",
"\\\\(0, 1, 2, 2, 3, 3, 3\\\\)" -> "('0', '1', '2', '2', '3', '3', '3')",
"\\\\(0, 1, 2, 3\\\\)" -> "('0', '1', '2', '3')",
"\\\\(0, 1, 2, 3, 3\\\\)" -> "('0', '1', '2', '3', '3')",
"\\\\(1, 1, 1, 2, 3\\\\)" -> "('1', '1', '1', '2', '3')",
"\\\\(1, 1, 1, 2, 2, 2, 3, 3, 3\\\\)" -> "('1', '1', '1', '2', '2', '2', '3', '3', '3')",
"\\\\(1, 1, 2, 3, 3\\\\)" -> "('1', '1', '2', '3', '3')",
"\\\\(1, 1, 2, 3, 3, 3\\\\)" -> "('1', '1', '2', '3', '3', '3')",
"\\\\(1, 2, 2, 3\\\\)" -> "('1', '2', '2', '3')",
"\\\\(1, 2, 2, 3, 3, 3\\\\)" -> "('1', '2', '2', '3', '3', '3')",
"\\\\(1, 2, 3\\\\)" -> "('1', '2', '3')",
"\\\\(1, 2, 3, 3\\\\)" -> "('1', '2', '3', '3')",
"\\\\(1, 2, 5\\\\)" -> "('1', '2', '5')",
"\\\\(1, 2, 8\\\\)" -> "('1', '2', '8')",
"\\\\(1, 2, 9\\\\)" -> "('1', '2', '9')",
"\\\\(1, 3, 2\\\\)" -> "('1', '3', '2')",
"\\\\(1, 3, 4\\\\)" -> "('1', '3', '4')",
"\\\\(1, 3, 8\\\\)" -> "('1', '3', '8')",
"\\\\(1, 6, 8\\\\)" -> "('1', '6', '8')",
"\\\\(2, 1, 5\\\\)" -> "('2', '1', '5')",
"\\\\(2, 2, 3, 4\\\\)" -> "('2', '2', '3', '4')",
"\\\\(2, 3, 1\\\\)" -> "('2', '3', '1')",
"\\\\(2, 3, 4\\\\)" -> "('2', '3', '4')",
"\\\\(2, 3, 5\\\\)" -> "('2', '3', '5')",
"\\\\(2, 3, 8\\\\)" -> "('2', '3', '8')",
"\\\\(2, 6, 8\\\\)" -> "('2', '6', '8')",
"\\\\(3, 1, 2\\\\)" -> "('3', '1', '2')",
"\\\\(3, 1, 5\\\\)" -> "('3', '1', '5')",
"\\\\(3, 2, 1\\\\)" -> "('3', '2', '1')",
"\\\\(3, 2, 1, 0\\\\)" -> "('3', '2', '1', '0')",
"\\\\(3, 2, 8\\\\)" -> "('3', '2', '8')",
"\\\\(3, 4, 2\\\\)" -> "('3', '4', '2')",
"\\\\(3, 4, 5\\\\)" -> "('3', '4', '5')",
"\\\\(3, 6, 5\\\\)" -> "('3', '6', '5')",
"\\\\(3, 6, 8\\\\)" -> "('3', '6', '8')",
"\\\\(3, 6, 9\\\\)" -> "('3', '6', '9')",
"\\\\(3, 8, 5\\\\)" -> "('3', '8', '5')",
"\\\\(4, 2, 3\\\\)" -> "('4', '2', '3')",
"\\\\(4, 3, 2\\\\)" -> "('4', '3', '2')",
"\\\\(5, 3, 4\\\\)" -> "('5', '3', '4')",
"\\\\(5, 7, 9\\\\)" -> "('5', '7', '9')",
"\\\\(8, 2, 3, 4\\\\)" -> "('8', '2', '3', '4')",
"\\\\(8, 2, 2, 3, 4\\\\)" -> "('8', '2', '2', '3', '4')",
"\\\\(8, 3, 4\\\\)" -> "('8', '3', '4')",
"\\\\(8, 4, 3, 2\\\\)" -> "('8', '4', '3', '2')",
"\\\\(1, 3, Nil\\\\)" -> "('1', '3')",
"listsNil" -> "listsString",
"Nil" -> "\\\\\\"\\\\\\"",
"List\\\\(\\\\)" -> "\\\\\\"\\\\\\"",
"Resources\\\\.didNotEqual\\\\(decorateToStringValue\\\\(fumList\\\\), decorateToStringValue\\\\(toList\\\\)\\\\)" -> "Resources.didNotEqual(decorateToStringValue(\\"[\\" + fumList + \\"]\\"), decorateToStringValue(\\"[\\" + toList + \\"]\\"))",
//"Resources\\\\(\\\\\\"equaled\\\\\\", decorateToStringValue\\\\(fumList\\\\), decorateToStringValue\\\\(toList\\\\)\\\\)" -> "Resources(\\"equaled\\", decorateToStringValue(\\"[\\" + fumList + \\"]\\"), decorateToStringValue(\\"[\\" + toList + \\"]\\"))",
"Resources\\\\.wasNotEqualTo\\\\(decorateToStringValue\\\\(fumList\\\\), decorateToStringValue\\\\(toList\\\\)\\\\)" -> "Resources.wasNotEqualTo(decorateToStringValue(\\"[\\" + fumList + \\"]\\"), decorateToStringValue(\\"[\\" + toList + \\"]\\"))",
"FailureMessages\\\\.didNotEqual\\\\(fumList, toList\\\\)" -> "FailureMessages\\\\.didNotEqual\\\\(\\"[\\" + fumList + \\"]\\", \\"[\\" + toList + \\"]\\")",
"FailureMessages\\\\.wasNotEqualTo\\\\(fumList, toList\\\\)" -> "FailureMessages\\\\.wasNotEqualTo\\\\(\\"[\\" + fumList + \\"]\\", \\"[\\" + toList + \\"]\\")",
"FailureMessages\\\\(\\\\\\"didNotEqual\\\\\\", fumList, toList\\\\)" -> "FailureMessages(\\"didNotEqual\\", \\"[\\" + fumList + \\"]\\", \\"[\\" + toList + \\"]\\")",
"FailureMessages\\\\(\\\\\\"wasNotEqualTo\\\\\\", fumList, toList\\\\)" -> "FailureMessages(\\"wasNotEqualTo\\", \\"[\\" + fumList + \\"]\\", \\"[\\" + toList + \\"]\\")",
"decorateToStringValue\\\\(\\\\\\"1\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"2\\\\\\"\\\\)" -> "decorateToStringValue(\\"[1]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[2]\\")",
"decorateToStringValue\\\\(\\\\\\"2\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"1\\\\\\"\\\\)" -> "decorateToStringValue(\\"[2]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[1]\\")",
"decorateToStringValue\\\\(\\\\\\"\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"e\\\\\\"\\\\)" -> "decorateToStringValue(\\"[]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[e]\\")",
"decorateToStringValue\\\\(\\\\\\"\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"1\\\\\\"\\\\)" -> "decorateToStringValue(\\"[]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[1]\\")",
"decorateToStringValue\\\\(\\\\\\"i\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"o\\\\\\"\\\\)" -> "decorateToStringValue(\\"[i]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[o]\\")",
"decorateToStringValue\\\\(\\\\\\"2\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3\\\\\\"\\\\)" -> "decorateToStringValue(\\"[2]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3]\\")",
"decorateToStringValue\\\\(\\\\\\"432\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3\\\\\\"\\\\)" -> "decorateToStringValue(\\"[432]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3]\\")",
"decorateToStringValue\\\\(\\\\\\"432\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"321\\\\\\"\\\\)" -> "decorateToStringValue(\\"[432]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[321]\\")",
"decorateToStringValue\\\\(\\\\\\"8234\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3\\\\\\"\\\\)" -> "decorateToStringValue(\\"[8234]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3]\\")",
"decorateToStringValue\\\\(\\\\\\"8432\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3\\\\\\"\\\\)" -> "decorateToStringValue(\\"[8432]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3]\\")",
"decorateToStringValue\\\\(\\\\\\"8432\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3210\\\\\\"\\\\)" -> "decorateToStringValue(\\"[8432]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3210]\\")",
"decorateToStringValue\\\\(lists\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3210\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + lists(2) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3210]\\")",
"decorateToStringValue\\\\(list1s\\\\(0\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + list1s(0) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[234]\\")",
"decorateToStringValue\\\\(\\\\\\"\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"321\\\\\\"\\\\)" -> "decorateToStringValue(\\"[]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[321]\\")",
"decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"123\\\\\\"\\\\)" -> "decorateToStringValue(\\"[234]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[123]\\")",
"decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"1233\\\\\\"\\\\)" -> "decorateToStringValue(\\"[234]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[1233]\\")",
"decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3\\\\\\"\\\\)" -> "decorateToStringValue(\\"[234]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3]\\")",
"decorateToStringValue\\\\(\\\\\\"123\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\)" -> "decorateToStringValue(\\"[123]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[234]\\")",
"decorateToStringValue\\\\(\\\\\\"1223\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\)" -> "decorateToStringValue(\\"[1223]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[234]\\")",
"decorateToStringValue\\\\(\\\\\\"321\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\)" -> "decorateToStringValue(\\"[321]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[234]\\")",
"decorateToStringValue\\\\(\\\\\\"3210\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\)" -> "decorateToStringValue(\\"[3210]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[234]\\")",
"decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"321\\\\\\"\\\\)" -> "decorateToStringValue(\\"[234]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[321]\\")",
"decorateToStringValue\\\\(\\\\\\"\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3210\\\\\\"\\\\)" -> "decorateToStringValue(\\"[]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3210]\\")",
"decorateToStringValue\\\\(\\\\\\"dil\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"o\\\\\\"\\\\)" -> "decorateToStringValue(\\"[dil]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[o]\\")",
"decorateToStringValue\\\\(\\\\\\"eil\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"o\\\\\\"\\\\)" -> "decorateToStringValue(\\"[eil]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[o]\\")",
"decorateToStringValue\\\\(\\\\\\"il\\\\\\"\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"o\\\\\\"\\\\)" -> "decorateToStringValue(\\"[il]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[o]\\")",
"decorateToStringValue\\\\(nils\\\\(0\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"e\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + nils(0) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[e]\\")",
"decorateToStringValue\\\\(listsString\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"123\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + listsString(2) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[123]\\")",
"decorateToStringValue\\\\(listsString\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"0123\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + listsString(2) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[0123]\\")",
"decorateToStringValue\\\\(lists\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"3\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + lists(2) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[3]\\")",
"decorateToStringValue\\\\(lists\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"12\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + lists(2) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[12]\\")",
"decorateToStringValue\\\\(lists\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"234\\\\\\"\\\\)" -> "decorateToStringValue(lists(2) + \\"[]\\") + \\" was not equal to \\" + decorateToStringValue(\\"23[4]\\")",
"decorateToStringValue\\\\(hiLists\\\\(0\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"o\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + hiLists(0) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[o]\\")",
"decorateToStringValue\\\\(hiLists\\\\(0\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"io\\\\\\"\\\\)" -> "decorateToStringValue(\\"i[e]\\") + \\" was not equal to \\" + decorateToStringValue(\\"i[o]\\")",
"decorateToStringValue\\\\(list1s\\\\(0\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"8234\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + list1s(0) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[8234]\\")",
"decorateToStringValue\\\\(list1s\\\\(0\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"23\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + list1s(0) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[23]\\")",
"decorateToStringValue\\\\(lists\\\\(2\\\\)\\\\) \\\\+ \\\\\\" was not equal to \\\\\\" \\\\+ decorateToStringValue\\\\(\\\\\\"01233\\\\\\"\\\\)" -> "decorateToStringValue(\\"[\\" + lists(2) + \\"]\\") + \\" was not equal to \\" + decorateToStringValue(\\"[01233]\\")"
)
}
|
SRGOM/scalatest
|
project/GenContainBase.scala
|
Scala
|
apache-2.0
| 99,895 |
package com.overviewdocs.models.tables
import com.overviewdocs.database.Slick.api._
import com.overviewdocs.models.DocumentProcessingError
class DocumentProcessingErrorsImpl(tag: Tag) extends Table[DocumentProcessingError](tag, "document_processing_error") {
def id = column[Long]("id", O.PrimaryKey)
def documentSetId = column[Long]("document_set_id")
def file2Id = column[Option[Long]]("file2_id")
def textUrl = column[String]("text_url")
def message = column[String]("message")
def statusCode = column[Option[Int]]("status_code")
def headers = column[Option[String]]("headers")
def * = (id, documentSetId, file2Id, textUrl, message, statusCode, headers) <>
((DocumentProcessingError.apply _).tupled, DocumentProcessingError.unapply)
def createAttributes = (
documentSetId,
file2Id,
textUrl,
message,
statusCode,
headers
) <> (
DocumentProcessingError.CreateAttributes.tupled,
DocumentProcessingError.CreateAttributes.unapply
)
}
object DocumentProcessingErrors extends TableQuery(new DocumentProcessingErrorsImpl(_))
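// Illustrative query sketch (not part of the original file): how the table
// query above is typically composed. The documentSetId value and the
// surrounding Database/run plumbing are assumptions from standard Slick use.
object DocumentProcessingErrorsExamples {
  def forDocumentSet(documentSetId: Long) =
    DocumentProcessingErrors
      .filter(_.documentSetId === documentSetId)
      .sortBy(_.id)
      .result // a DBIOAction yielding Seq[DocumentProcessingError]
}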
|
overview/overview-server
|
common/src/main/scala/com/overviewdocs/models/tables/DocumentProcessingErrors.scala
|
Scala
|
agpl-3.0
| 1,083 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.compendium.types
import io.truthencode.ddo.model.compendium.types.MonsterType.Animal
/**
* Created by adarr on 3/25/2017.
*/
trait Animals extends MainType {
override val mainTypes = Some(Animal)
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/compendium/types/Animals.scala
|
Scala
|
apache-2.0
| 908 |
package com.github.ldaniels528.broadway.core.io.record.impl
import com.github.ldaniels528.broadway.core.io.Scope
import com.github.ldaniels528.broadway.core.io.record.{DataTypes, Field}
import org.scalatest.Matchers._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfterEach, FeatureSpec, GivenWhenThen}
/**
* Delimiter Record Specification
* @author [email protected]
*/
class DelimiterRecordSpec() extends FeatureSpec with BeforeAndAfterEach with GivenWhenThen with MockitoSugar {
val validation = List("symbol" -> Some("AAPL"), "open" -> Some("96.76"), "close" -> Some("96.99"), "low" -> Some("95.89"), "high" -> Some("109.99"))
info("As a DelimiterRecord instance")
info("I want to be able to transform text into delimited record (and vice versa)")
feature("Transform CSV text to CSV record") {
scenario("Import a CSV stock quote into a CSV record") {
Given("a text string in CSV format")
val csvText = """"AAPL", 96.76, 96.99, 95.89, 109.99"""
And("a CSV record")
val record = DelimitedRecord(
id = "cvs_rec",
delimiter = ',',
isTextQuoted = true,
isNumbersQuoted = false,
fields = Seq(
Field(name = "symbol", `type` = DataTypes.STRING),
Field(name = "open", `type` = DataTypes.STRING),
Field(name = "close", `type` = DataTypes.STRING),
Field(name = "low", `type` = DataTypes.STRING),
Field(name = "high", `type` = DataTypes.STRING)
))
And("a scope")
implicit val scope = new Scope()
When("the text is consumed")
val dataSet = record.fromText(csvText)
Then("the toLine method should return the CSV string")
val outText = record.toText(dataSet)
info(outText)
outText shouldBe """"AAPL","96.76","96.99","95.89","109.99""""
And(s"the record must contain the values")
dataSet.data shouldBe validation
}
}
feature("Transform delimited text to delimited record") {
scenario("Import a delimited stock quote into a delimited record") {
Given("a text string in delimited format")
val line = "AAPL\\t96.76\\t96.99\\t95.89\\t109.99"
And("a delimited record")
val record = DelimitedRecord(
id = "delim_rec",
delimiter = '\\t',
fields = Seq(
Field(name = "symbol", `type` = DataTypes.STRING),
Field(name = "open", `type` = DataTypes.STRING),
Field(name = "close", `type` = DataTypes.STRING),
Field(name = "low", `type` = DataTypes.STRING),
Field(name = "high", `type` = DataTypes.STRING)
))
And("a scope")
implicit val scope = new Scope()
When("the text is consumed")
val dataSet = record.fromText(line)
Then("the toLine method should return the delimited string")
val outText = record.toText(dataSet)
info(outText)
outText shouldBe "AAPL\\t96.76\\t96.99\\t95.89\\t109.99"
And(s"the record must contain the values")
dataSet.data shouldBe validation
}
}
}
|
ldaniels528/broadway
|
app-cli/src/test/scala/com/github/ldaniels528/broadway/core/io/record/impl/DelimiterRecordSpec.scala
|
Scala
|
apache-2.0
| 3,069 |
package com.sksamuel.elastic4s.indexes
import com.sksamuel.elastic4s.http.ElasticDsl
import com.sksamuel.elastic4s.testkit.DockerTests
import org.scalatest.{Matchers, WordSpec}
class DeleteIndexTest extends WordSpec with Matchers with DockerTests {
"delete index request" should {
"delete index" in {
client.execute {
createIndex("languages").mappings(
mapping("dialects").fields(
textField("type")
)
).shards(1).waitForActiveShards(1)
}.await
client.execute {
indexExists("languages")
}.await.result.exists shouldBe true
client.execute {
ElasticDsl.deleteIndex("languages")
}.await.result.acknowledged shouldBe true
client.execute {
indexExists("languages")
}.await.result.exists shouldBe false
}
"support multiple indexes" in {
client.execute {
createIndex("languages1").mappings(
mapping("dialects").fields(
textField("type")
)
)
}.await
client.execute {
createIndex("languages2").mappings(
mapping("dialects").fields(
textField("type")
)
)
}.await
client.execute {
indexExists("languages1")
}.await.result.exists shouldBe true
client.execute {
indexExists("languages2")
}.await.result.exists shouldBe true
client.execute {
ElasticDsl.deleteIndex("languages1", "languages2")
}.await.result.acknowledged shouldBe true
client.execute {
indexExists("languages1")
}.await.result.exists shouldBe false
client.execute {
indexExists("languages2")
}.await.result.exists shouldBe false
}
}
}
|
Tecsisa/elastic4s
|
elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/indexes/DeleteIndexTest.scala
|
Scala
|
apache-2.0
| 1,760 |
package org.aprsdroid.app
import android.content.Context
import android.content.Intent
import android.content.pm.PackageManager
import android.net.Uri
import android.view.MenuItem
import com.google.android.gms.common.GoogleApiAvailability
import com.google.android.gms.maps.GoogleMap
object MapModes {
val all_mapmodes = new scala.collection.mutable.ArrayBuffer[MapMode]()
def initialize(ctx : Context) {
if (all_mapmodes.size > 0)
return
all_mapmodes += new GoogleMapMode("google", R.id.normal, null, GoogleMap.MAP_TYPE_NORMAL)
all_mapmodes += new GoogleMapMode("satellite", R.id.satellite, null, GoogleMap.MAP_TYPE_HYBRID)
all_mapmodes += new MapsforgeOnlineMode("osm", R.id.mapsforge, null, "TODO")
}
def reloadOfflineMaps(ctx : Context) {
}
def defaultMapMode(ctx : Context, prefs : PrefsWrapper): MapMode = {
MapModes.initialize(ctx)
val tag = prefs.getString("mapmode", "google")
android.util.Log.d("MapModes", "tag is " + tag )
var default : MapMode = null
for (mode <- all_mapmodes) {
android.util.Log.d("MapModes", "mode " + mode.tag + " isA=" + mode.isAvailable(ctx))
if (default == null && mode.isAvailable(ctx))
default = mode
if (mode.tag == tag && mode.isAvailable(ctx)) {
android.util.Log.d("MapModes", "mode " + mode.tag + " is tagged")
return mode
}
}
android.util.Log.d("MapModes", "mode " + default.tag + " is default")
return default
}
def startMap(ctx : Context, prefs : PrefsWrapper, targetcall : String) {
val mm = defaultMapMode(ctx, prefs)
val intent = new Intent(ctx, mm.viewClass)
if (targetcall != null && targetcall != "")
intent.setData(Uri.parse(targetcall))
else
intent.addFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT)
ctx.startActivity(intent)
}
def setDefault(prefs : PrefsWrapper, tag : String) {
prefs.set("mapmode", tag)
}
def fromMenuItem(mi : MenuItem) : MapMode = {
for (mode <- all_mapmodes) {
if (mode.menu_id == mi.getItemId())
return mode
}
return null
}
}
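// Illustrative usage sketch (not part of the original file): from an Activity
// or Service, opening the preferred map for a station. "N0CALL" is a made-up
// callsign and `prefs` the app's PrefsWrapper instance.
//
//   MapModes.initialize(ctx)
//   val mode = MapModes.defaultMapMode(ctx, prefs)
//   MapModes.startMap(ctx, prefs, "N0CALL")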
class MapMode(val tag : String, val menu_id : Int, val title : String, val viewClass : Class[_]) {
def isAvailable(ctx : Context) = true
}
class GoogleMapMode(tag : String, menu_id : Int, title : String, val mapType : Int)
extends MapMode(tag, menu_id, title, classOf[GoogleMapAct]) {
override def isAvailable(ctx : Context) = {
try {
ctx.getPackageManager().getPackageInfo(GoogleApiAvailability.GOOGLE_PLAY_SERVICES_PACKAGE, 0)
true
} catch {
case e : PackageManager.NameNotFoundException => false
}
}
}
class MapsforgeOnlineMode(tag : String, menu_id : Int, title : String, val foo : String)
extends MapMode(tag, menu_id, title, classOf[MapAct]) {
}
class MapsforgeFileMode(tag : String, menu_id : Int, title : String, val file : String)
extends MapMode(tag, menu_id, title, classOf[MapAct]) {
}
|
ge0rg/aprsdroid
|
src/MapMode.scala
|
Scala
|
gpl-2.0
| 2,938 |
package coder.simon.slots
import scala.util.Random
package object common {
case class Symbol(t: Int) extends AnyVal
case class Cell(x: Int, y: Int)
case class ReelInfo(numOfSymbols: Int, lenOfReel: Int)
type Matrix = Seq[Seq[Int]]
type Lines = Map[String, List[Cell]]
case class OneLineResult(name: String, symbol: Symbol, count: Int, mul: Int)
case class OneScatterResult(symbol: Symbol, count: Int, mul: Int)
type LineResult = List[OneLineResult]
type ScatterResult = List[OneScatterResult]
type ReelArray = Array[Array[Symbol]]
}
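// Illustrative sketch (not part of the original file) showing the aliases
// above in use; every literal value here is made up.
object CommonExamples {
  import common._
  val grid: Matrix = Seq(Seq(0, 1, 2), Seq(1, 2, 0), Seq(2, 0, 1))
  val lines: Lines = Map("top" -> List(Cell(0, 0), Cell(1, 0), Cell(2, 0)))
  val hit: LineResult = List(OneLineResult("top", Symbol(2), count = 3, mul = 5))
}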
|
erlangxk/fpscala
|
src/main/scala/coder/simon/slots/common/package.scala
|
Scala
|
mit
| 560 |
class Test:
import ArbitraryDerivation.given
private def test[A: Arbitrary]: Unit = {}
test[Foo]
|
dotty-staging/dotty
|
tests/pos/i13001/Test_2.scala
|
Scala
|
apache-2.0
| 102 |
import java.nio.file.Paths
import sbt.Keys.{version, _}
import sbt._
import MyTasks._
import bintray.BintrayPlugin.autoImport._
object Settings {
val commonSettings = Seq(
resolvers += Resolver.bintrayRepo("cuzfrog", "maven"),
organization := "com.github.cuzfrog",
scalacOptions ++= Seq(
//"-Xlint",
"-unchecked",
"-deprecation",
"-feature",
"-language:postfixOps",
"-language:implicitConversions",
"-language:higherKinds",
"-language:existentials"),
libraryDependencies ++= Seq(
"junit" % "junit" % "4.12" % "test",
"com.novocode" % "junit-interface" % "0.11" % "test->default"
),
logBuffered in Test := false,
testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-q", "-a"),
parallelExecution in Test := false,
licenses += ("Apache-2.0", url("https://opensource.org/licenses/Apache-2.0")),
cleanSnapshot := {
val home = System.getenv("HOME")
IO.delete(new File(home) / ".ivy2/local/com.github.cuzfrog/sbt-tmpfs")
},
cleanSnapshot := (cleanSnapshot runBefore publishLocal).value
)
val publicationSettings = Seq(
//publishTo := Some("My Bintray" at s"https://api.bintray.com/maven/cuzfrog/maven/${name.value}/;publish=1"),
//(compile in Compile) := ((compile in Compile) dependsOn generateCredential).value,
publishMavenStyle := false,
bintrayRepository := "sbt-plugins",
bintrayOrganization in bintray := None,
generateCredential := {
val home = System.getenv("HOME")
val bintrayUser = System.getenv("BINTRAY_USER")
val bintrayPass = System.getenv("BINTRAY_PASS")
val content = Seq(
"realm = Bintray API Realm",
"host = api.bintray.com",
"user = " + bintrayUser,
"password = " + bintrayPass
)
IO.writeLines(Paths.get(home, ".bintray", ".credentials").toFile, content)
}
)
val readmeVersionSettings = Seq(
(compile in Compile) := ((compile in Compile) dependsOn versionReadme).value,
versionReadme := {
val contents = IO.read(file("README.md"))
val regex =raw"""(?<=addSbtPlugin\\("com\\.github\\.cuzfrog" % "${name.value}" % ")[\\+\\d\\w\\-\\.]+(?="\\))"""
val releaseVersion = version.value.split("""\\+""").head
val newContents = contents.replaceAll(regex, releaseVersion)
IO.write(file("README.md"), newContents)
streams.value.log.info(s"Version set to $releaseVersion")
}
)
}
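// Illustrative wiring sketch (not part of the original file): how a project
// definition might consume these settings; the project layout is assumed.
//
//   lazy val root = (project in file("."))
//     .settings(Settings.commonSettings: _*)
//     .settings(Settings.publicationSettings: _*)
//     .settings(Settings.readmeVersionSettings: _*)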
|
cuzfrog/sbt-tmpfs
|
project/Settings.scala
|
Scala
|
apache-2.0
| 2,458 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{Analyzer, FunctionRegistry}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.optimizer.Optimizer
import org.apache.spark.sql.catalyst.parser.ParserInterface
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.strategy.{CarbonLateDecodeStrategy, DDLStrategy, StreamingTableStrategy}
import org.apache.spark.sql.internal.{SQLConf, SessionResourceLoader, SessionState, SessionStateBuilder}
import org.apache.spark.sql.optimizer.{CarbonIUDRule, CarbonLateDecodeRule, CarbonUDFTransformRule}
import org.apache.spark.sql.parser.CarbonSparkSqlParser
import org.apache.spark.sql.types.{StructField, StructType}
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.format.TableInfo
import org.apache.carbondata.spark.util.CarbonScalaUtil
/**
* This class will have carbon catalog and refresh the relation from cache if the carbontable in
* carbon catalog is not same as cached carbon relation's carbon table
*
* @param externalCatalog
* @param globalTempViewManager
* @param sparkSession
* @param functionResourceLoader
* @param functionRegistry
* @param conf
* @param hadoopConf
*/
class InMemorySessionCatalog(
externalCatalog: ExternalCatalog,
globalTempViewManager: GlobalTempViewManager,
functionRegistry: FunctionRegistry,
sparkSession: SparkSession,
conf: SQLConf,
hadoopConf: Configuration,
parser: ParserInterface,
functionResourceLoader: FunctionResourceLoader)
extends SessionCatalog(
externalCatalog,
globalTempViewManager,
functionRegistry,
conf,
hadoopConf,
parser,
functionResourceLoader
) with CarbonSessionCatalog {
override def alterTableRename(oldTableIdentifier: TableIdentifier,
newTableIdentifier: TableIdentifier,
newTablePath: String): Unit = {
sparkSession.sessionState.catalog.renameTable(oldTableIdentifier, newTableIdentifier)
}
override def alterTable(tableIdentifier: TableIdentifier,
schemaParts: String,
cols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
: Unit = {
// Not required in the case of the in-memory catalog
}
override def alterAddColumns(tableIdentifier: TableIdentifier,
schemaParts: String,
newColumns: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
: Unit = {
val catalogTable = sparkSession.sessionState.catalog.getTableMetadata(tableIdentifier)
val structType = catalogTable.schema
    var newStructType = structType
    // accumulate onto newStructType so that adding several columns keeps them all
    newColumns.get.foreach { col =>
      newStructType = newStructType
        .add(col.getColumnName, CarbonScalaUtil.convertCarbonToSparkDataType(col.getDataType))
    }
alterSchema(newStructType, catalogTable, tableIdentifier)
}
override def alterDropColumns(tableIdentifier: TableIdentifier,
schemaParts: String,
dropCols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
: Unit = {
val catalogTable = sparkSession.sessionState.catalog.getTableMetadata(tableIdentifier)
val fields = catalogTable.schema.fields.filterNot { field =>
dropCols.get.exists { col =>
col.getColumnName.equalsIgnoreCase(field.name)
}
}
alterSchema(new StructType(fields), catalogTable, tableIdentifier)
}
override def alterColumnChangeDataType(tableIdentifier: TableIdentifier,
schemaParts: String,
columns: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]])
: Unit = {
val catalogTable = sparkSession.sessionState.catalog.getTableMetadata(tableIdentifier)
    // Replace each field whose name matches one of the changed columns and
    // keep every other field unchanged (one output field per input field).
    val updatedFields = catalogTable.schema.fields.map { field =>
      columns.get.find(_.getColumnName.equalsIgnoreCase(field.name)) match {
        case Some(col) =>
          StructField(col.getColumnName,
            CarbonScalaUtil.convertCarbonToSparkDataType(col.getDataType))
        case None => field
      }
    }
    alterSchema(new StructType(updatedFields), catalogTable, tableIdentifier)
}
private def alterSchema(structType: StructType,
catalogTable: CatalogTable,
tableIdentifier: TableIdentifier): Unit = {
val copy = catalogTable.copy(schema = structType)
sparkSession.sessionState.catalog.alterTable(copy)
sparkSession.sessionState.catalog.refreshTable(tableIdentifier)
}
lazy val carbonEnv = {
val env = new CarbonEnv
env.init(sparkSession)
env
}
def getCarbonEnv() : CarbonEnv = {
carbonEnv
}
// Initialize all listeners to the Operation bus.
CarbonEnv.initListeners()
def getThriftTableInfo(tablePath: String): TableInfo = {
val tableMetadataFile = CarbonTablePath.getSchemaFilePath(tablePath)
CarbonUtil.readSchemaFile(tableMetadataFile)
}
override def lookupRelation(name: TableIdentifier): LogicalPlan = {
val rtnRelation = super.lookupRelation(name)
val isRelationRefreshed =
CarbonSessionUtil.refreshRelation(rtnRelation, name)(sparkSession)
if (isRelationRefreshed) {
super.lookupRelation(name)
} else {
rtnRelation
}
}
/**
* returns hive client from HiveExternalCatalog
*
* @return
*/
def getClient(): org.apache.spark.sql.hive.client.HiveClient = {
null
}
override def createPartitions(
tableName: TableIdentifier,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
try {
val table = CarbonEnv.getCarbonTable(tableName)(sparkSession)
val updatedParts = CarbonScalaUtil.updatePartitions(parts, table)
super.createPartitions(tableName, updatedParts, ignoreIfExists)
} catch {
case e: Exception =>
super.createPartitions(tableName, parts, ignoreIfExists)
}
}
/**
* This is alternate way of getting partition information. It first fetches all partitions from
* hive and then apply filter instead of querying hive along with filters.
* @param partitionFilters
* @param sparkSession
* @param identifier
* @return
*/
override def getPartitionsAlternate(partitionFilters: Seq[Expression],
sparkSession: SparkSession,
identifier: TableIdentifier) = {
CarbonSessionUtil.prunePartitionsByFilter(partitionFilters, sparkSession, identifier)
}
/**
* Update the storageformat with new location information
*/
override def updateStorageLocation(
path: Path,
storage: CatalogStorageFormat,
newTableName: String,
dbName: String): CatalogStorageFormat = {
storage.copy(locationUri = Some(path.toUri))
}
}
class CarbonInMemorySessionStateBuilder (sparkSession: SparkSession,
parentState: Option[SessionState] = None)
extends SessionStateBuilder(sparkSession, parentState) {
override lazy val sqlParser: ParserInterface = new CarbonSparkSqlParser(conf, sparkSession)
experimentalMethods.extraStrategies =
Seq(new StreamingTableStrategy(sparkSession),
new CarbonLateDecodeStrategy,
new DDLStrategy(sparkSession)
)
experimentalMethods.extraOptimizations = Seq(new CarbonIUDRule,
new CarbonUDFTransformRule,
new CarbonLateDecodeRule)
/**
* Internal catalog for managing table and database states.
*/
override protected lazy val catalog: InMemorySessionCatalog = {
val catalog = new InMemorySessionCatalog(
externalCatalog,
session.sharedState.globalTempViewManager,
functionRegistry,
sparkSession,
conf,
SessionState.newHadoopConf(session.sparkContext.hadoopConfiguration, conf),
sqlParser,
resourceLoader)
parentState.foreach(_.catalog.copyStateTo(catalog))
catalog
}
private def externalCatalog: ExternalCatalog =
session.sharedState.externalCatalog.asInstanceOf[ExternalCatalog]
override protected lazy val resourceLoader: SessionResourceLoader = {
new SessionResourceLoader(session)
}
override lazy val optimizer: Optimizer = new CarbonOptimizer(catalog, conf, experimentalMethods)
override protected def analyzer: Analyzer = new CarbonAnalyzer(catalog, conf, sparkSession,
new Analyzer(catalog, conf) {
override val extendedResolutionRules: Seq[Rule[LogicalPlan]] =
new FindDataSourceTable(session) +:
new ResolveSQLOnFile(session) +:
new CarbonIUDAnalysisRule(sparkSession) +:
new CarbonPreInsertionCasts(sparkSession) +: customResolutionRules
override val extendedCheckRules: Seq[LogicalPlan => Unit] =
PreWriteCheck :: HiveOnlyCheck :: Nil
override val postHocResolutionRules: Seq[Rule[LogicalPlan]] =
PreprocessTableCreation(session) +:
PreprocessTableInsertion(conf) +:
DataSourceAnalysis(conf) +:
customPostHocResolutionRules
}
)
override protected def newBuilder: NewBuilder = new CarbonInMemorySessionStateBuilder(_, _)
}
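// Illustrative sketch (not part of the original file): Spark normally
// instantiates a session state builder reflectively, but the expected call
// shape is simply
//
//   val state: SessionState = new CarbonInMemorySessionStateBuilder(sparkSession).build()
//
// How the builder class is registered with the SparkSession is assumed to
// happen elsewhere (e.g. in CarbonSession).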
|
jatin9896/incubator-carbondata
|
integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonInMemorySessionState.scala
|
Scala
|
apache-2.0
| 10,134 |
package org.genericConfig.admin.models.component
import org.genericConfig.admin.controllers.websocket.WebClient
import org.genericConfig.admin.models.logic.RidToHash
import org.genericConfig.admin.models.persistence.orientdb.PropertyKeys
import org.genericConfig.admin.models.{CommonFunction, common}
import org.genericConfig.admin.shared.Actions
import org.genericConfig.admin.shared.component.{ComponentConfigPropertiesDTO, ComponentDTO, ComponentParamsDTO, ComponentUserPropertiesDTO}
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAfterAll
import play.api.Logger
import play.api.libs.json.{JsResult, JsValue, Json}
/**
* Copyright (C) 2016 Gennadi Heimann [email protected]
*
* Created by Gennadi Heimann 18.06.2018
*/
class UpdateComponentSpecs extends Specification
with BeforeAfterAll
with CommonFunction{
val wC: WebClient = WebClient.init
var updateCROnlyNameToShow : JsResult[ComponentDTO] = _
def beforeAll() : Unit = {
before()
}
def afterAll(): Unit = {
val error : Option[common.Error] = deleteVertex(RidToHash.getRId(
updateCROnlyNameToShow.get.result.get.configProperties.get.componentId.get).get,
PropertyKeys.VERTEX_COMPONENT
)
require(error == None, "An error occurred while deleting the component")
}
"Der Benutzer veraendert die Komponente" >> {
"Es wird nur der Name geaendert" >> {
"action = ADD_COMPONENT" >> {updateCROnlyNameToShow.get.action === Actions.UPDATE_COMPONENT}
"componentId < 32 && > 10" >> {updateCROnlyNameToShow.get.result.get.configProperties.get.componentId.get.length must (be_<=(32) and be_>(10))}
"stepId = None" >> { updateCROnlyNameToShow.get.result.get.configProperties.get.stepId === None}
"nameToShow = ComponentUpdated" >> {updateCROnlyNameToShow.get.result.get.userProperties.get.nameToShow.get === "ComponentUpdated"}
"errors = None" >> {updateCROnlyNameToShow.get.result.get.errors === None}
}
}
def before(): Unit = {
val wC: WebClient = WebClient.init
val username = "userUpdateComponent"
val userId = createUser(username, wC)
val configId = createConfig(userId, s"http://contig/$username")
val nameToShow: Option[String] = Some(s"FirstStep$username")
val kind : Option[String] = Some("first")
val stepId = addStep(nameToShow, Some(configId), kind, 1, 1, wC)
val componentId : Option[String] = createComponent(wC, stepId, Some("ComponentToUpdate"))
val updateComponent: JsValue = Json.toJson(ComponentDTO(
action = Actions.UPDATE_COMPONENT,
params = Some(ComponentParamsDTO(
configProperties = Some(ComponentConfigPropertiesDTO(
stepId = stepId,
componentId = componentId
)),
userProperties = Some(ComponentUserPropertiesDTO(
nameToShow = Some("ComponentUpdated")
))
))
))
Logger.info("UPDATE_COMPONENT -> " + updateComponent)
val resultJsValue : JsValue = wC.handleMessage(updateComponent)
Logger.info("UPDATE_COMPONENT <- " + resultJsValue)
updateCROnlyNameToShow = Json.fromJson[ComponentDTO](resultJsValue)
}
}
|
gennadij/admin
|
server/test/org/genericConfig/admin/models/component/UpdateComponentSpecs.scala
|
Scala
|
apache-2.0
| 3,208 |
package sp.optimization.oscarmodels
import sp.optimization.OscaRModel
import oscar.cp.{CPModel, _}
class Queens(data: String) extends OscaRModel(data) with CPModel {
val nQueens = 8 // Number of queens
val Queens = 0 until nQueens
// Variables
val queens = Array.fill(nQueens)(CPIntVar.sparse(0, nQueens - 1))
var solution: Array[AnyVal] = Array.fill(nQueens)(-1)
// Constraints
add(allDifferent(queens))
add(allDifferent(Queens.map(i => queens(i) + i)))
add(allDifferent(Queens.map(i => queens(i) - i)))
// Search heuristic
search(binaryFirstFail(queens))
onSolution {
solution = queens map (x => x.value)
}
// Execution, search for one solution
val stats = start(nSols = 1)
}
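// Illustrative usage sketch (not part of the original file): instantiating
// the model runs the search in the constructor, after which `solution` and
// `stats` can be read. The empty `data` string is a placeholder assumption
// about what OscaRModel expects.
object QueensDemo {
  def main(args: Array[String]): Unit = {
    val model = new Queens(data = "")
    println(model.solution.mkString(",")) // column index of the queen per row
    println(model.stats)                  // OscaR search statistics
  }
}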
|
kristoferB/SP
|
sp1/src/main/scala/sp/optimization/models/OscaR/Queens.scala
|
Scala
|
mit
| 720 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.data
import java.io._
import com.esotericsoftware.kryo.io.{Input, Output}
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.BatchWriterConfig
import org.apache.accumulo.core.client.mock.MockInstance
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.data.Mutation
import org.apache.accumulo.core.security.{Authorizations, ColumnVisibility}
import org.apache.arrow.memory.{BufferAllocator, RootAllocator}
import org.apache.hadoop.io.Text
import org.geotools.data.simple.SimpleFeatureSource
import org.geotools.data.{DataStoreFinder, DataUtilities, Query, Transaction}
import org.geotools.util.factory.Hints
import org.geotools.filter.identity.FeatureIdImpl
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithDataStore
import org.locationtech.geomesa.accumulo.index.JoinIndex
import org.locationtech.geomesa.arrow.io.SimpleFeatureArrowFileReader
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.api.GeoMesaFeatureIndex
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.filter.Filter
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class BackCompatibilityTest extends Specification with LazyLogging {
/**
* Runs version tests against old data. To add more versions, generate a new data file by running
* 'BackCompatibilityWriter' against the git tag, then add another call to 'testVersion'.
*/
sequential
implicit val allocator: BufferAllocator = new RootAllocator(Long.MaxValue)
lazy val connector = new MockInstance("mycloud").getConnector("user", new PasswordToken("password"))
val queries = Seq(
("INCLUDE", Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
("IN ('0', '5', '7')", Seq(0, 5, 7)),
("bbox(geom, -130, 45, -120, 50)", Seq(5, 6, 7, 8, 9)),
("bbox(geom, -130, 45, -120, 50) AND dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(5, 6, 7)),
("name = 'name5'", Seq(5)),
("name = 'name5' AND bbox(geom, -130, 45, -120, 50) AND dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(5)),
("name = 'name5' AND dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(5)),
("name = 'name5' AND bbox(geom, -130, 40, -120, 50)", Seq(5)),
("dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(0, 1, 2, 3, 4, 5, 6, 7))
)
val addQueries = Seq(
"IN ('10')",
"name = 'name10'",
"bbox(geom, -111, 44, -109, 46)",
"bbox(geom, -111, 44, -109, 46) AND dtg DURING 2016-01-01T00:00:00.000Z/2016-01-01T01:00:00.000Z"
)
val transforms = Seq(
Array("geom"),
Array("geom", "name")
)
def doQuery(fs: SimpleFeatureSource, query: Query): Seq[Int] = {
logger.debug(s"Running query ${ECQL.toCQL(query.getFilter)} :: " +
Option(query.getPropertyNames).map(_.mkString(",")).getOrElse("All"))
SelfClosingIterator(fs.getFeatures(query).features).toList.map { f =>
logger.debug(DataUtilities.encodeFeature(f))
f.getID.toInt
}
}
def doArrowQuery(fs: SimpleFeatureSource, query: Query): Seq[Int] = {
query.getHints.put(QueryHints.ARROW_ENCODE, java.lang.Boolean.TRUE)
val out = new ByteArrayOutputStream
val results = SelfClosingIterator(fs.getFeatures(query).features)
results.foreach(sf => out.write(sf.getAttribute(0).asInstanceOf[Array[Byte]]))
def in() = new ByteArrayInputStream(out.toByteArray)
WithClose(SimpleFeatureArrowFileReader.streaming(in)) { reader =>
SelfClosingIterator(reader.features()).map(_.getID.toInt).toSeq
}
}
def runVersionTest(tables: Seq[TableMutations]): MatchResult[Any] = {
import scala.collection.JavaConversions._
val sftName = restoreTables(tables)
// get the data store
val ds = DataStoreFinder.getDataStore(Map(
AccumuloDataStoreParams.ConnectorParam.key -> connector,
AccumuloDataStoreParams.CachingParam.key -> false,
AccumuloDataStoreParams.CatalogParam.key -> sftName
)).asInstanceOf[AccumuloDataStore]
val fs = ds.getFeatureSource(sftName)
// test adding features
val writer = ds.getFeatureWriterAppend(sftName, Transaction.AUTO_COMMIT)
val feature = writer.next()
feature.getIdentifier.asInstanceOf[FeatureIdImpl].setID("10")
feature.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
feature.setAttribute(0, "name10")
feature.setAttribute(1, "2016-01-01T00:30:00.000Z")
feature.setAttribute(2, "POINT(-110 45)")
if (feature.getFeatureType.getAttributeCount > 3) {
feature.setAttribute(3, "MULTIPOLYGON(((40 40, 20 45, 45 30, 40 40)),((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))")
}
writer.write()
writer.close()
// make sure we can read it back
forall(addQueries) { query =>
val filter = ECQL.toFilter(query)
doQuery(fs, new Query(sftName, filter)) mustEqual Seq(10)
forall(transforms) { transform =>
doQuery(fs, new Query(sftName, filter, transform)) mustEqual Seq(10)
}
}
// delete it
var remover = ds.getFeatureWriter(sftName, ECQL.toFilter("IN ('10')"), Transaction.AUTO_COMMIT)
remover.hasNext must beTrue
remover.next
remover.remove()
remover.hasNext must beFalse
remover.close()
// make sure that it no longer comes back
forall(addQueries) { query =>
val filter = ECQL.toFilter(query)
doQuery(fs, new Query(sftName, filter)) must beEmpty
forall(transforms) { transform =>
doQuery(fs, new Query(sftName, filter, transform)) must beEmpty
}
}
// test queries
forall(queries) { case (q, results) =>
val filter = ECQL.toFilter(q)
logger.debug(s"Running query $q")
doQuery(fs, new Query(sftName, filter)) must containTheSameElementsAs(results)
doArrowQuery(fs, new Query(sftName, filter)) must containTheSameElementsAs(results)
forall(transforms) { transform =>
doQuery(fs, new Query(sftName, filter, transform)) must containTheSameElementsAs(results)
doArrowQuery(fs, new Query(sftName, filter, transform)) must containTheSameElementsAs(results)
}
}
// delete one of the old features
remover = ds.getFeatureWriter(sftName, ECQL.toFilter("IN ('5')"), Transaction.AUTO_COMMIT)
remover.hasNext must beTrue
remover.next
remover.remove()
remover.hasNext must beFalse
remover.close()
// make sure that it no longer comes back
forall(queries) { case (q, results) =>
val filter = ECQL.toFilter(q)
logger.debug(s"Running query $q")
doQuery(fs, new Query(sftName, filter)) must containTheSameElementsAs(results.filter(_ != 5))
}
ds.dispose()
ok
}
def testBoundsDelete(): MatchResult[Any] = {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
foreach(Seq("1.2.8-bounds", "1.2.8-bounds-multi")) { name =>
logger.info(s"Running back compatible deletion test on $name")
val sftName = restoreTables(readVersion(getFile(s"data/versioned-data-$name.kryo")))
val ds = DataStoreFinder.getDataStore(Map(
AccumuloDataStoreParams.ConnectorParam.key -> connector,
AccumuloDataStoreParams.CachingParam.key -> false,
AccumuloDataStoreParams.CatalogParam.key -> sftName
)).asInstanceOf[AccumuloDataStore]
val sft = ds.getSchema(sftName)
// verify the features are there
foreach(sft.getIndices) { index =>
val filter = if (index.name == JoinIndex.name) { ECQL.toFilter("name is not null") } else { Filter.INCLUDE }
val query = new Query(sftName, filter)
query.getHints.put(QueryHints.QUERY_INDEX, GeoMesaFeatureIndex.identifier(index))
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList must haveLength(4)
}
// delete the features
val filter = SelfClosingIterator(ds.getFeatureReader(new Query(sftName), Transaction.AUTO_COMMIT)).map(_.getID).mkString("IN('", "', '", "')")
ds.getFeatureSource(sftName).removeFeatures(ECQL.toFilter(filter))
// verify the delete
foreach(sft.getIndices) { index =>
val filter = if (index.name == JoinIndex.name) { ECQL.toFilter("name is not null") } else { Filter.INCLUDE }
val query = new Query(sftName, filter)
query.getHints.put(QueryHints.QUERY_INDEX, GeoMesaFeatureIndex.identifier(index))
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)) must beEmpty
}
}
}
def restoreTables(tables: Seq[TableMutations]): String = {
// reload the tables
tables.foreach { case TableMutations(table, mutations) =>
if (connector.tableOperations.exists(table)) {
connector.tableOperations.delete(table)
}
connector.tableOperations.create(table)
val bw = connector.createBatchWriter(table, new BatchWriterConfig)
bw.addMutations(mutations)
bw.flush()
bw.close()
}
tables.map(_.table).minBy(_.length)
}
def readVersion(file: File): Seq[TableMutations] = {
val input = new Input(new FileInputStream(file))
    def readBytes: Array[Byte] = {
      // length-prefixed byte array; Input.readBytes reads exactly this many bytes
      input.readBytes(input.readInt)
    }
val numTables = input.readInt
(0 until numTables).map { _ =>
val tableName = input.readString
val numMutations = input.readInt
val mutations = (0 until numMutations).map { _ =>
val row = readBytes
val cf = readBytes
val cq = readBytes
val vis = new ColumnVisibility(readBytes)
val timestamp = input.readLong
val value = readBytes
val mutation = new Mutation(row)
mutation.put(cf, cq, vis, timestamp, value)
mutation
}
TableMutations(tableName, mutations)
}
}
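  // File layout consumed above (mirrored by BackCompatibilityWriter below):
  // numTables, then per table its name and numMutations, and per mutation the
  // length-prefixed row/cf/cq/visibility bytes, a timestamp, and the
  // length-prefixed value.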
def testVersion(version: String): MatchResult[Any] = {
val data = readVersion(getFile(s"data/versioned-data-$version.kryo"))
logger.info(s"Running back compatible test on version $version")
runVersionTest(data)
}
"GeoMesa" should {
"support backward compatibility to 1.2.0" >> { testVersion("1.2.0") }
"support backward compatibility to 1.2.1" >> { testVersion("1.2.1") }
"support backward compatibility to 1.2.2" >> { testVersion("1.2.2") }
"support backward compatibility to 1.2.3" >> { testVersion("1.2.3") }
"support backward compatibility to 1.2.4" >> { testVersion("1.2.4") }
// note: data on disk is the same in 1.2.5 and 1.2.6
"support backward compatibility to 1.2.6" >> { testVersion("1.2.6") }
"support backward compatibility to 1.2.7.3" >> { testVersion("1.2.7.3") }
"support backward compatibility to 1.3.1" >> { testVersion("1.3.1") }
"support backward compatibility to 1.3.2" >> { testVersion("1.3.2") }
// note: data on disk is the same from 1.3.3 through 2.0.0-m.1
"support backward compatibility to 2.0.0-m.1" >> { testVersion("2.0.0-m.1") }
"support backward compatibility to 2.1.0" >> { testVersion("2.1.0") }
"support backward compatibility to 2.3.1" >> { testVersion("2.3.1") }
"delete invalid indexed data" >> { testBoundsDelete() }
}
def getFile(name: String): File = new File(getClass.getClassLoader.getResource(name).toURI)
step {
allocator.close()
}
case class TableMutations(table: String, mutations: Seq[Mutation])
}
@RunWith(classOf[JUnitRunner])
class BackCompatibilityWriter extends TestWithDataStore {
override val spec = "name:String:index=join,dtg:Date,*geom:Point:srid=4326,multi:MultiPolygon:srid=4326"
val version = "REPLACEME"
"AccumuloDataStore" should {
"write data" in {
skipped("integration")
addFeatures((0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.setAttribute(0, s"name$i")
sf.setAttribute(1, s"2015-01-01T0$i:01:00.000Z")
sf.setAttribute(2, s"POINT(-12$i 4$i)")
sf.setAttribute(3, s"MULTIPOLYGON(((4$i 40, 20 45, 45 30, 4$i 40)),((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))")
sf
})
val dataFile = new File(s"src/test/resources/data/versioned-data-$version.kryo")
val fs = new FileOutputStream(dataFile)
val output = new Output(fs)
def writeText(text: Text): Unit = {
output.writeInt(text.getLength)
output.write(text.getBytes, 0, text.getLength)
}
val tables = connector.tableOperations().list().filter(_.startsWith(sftName))
output.writeInt(tables.size)
tables.foreach { table =>
output.writeAscii(table)
output.writeInt(connector.createScanner(table, new Authorizations()).size)
connector.createScanner(table, new Authorizations()).foreach { entry =>
val key = entry.getKey
Seq(key.getRow, key.getColumnFamily, key.getColumnQualifier, key.getColumnVisibility).foreach(writeText)
output.writeLong(key.getTimestamp)
val value = entry.getValue.get
output.writeInt(value.length)
output.write(value)
}
}
output.flush()
output.close()
ok
}
}
}
|
elahrvivaz/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/data/BackCompatibilityTest.scala
|
Scala
|
apache-2.0
| 14,066 |
/*
* Copyright 2016 Guy Van den Broeck and Wannes Meert (UCLA and KU Leuven)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.ucla.cs.starai.forclift.examples;
///*
// * Copyright (C) 2013 Guy Van den Broeck ([email protected])
// *
// * This file is part of WFOMC (http://dtai.cs.kuleuven.be/ml/systems/wfomc).
// *
// * WFOMC is free software: you can redistribute it and/or modify
// * it under the terms of the GNU Lesser General Public License as published by
// * the Free Software Foundation, either version 3 of the License, or
// * (at your option) any later version.
// *
// * WFOMC is distributed in the hope that it will be useful,
// * but WITHOUT ANY WARRANTY; without even the implied warranty of
// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// * GNU Lesser General Public License for more details.
// *
// * You should have received a copy of the GNU Lesser General Public License
// * along with WFOMC. If not, see <http://www.gnu.org/licenses/>.
// *
// */
//
//package liftedinference.examples
//
//import edu.ucla.cs.starai.forclift._
//import java.io._
//import edu.ucla.cs.starai.forclift.examples._
//
//object Random3CNFCounter {
//
// def main(args: Array[String]): Unit = {
//
// val rootDir = new File("experiments/3cnf/")
// rootDir.mkdir()
//
// val output: PrintWriter = new PrintWriter(new FileWriter(new File(rootDir, "counts.dat")));
//
// def logData(line: String) {
// System.out.print(line)
// output.print(line)
// output.flush
// }
//
// def logDataln(line: String = "") {
// System.out.println(line)
// output.println(line)
// output.flush
// }
//
// def pad(s: Any) = s.toString.padTo(25, " ").mkString
//
// val domains = Stream.from(5, 5)
// val ratios = List(4, 4.24, 5, 5.25, 5.5, 5.75, 6, 7)
//
// logDataln(pad("NbVars") + ratios.map { r => pad("Ratio " + r) }.mkString)
//
// for (domain <- domains) {
//
// logData(pad(domain))
//
// for (ratio <- ratios) {
//
// println("nbVars = " + domain)
// println("ratio = " + ratio)
// println("logNbInterpretations = " + (domain * math.log(2)) / math.log(2))
//
// val domains = (
// "domain Variable " + domain + " {} " + "\\n" +
// "domain Clause " + (math.round(ratio * domain).toInt) + " {} " + "\\n")
// val denominator = new models.WeightedCNFModel {
// def theoryString = (domains + """
//
// predicate p(Variable) 0.5 0.5
//
// predicate cv1(Clause,Variable) 0.5 0.5
// predicate cv2(Clause,Variable) 0.5 0.5
// predicate cv3(Clause,Variable) 0.5 0.5
//
// predicate cs1(Clause) 0.5 0.5
// predicate cs2(Clause) 0.5 0.5
// predicate cs3(Clause) 0.5 0.5
//
// !cv1(C,V1) v !cv1(C,V2), V1!=V2
// !cv2(C,V1) v !cv2(C,V2), V1!=V2
// !cv3(C,V1) v !cv3(C,V2), V1!=V2
//
// """)
// }
//
// val denomWmc = denominator.theory.logSmoothWmc
// println("logDenominator = " + denomWmc.logToDouble / math.log(2))
//
// val numerator = new models.WeightedCNFModel {
// def theoryString = (domains + """
//
// predicate p(Variable) 0.5 0.5
//
// predicate cv1(Clause,Variable) 0.5 0.5
// predicate cv2(Clause,Variable) 0.5 0.5
// predicate cv3(Clause,Variable) 0.5 0.5
//
// predicate cs1(Clause) 0.5 0.5
// predicate cs2(Clause) 0.5 0.5
// predicate cs3(Clause) 0.5 0.5
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v !cs1(C) v !cs2(C) v !cs3(C) v p(V1) v p(V2) v p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v !cs1(C) v !cs2(C) v cs3(C) v p(V1) v p(V2) v !p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v !cs1(C) v cs2(C) v !cs3(C) v p(V1) v !p(V2) v p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v !cs1(C) v cs2(C) v cs3(C) v p(V1) v !p(V2) v !p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v cs1(C) v !cs2(C) v !cs3(C) v !p(V1) v p(V2) v p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v cs1(C) v !cs2(C) v cs3(C) v !p(V1) v p(V2) v !p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v cs1(C) v cs2(C) v !cs3(C) v !p(V1) v !p(V2) v p(V3)
//
// !cv1(C,V1) v !cv2(C,V2) v !cv3(C,V3) v cs1(C) v cs2(C) v cs3(C) v !p(V1) v !p(V2) v !p(V3)
//
// !cv1(C,V1) v !cv1(C,V2), V1!=V2
// !cv2(C,V1) v !cv2(C,V2), V1!=V2
// !cv3(C,V1) v !cv3(C,V2), V1!=V2
//
// """)
// }
//
// val numWmc = numerator.theory.logSmoothWmc
// println("logNumerator = " + numWmc.logToDouble / math.log(2))
// println("logModelProb = " + (numWmc / denomWmc).logToDouble / math.log(2))
// println("modelProb = " + (numWmc / denomWmc).toDouble)
//
// val logExpectedNbModels = (numWmc / denomWmc).logToDouble / math.log(2) + domain
// println("logNbModels = " + logExpectedNbModels)
//
// logData(pad(logExpectedNbModels))
//
// println()
//
// }
//
// logDataln()
// }
//
// output.close
//
// }
//
//}
|
UCLA-StarAI/Forclift
|
src/main/scala/edu/ucla/cs/starai/forclift/examples/Random3CNFCounter.scala
|
Scala
|
apache-2.0
| 6,574 |
/*
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.sword2
import better.files.FileExtensions
import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterEach
import java.io.{File => JFile}
import java.util.regex.Pattern
import scala.util.Success
class BagExtractorSpec extends TestSupportFixture with BeforeAndAfterEach {
import BagExtractor._
private val outDir = (testDir / "out").toJava
override def beforeEach: Unit = {
super.beforeEach()
FileUtils.deleteDirectory(outDir)
}
private def getZipFile(name: String): JFile = {
new JFile("src/test/resources/zips", name)
}
"createFilePathMapping" should "generate empty map for empty zip" in {
val maybeMap = createFilePathMapping(getZipFile("empty.zip"), Pattern.compile(""))
maybeMap shouldBe a[Success[_]]
maybeMap.get shouldBe empty
}
it should "generate mappings for files under prefix" in {
val maybeMap = createFilePathMapping(getZipFile("mix.zip"), Pattern.compile("subfolder/"))
maybeMap shouldBe a[Success[_]]
maybeMap.get.keySet should contain("subfolder/test.txt")
}
"unzipWithMappedFilePaths" should "unzip empty zip" in {
unzipWithMappedFilePaths(getZipFile("empty.zip"), outDir, Map[String, String]()) shouldBe a[Success[_]]
outDir.list() shouldBe empty
}
it should "unzip zip with one unmapped root entry" in {
unzipWithMappedFilePaths(getZipFile("one-entry.zip"), outDir, Map[String, String]()) shouldBe a[Success[_]]
outDir.list().length shouldBe 1
FileUtils.readFileToString((outDir.toScala / "test.txt").toJava, "UTF-8").trim shouldBe "test"
}
it should "unzip zip with one mapped root entry" in {
unzipWithMappedFilePaths(getZipFile("one-entry.zip"), outDir, Map("test.txt" -> "renamed.txt")) shouldBe a[Success[_]]
outDir.list().length shouldBe 1
FileUtils.readFileToString((outDir.toScala / "renamed.txt").toJava, "UTF-8").trim shouldBe "test"
}
it should "unzip zip with one unmapped entry in subfolder" in {
unzipWithMappedFilePaths(getZipFile("one-entry-in-subfolder.zip"), outDir, Map[String, String]()) shouldBe a[Success[_]]
outDir.list().length shouldBe 1
FileUtils.readFileToString((outDir.toScala / "subfolder" / "test.txt").toJava, "UTF-8").trim shouldBe "test"
}
it should "unzip zip with one mapped entry in subfolder" in {
unzipWithMappedFilePaths(getZipFile("one-entry-in-subfolder.zip"), outDir, Map("subfolder/test.txt" -> "renamed.txt")) shouldBe a[Success[_]]
outDir.list().length shouldBe 1
FileUtils.readFileToString((outDir.toScala / "renamed.txt").toJava, "UTF-8").trim shouldBe "test"
}
it should "unzip zip with several entries some in subfolders, some mapped" in {
unzipWithMappedFilePaths(getZipFile("mix.zip"), outDir, Map("subfolder/test.txt" -> "renamed.txt", "subfolder2/subsubfolder/leaf.txt" -> "renamed2.txt")) shouldBe a[Success[_]]
outDir.list().length shouldBe 3
FileUtils.readFileToString((outDir.toScala / "root.txt").toJava, "UTF-8").trim shouldBe "in root"
FileUtils.readFileToString((outDir.toScala / "renamed.txt").toJava, "UTF-8").trim shouldBe "test"
FileUtils.readFileToString((outDir.toScala / "renamed2.txt").toJava, "UTF-8").trim shouldBe "in leaf"
}
"extractWithFilepathMapping" should "correctly unzip medium bag and leave it valid" in {
extractWithFilepathMapping(getZipFile("medium.zip"), outDir, "dummyId")
}
it should "accept multiple payload files with the same checksum" in {
extractWithFilepathMapping(getZipFile("double-image.zip"), outDir, "dummyId")
}
}
|
DANS-KNAW/easy-sword2
|
src/test/scala/nl.knaw.dans.easy.sword2/BagExtractorSpec.scala
|
Scala
|
apache-2.0
| 4,212 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dbis.pig.op
import dbis.pig.plan.{InvalidPlanException, DataflowPlan}
import dbis.pig.schema._
import scala.collection.mutable.{ListBuffer, Map}
import dbis.pig.expr.ArithmeticExpr
import dbis.pig.expr.Ref
import dbis.pig.expr.Expr
/**
* A trait for the GENERATE part of a FOREACH operator.
*/
trait ForeachGenerator {
def isNested: Boolean
}
/**
* GeneratorExpr represents a single expression of a Generator.
*
* @param expr
* @param alias
*/
case class GeneratorExpr(expr: ArithmeticExpr, alias: Option[Field] = None) {
override def toString = expr + (if (alias.isDefined) s" -> ${alias.get}" else "")
}
/**
* GeneratorList implements the ForeachGenerator trait and is used to represent
* the FOREACH ... GENERATE operator.
*
* @param exprs
*/
case class GeneratorList(var exprs: List[GeneratorExpr]) extends ForeachGenerator {
def constructFieldList(inputSchema: Option[Schema]): Array[Field] =
exprs.map(e => {
e.alias match {
// if we have an explicit schema (i.e. a field) then we use it
case Some(f) => {
if (f.fType == Types.ByteArrayType) {
// if the type was only bytearray, we should check the expression if we have a more
// specific type
val res = e.expr.resultType(inputSchema)
Field(f.name, res)
}
else
f
}
// otherwise we take the field name from the expression and
// the input schema
case None => {
val res = e.expr.resultType(inputSchema)
Field(e.expr.exprName, res)
}
}
}).toArray
override def isNested: Boolean = false
}
/**
* GeneratorPlan implements the ForeachGenerator trait and is used to represent
* a nested FOREACH.
*
* @param subPlan
*/
case class GeneratorPlan(var subPlan: List[PigOperator]) extends ForeachGenerator {
override def isNested: Boolean = true
}
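// Illustrative sketch (not part of the original file): a statement such as
// FOREACH b GENERATE x, y AS alias would be represented roughly as
//
//   GeneratorList(List(
//     GeneratorExpr(RefExpr(NamedField("x"))),
//     GeneratorExpr(RefExpr(NamedField("y")), Some(Field("alias", Types.ByteArrayType)))))
//
// where RefExpr and NamedField are assumed to be the reference expression
// nodes from dbis.pig.expr.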
/**
* Foreach represents the FOREACH operator of Pig.
*
* @param out the output pipe (relation).
* @param in the input pipe
* @param generator the generator (a list of expressions or a subplan)
* @param windowMode ???
*/
case class Foreach(
private val out: Pipe,
private val in: Pipe,
var generator: ForeachGenerator,
var windowMode: Boolean = false
) extends PigOperator(out, in) {
var subPlan: Option[DataflowPlan] = None
override def preparePlan: Unit = {
generator match {
case gen @ GeneratorPlan(opList) => {
/*
       * A nested FOREACH requires special handling: we construct a subplan for the operator list
* and add our input pipe to the context of the plan.
*/
val plan = new DataflowPlan(opList, Some(List(inputs.head)))
// println("--> " + plan.operators.mkString("\\n"))
plan.operators.foreach(op =>
if (op.isInstanceOf[Generate]) {
val genOp = op.asInstanceOf[Generate]
// we extract the input pipes of the GENERATE statements (which are hidden
          // inside the expressions)
val pipes = genOp.findInputPipes(plan)
// and update the other ends of the pipes accordingly
pipes.foreach(p => p.producer.addConsumer(p.name, op))
// we need these pipes only to avoid the removal of disconnected operators
genOp.parentOp = this
genOp.setAdditionalPipesFromPlan(plan)
}
else if (op.isInstanceOf[ConstructBag]) {
op.asInstanceOf[ConstructBag].parentOp = Some(this)
}
)
val lastOp = plan.operators.last
if (lastOp.isInstanceOf[Generate]) {
lastOp.asInstanceOf[Generate].parentOp = this
lastOp.constructSchema
}
else
throw new InvalidPlanException("last statement in nested foreach must be a generate: " + lastOp)
gen.subPlan = plan.operators
subPlan = Some(plan)
}
case _ => {}
}
}
override def resolveReferences(mapping: Map[String, Ref]): Unit = generator match {
case GeneratorList(exprs) => exprs.foreach(_.expr.resolveReferences(mapping))
case GeneratorPlan(plan) => {
// TODO
}
}
override def checkConnectivity: Boolean = {
def referencedInGenerate(op: Generate, pipe: Pipe): Boolean =
      op.additionalPipes.exists(p => p.name == pipe.name)
generator match {
case GeneratorList(expr) => true
case GeneratorPlan(plan) => {
var result: Boolean = true
val genOp = plan.last.asInstanceOf[Generate]
plan.foreach { op => {
/*
// println("check operator: " + op)
if (!checkSubOperatorConnectivity(op)) {
println("op: " + op + " : not connected")
result = false
}
*/
if (!op.inputs.forall(p => p.producer != null)) {
println("op: " + op + " : invalid input pipes: " + op.inputs.mkString(","))
result = false
}
if (!op.outputs.forall(p => p.consumer.nonEmpty || referencedInGenerate(genOp, p))) {
println("op: " + op + " : invalid output pipes: " + op.outputs.mkString(","))
result = false
}
}
}
result
}
}
}
override def constructSchema: Option[Schema] = {
generator match {
case gen@GeneratorList(expr) => {
val fields = gen.constructFieldList(inputSchema)
schema = Some(Schema(BagType(TupleType(fields))))
}
case GeneratorPlan(_) => {
val plan = subPlan.get.operators
// if we have ConstructBag operators in our subplan, we should add schema information
plan.filter(p => p.isInstanceOf[ConstructBag]).foreach(p => p.asInstanceOf[ConstructBag].parentSchema = inputSchema)
// we invoke constructSchema for all operators of the subplan
plan.foreach(op => op.constructSchema)
/*
* TODO: expressions in generate can refer to _all_ bags in the subplan
*/
val genOp = plan.last
if (genOp.isInstanceOf[Generate]) {
schema = genOp.schema
// schema.get.setBagName(outPipeName)
}
else
throw new InvalidPlanException("last statement in nested foreach must be a generate")
}
}
schema
}
override def checkSchemaConformance: Boolean = {
generator match {
case GeneratorList(expr) => inputSchema match {
case Some(s) => {
// if we know the schema we check all named fields
expr.map(_.expr.traverseAnd(s, Expr.checkExpressionConformance)).foldLeft(true)((b1: Boolean, b2: Boolean) => b1 && b2)
}
case None => {
// if we don't have a schema all expressions should contain only positional fields
expr.map(_.expr.traverseAnd(null, Expr.containsNoNamedFields)).foldLeft(true)((b1: Boolean, b2: Boolean) => b1 && b2)
}
}
case GeneratorPlan(plan) => {
subPlan.get.checkSchemaConformance
true
}
}
}
/**
* Looks for an operator in the subplan that produces a bag with the given name.
*
   * @param name the name (alias) of the bag to look for
   * @return the operator producing the bag, if any
*/
def findOperatorInSubplan(name: String): Option[PigOperator] = subPlan match {
case Some(plan) => plan.findOperatorForAlias(name)
case None => None
}
/**
* Returns the lineage string describing the sub-plan producing the input for this operator.
*
* @return a string representation of the sub-plan.
*/
override def lineageString: String = {
generator match {
case GeneratorList(expr) => s"""FOREACH%${expr}%""" + super.lineageString
case GeneratorPlan(plan) => s"""FOREACH""" + super.lineageString // TODO: implement lineageString for nested foreach
}
}
def containsFlatten(onBag: Boolean = false): Boolean = {
val theSchema = inputSchema.orNull
generator match {
case GeneratorList(exprs) =>
if (onBag) {
exprs.map(g => g.expr.traverseOr(theSchema, Expr.containsFlattenOnBag)).exists(b => b)
}
else
exprs.map(g => g.expr.traverseOr(theSchema, Expr.containsFlatten)).exists(b => b)
case GeneratorPlan(plan) =>
false // TODO: what happens if GENERATE contains flatten?
}
}
override def printOperator(tab: Int): Unit = {
println(indent(tab) + s"FOREACH { out = ${outPipeNames.mkString(",")} , in = ${inPipeNames.mkString(",")} }")
println(indent(tab + 2) + "inSchema = " + inputSchema)
println(indent(tab + 2) + "outSchema = " + schema)
generator match {
case GeneratorList(exprs) => println(indent(tab + 2) + "exprs = " + exprs.mkString(","))
case GeneratorPlan(_) => subPlan.get.printPlan(tab + 5)
}
}
}
|
ksattler/piglet
|
src/main/scala/dbis/pig/op/Foreach.scala
|
Scala
|
apache-2.0
| 9,521 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io._
import java.util.concurrent.atomic._
import junit.framework.Assert._
import org.scalatest.junit.JUnitSuite
import org.junit.{After, Before, Test}
import kafka.message._
import kafka.common.{MessageSizeTooLargeException, OffsetOutOfRangeException, MessageSetSizeTooLargeException}
import kafka.utils._
import kafka.server.KafkaConfig
class LogTest extends JUnitSuite {
var logDir: File = null
val time = new MockTime(0)
var config: KafkaConfig = null
val logConfig = LogConfig()
@Before
def setUp() {
logDir = TestUtils.tempDir()
val props = TestUtils.createBrokerConfig(0, -1)
config = KafkaConfig.fromProps(props)
}
@After
def tearDown() {
Utils.rm(logDir)
}
def createEmptyLogs(dir: File, offsets: Int*) {
for(offset <- offsets) {
Log.logFilename(dir, offset).createNewFile()
Log.indexFilename(dir, offset).createNewFile()
}
}
/**
   * Test for time-based log roll. This test appends messages then changes the time
* using the mock clock to force the log to roll and checks the number of segments.
*/
@Test
def testTimeBasedLogRoll() {
val set = TestUtils.singleMessageSet("test".getBytes())
// create a log
val log = new Log(logDir,
logConfig.copy(segmentMs = 1 * 60 * 60L),
recoveryPoint = 0L,
scheduler = time.scheduler,
time = time)
assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
time.sleep(log.config.segmentMs + 1)
log.append(set)
assertEquals("Log doesn't roll if doing so creates an empty segment.", 1, log.numberOfSegments)
log.append(set)
assertEquals("Log rolls on this append since time has expired.", 2, log.numberOfSegments)
for(numSegments <- 3 until 5) {
time.sleep(log.config.segmentMs + 1)
log.append(set)
assertEquals("Changing time beyond rollMs and appending should create a new segment.", numSegments, log.numberOfSegments)
}
val numSegments = log.numberOfSegments
time.sleep(log.config.segmentMs + 1)
log.append(new ByteBufferMessageSet())
assertEquals("Appending an empty message set should not roll log even if succient time has passed.", numSegments, log.numberOfSegments)
}
/**
   * Test for jitter for time-based log roll. This test appends messages then changes the time
* using the mock clock to force the log to roll and checks the number of segments.
*/
@Test
def testTimeBasedLogRollJitter() {
val set = TestUtils.singleMessageSet("test".getBytes())
val maxJitter = 20 * 60L
// create a log
val log = new Log(logDir,
logConfig.copy(segmentMs = 1 * 60 * 60L, segmentJitterMs = maxJitter),
recoveryPoint = 0L,
scheduler = time.scheduler,
time = time)
assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
log.append(set)
time.sleep(log.config.segmentMs - maxJitter)
log.append(set)
assertEquals("Log does not roll on this append because it occurs earlier than max jitter", 1, log.numberOfSegments);
time.sleep(maxJitter - log.activeSegment.rollJitterMs + 1)
log.append(set)
assertEquals("Log should roll after segmentMs adjusted by random jitter", 2, log.numberOfSegments)
}
/**
* Test that appending more than the maximum segment size rolls the log
*/
@Test
def testSizeBasedLogRoll() {
val set = TestUtils.singleMessageSet("test".getBytes)
val setSize = set.sizeInBytes
val msgPerSeg = 10
val segmentSize = msgPerSeg * (setSize - 1) // each segment will be 10 messages
// create a log
val log = new Log(logDir, logConfig.copy(segmentSize = segmentSize), recoveryPoint = 0L, time.scheduler, time = time)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
    // appending more messages than fit into one segment should trigger a size-based roll
for (i<- 1 to (msgPerSeg + 1)) {
log.append(set)
}
assertEquals("There should be exactly 2 segments.", 2, log.numberOfSegments)
}
/**
* Test that we can open and append to an empty log
*/
@Test
def testLoadEmptyLog() {
createEmptyLogs(logDir, 0)
val log = new Log(logDir, logConfig, recoveryPoint = 0L, time.scheduler, time = time)
log.append(TestUtils.singleMessageSet("test".getBytes))
}
/**
* This test case appends a bunch of messages and checks that we can read them all back using sequential offsets.
*/
@Test
def testAppendAndReadWithSequentialOffsets() {
val log = new Log(logDir, logConfig.copy(segmentSize = 71), recoveryPoint = 0L, time.scheduler, time = time)
val messages = (0 until 100 by 2).map(id => new Message(id.toString.getBytes)).toArray
for(i <- 0 until messages.length)
log.append(new ByteBufferMessageSet(NoCompressionCodec, messages = messages(i)))
for(i <- 0 until messages.length) {
val read = log.read(i, 100, Some(i+1)).messageSet.head
assertEquals("Offset read should match order appended.", i, read.offset)
assertEquals("Message should match appended.", messages(i), read.message)
}
assertEquals("Reading beyond the last message returns nothing.", 0, log.read(messages.length, 100, None).messageSet.size)
}
/**
* This test appends a bunch of messages with non-sequential offsets and checks that we can read the correct message
* from any offset less than the logEndOffset including offsets not appended.
*/
@Test
def testAppendAndReadWithNonSequentialOffsets() {
val log = new Log(logDir, logConfig.copy(segmentSize = 71), recoveryPoint = 0L, time.scheduler, time = time)
val messageIds = ((0 until 50) ++ (50 until 200 by 7)).toArray
val messages = messageIds.map(id => new Message(id.toString.getBytes))
// now test the case that we give the offsets and use non-sequential offsets
for(i <- 0 until messages.length)
log.append(new ByteBufferMessageSet(NoCompressionCodec, new AtomicLong(messageIds(i)), messages = messages(i)), assignOffsets = false)
for(i <- 50 until messageIds.max) {
val idx = messageIds.indexWhere(_ >= i)
val read = log.read(i, 100, None).messageSet.head
assertEquals("Offset read should match message id.", messageIds(idx), read.offset)
assertEquals("Message should match appended.", messages(idx), read.message)
}
}
/**
* This test covers an odd case where we have a gap in the offsets that falls at the end of a log segment.
* Specifically we create a log where the last message in the first segment has offset 0. If we
* then read offset 1, we should expect this read to come from the second segment, even though the
* first segment has the greatest lower bound on the offset.
*/
@Test
def testReadAtLogGap() {
val log = new Log(logDir, logConfig.copy(segmentSize = 300), recoveryPoint = 0L, time.scheduler, time = time)
// keep appending until we have two segments with only a single message in the second segment
while(log.numberOfSegments == 1)
log.append(new ByteBufferMessageSet(NoCompressionCodec, messages = new Message("42".getBytes)))
// now manually truncate off all but one message from the first segment to create a gap in the messages
log.logSegments.head.truncateTo(1)
assertEquals("A read should now return the last message in the log", log.logEndOffset-1, log.read(1, 200, None).messageSet.head.offset)
}
/**
* Test reading at the boundary of the log, specifically
* - reading from the logEndOffset should give an empty message set
* - reading beyond the log end offset should throw an OffsetOutOfRangeException
*/
@Test
def testReadOutOfRange() {
createEmptyLogs(logDir, 1024)
val log = new Log(logDir, logConfig.copy(segmentSize = 1024), recoveryPoint = 0L, time.scheduler, time = time)
assertEquals("Reading just beyond end of log should produce 0 byte read.", 0, log.read(1024, 1000).messageSet.sizeInBytes)
try {
log.read(0, 1024)
fail("Expected exception on invalid read.")
} catch {
case e: OffsetOutOfRangeException => "This is good."
}
try {
log.read(1025, 1000)
fail("Expected exception on invalid read.")
} catch {
case e: OffsetOutOfRangeException => // This is good.
}
}
/**
* Test that covers reads and writes on a multisegment log. This test appends a bunch of messages
* and then reads them all back and checks that the message read and offset matches what was appended.
*/
@Test
def testLogRolls() {
/* create a multipart log with 100 messages */
val log = new Log(logDir, logConfig.copy(segmentSize = 100), recoveryPoint = 0L, time.scheduler, time = time)
val numMessages = 100
val messageSets = (0 until numMessages).map(i => TestUtils.singleMessageSet(i.toString.getBytes))
messageSets.foreach(log.append(_))
log.flush
/* do successive reads to ensure all our messages are there */
var offset = 0L
for(i <- 0 until numMessages) {
val messages = log.read(offset, 1024*1024).messageSet
assertEquals("Offsets not equal", offset, messages.head.offset)
assertEquals("Messages not equal at offset " + offset, messageSets(i).head.message, messages.head.message)
offset = messages.head.offset + 1
}
val lastRead = log.read(startOffset = numMessages, maxLength = 1024*1024, maxOffset = Some(numMessages + 1)).messageSet
assertEquals("Should be no more messages", 0, lastRead.size)
    // check that rolling the log forced a flush of the log -- the flush is async so retry in case of failure
TestUtils.retry(1000L){
assertTrue("Log role should have forced flush", log.recoveryPoint >= log.activeSegment.baseOffset)
}
}
/**
* Test reads at offsets that fall within compressed message set boundaries.
*/
@Test
def testCompressedMessages() {
/* this log should roll after every messageset */
val log = new Log(logDir, logConfig.copy(segmentSize = 100), recoveryPoint = 0L, time.scheduler, time = time)
/* append 2 compressed message sets, each with two messages giving offsets 0, 1, 2, 3 */
log.append(new ByteBufferMessageSet(DefaultCompressionCodec, new Message("hello".getBytes), new Message("there".getBytes)))
log.append(new ByteBufferMessageSet(DefaultCompressionCodec, new Message("alpha".getBytes), new Message("beta".getBytes)))
def read(offset: Int) = ByteBufferMessageSet.decompress(log.read(offset, 4096).messageSet.head.message)
/* we should always get the first message in the compressed set when reading any offset in the set */
assertEquals("Read at offset 0 should produce 0", 0, read(0).head.offset)
assertEquals("Read at offset 1 should produce 0", 0, read(1).head.offset)
assertEquals("Read at offset 2 should produce 2", 2, read(2).head.offset)
assertEquals("Read at offset 3 should produce 2", 2, read(3).head.offset)
}
/**
* Test garbage collecting old segments
*/
@Test
def testThatGarbageCollectingSegmentsDoesntChangeOffset() {
for(messagesToAppend <- List(0, 1, 25)) {
logDir.mkdirs()
// first test a log segment starting at 0
val log = new Log(logDir, logConfig.copy(segmentSize = 100), recoveryPoint = 0L, time.scheduler, time = time)
for(i <- 0 until messagesToAppend)
log.append(TestUtils.singleMessageSet(i.toString.getBytes))
var currOffset = log.logEndOffset
assertEquals(currOffset, messagesToAppend)
// time goes by; the log file is deleted
log.deleteOldSegments(_ => true)
assertEquals("Deleting segments shouldn't have changed the logEndOffset", currOffset, log.logEndOffset)
assertEquals("We should still have one segment left", 1, log.numberOfSegments)
assertEquals("Further collection shouldn't delete anything", 0, log.deleteOldSegments(_ => true))
assertEquals("Still no change in the logEndOffset", currOffset, log.logEndOffset)
assertEquals("Should still be able to append and should get the logEndOffset assigned to the new append",
currOffset,
log.append(TestUtils.singleMessageSet("hello".toString.getBytes)).firstOffset)
// cleanup the log
log.delete()
}
}
/**
* MessageSet size shouldn't exceed the config.segmentSize, check that it is properly enforced by
* appending a message set larger than the config.segmentSize setting and checking that an exception is thrown.
*/
@Test
def testMessageSetSizeCheck() {
val messageSet = new ByteBufferMessageSet(NoCompressionCodec, new Message ("You".getBytes), new Message("bethe".getBytes))
// append messages to log
val configSegmentSize = messageSet.sizeInBytes - 1
val log = new Log(logDir, logConfig.copy(segmentSize = configSegmentSize), recoveryPoint = 0L, time.scheduler, time = time)
try {
log.append(messageSet)
fail("message set should throw MessageSetSizeTooLargeException.")
} catch {
case e: MessageSetSizeTooLargeException => // this is good
}
}
@Test
def testCompactedTopicConstraints() {
val keyedMessage = new Message(bytes = "this message has a key".getBytes, key = "and here it is".getBytes)
val anotherKeyedMessage = new Message(bytes = "this message also has a key".getBytes, key ="another key".getBytes)
val unkeyedMessage = new Message(bytes = "this message does not have a key".getBytes)
val messageSetWithUnkeyedMessage = new ByteBufferMessageSet(NoCompressionCodec, unkeyedMessage, keyedMessage)
val messageSetWithOneUnkeyedMessage = new ByteBufferMessageSet(NoCompressionCodec, unkeyedMessage)
val messageSetWithCompressedKeyedMessage = new ByteBufferMessageSet(GZIPCompressionCodec, keyedMessage)
val messageSetWithKeyedMessage = new ByteBufferMessageSet(NoCompressionCodec, keyedMessage)
val messageSetWithKeyedMessages = new ByteBufferMessageSet(NoCompressionCodec, keyedMessage, anotherKeyedMessage)
val log = new Log(logDir, logConfig.copy(compact = true), recoveryPoint = 0L, time.scheduler, time)
try {
log.append(messageSetWithUnkeyedMessage)
fail("Compacted topics cannot accept a message without a key.")
} catch {
case e: InvalidMessageException => // this is good
}
try {
log.append(messageSetWithOneUnkeyedMessage)
fail("Compacted topics cannot accept a message without a key.")
} catch {
case e: InvalidMessageException => // this is good
}
try {
log.append(messageSetWithCompressedKeyedMessage)
fail("Compacted topics cannot accept compressed messages.")
} catch {
case e: InvalidMessageException => // this is good
}
// the following should succeed without any InvalidMessageException
log.append(messageSetWithKeyedMessage)
log.append(messageSetWithKeyedMessages)
// test that a compacted topic with broker-side compression type set to uncompressed can accept compressed messages
val uncompressedLog = new Log(logDir, logConfig.copy(compact = true, compressionType = "uncompressed"),
recoveryPoint = 0L, time.scheduler, time)
uncompressedLog.append(messageSetWithCompressedKeyedMessage)
uncompressedLog.append(messageSetWithKeyedMessage)
uncompressedLog.append(messageSetWithKeyedMessages)
try {
uncompressedLog.append(messageSetWithUnkeyedMessage)
fail("Compacted topics cannot accept a message without a key.")
} catch {
case e: InvalidMessageException => // this is good
}
try {
uncompressedLog.append(messageSetWithOneUnkeyedMessage)
fail("Compacted topics cannot accept a message without a key.")
} catch {
case e: InvalidMessageException => // this is good
}
}
/**
* We have a max size limit on message appends, check that it is properly enforced by appending a message larger than the
* setting and checking that an exception is thrown.
*/
@Test
def testMessageSizeCheck() {
val first = new ByteBufferMessageSet(NoCompressionCodec, new Message ("You".getBytes), new Message("bethe".getBytes))
val second = new ByteBufferMessageSet(NoCompressionCodec, new Message("change".getBytes))
// append messages to log
val maxMessageSize = second.sizeInBytes - 1
val log = new Log(logDir, logConfig.copy(maxMessageSize = maxMessageSize), recoveryPoint = 0L, time.scheduler, time = time)
// should be able to append the small message
log.append(first)
try {
log.append(second)
fail("Second message set should throw MessageSizeTooLargeException.")
} catch {
case e: MessageSizeTooLargeException => // this is good
}
}
/**
* Append a bunch of messages to a log and then re-open it both with and without recovery and check that the log re-initializes correctly.
*/
@Test
def testLogRecoversToCorrectOffset() {
val numMessages = 100
val messageSize = 100
val segmentSize = 7 * messageSize
val indexInterval = 3 * messageSize
val config = logConfig.copy(segmentSize = segmentSize, indexInterval = indexInterval, maxIndexSize = 4096)
var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
for(i <- 0 until numMessages)
log.append(TestUtils.singleMessageSet(TestUtils.randomBytes(messageSize)))
assertEquals("After appending %d messages to an empty log, the log end offset should be %d".format(numMessages, numMessages), numMessages, log.logEndOffset)
val lastIndexOffset = log.activeSegment.index.lastOffset
val numIndexEntries = log.activeSegment.index.entries
val lastOffset = log.logEndOffset
log.close()
log = new Log(logDir, config, recoveryPoint = lastOffset, time.scheduler, time)
assertEquals("Should have %d messages when log is reopened w/o recovery".format(numMessages), numMessages, log.logEndOffset)
assertEquals("Should have same last index offset as before.", lastIndexOffset, log.activeSegment.index.lastOffset)
assertEquals("Should have same number of index entries as before.", numIndexEntries, log.activeSegment.index.entries)
log.close()
// test recovery case
log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
assertEquals("Should have %d messages when log is reopened with recovery".format(numMessages), numMessages, log.logEndOffset)
assertEquals("Should have same last index offset as before.", lastIndexOffset, log.activeSegment.index.lastOffset)
assertEquals("Should have same number of index entries as before.", numIndexEntries, log.activeSegment.index.entries)
log.close()
}
/**
* Test that if we manually delete an index segment it is rebuilt when the log is re-opened
*/
@Test
def testIndexRebuild() {
// publish the messages and close the log
val numMessages = 200
val config = logConfig.copy(segmentSize = 200, indexInterval = 1)
var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
for(i <- 0 until numMessages)
log.append(TestUtils.singleMessageSet(TestUtils.randomBytes(10)))
val indexFiles = log.logSegments.map(_.index.file)
log.close()
// delete all the index files
indexFiles.foreach(_.delete())
// reopen the log
log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
assertEquals("Should have %d messages when log is reopened".format(numMessages), numMessages, log.logEndOffset)
for(i <- 0 until numMessages)
assertEquals(i, log.read(i, 100, None).messageSet.head.offset)
log.close()
}
/**
* Test the Log truncate operations
*/
@Test
def testTruncateTo() {
val set = TestUtils.singleMessageSet("test".getBytes())
val setSize = set.sizeInBytes
val msgPerSeg = 10
val segmentSize = msgPerSeg * setSize // each segment will be 10 messages
// create a log
val log = new Log(logDir, logConfig.copy(segmentSize = segmentSize), recoveryPoint = 0L, scheduler = time.scheduler, time = time)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
for (i<- 1 to msgPerSeg)
log.append(set)
assertEquals("There should be exactly 1 segments.", 1, log.numberOfSegments)
assertEquals("Log end offset should be equal to number of messages", msgPerSeg, log.logEndOffset)
val lastOffset = log.logEndOffset
val size = log.size
log.truncateTo(log.logEndOffset) // keep the entire log
assertEquals("Should not change offset", lastOffset, log.logEndOffset)
assertEquals("Should not change log size", size, log.size)
log.truncateTo(log.logEndOffset + 1) // try to truncate beyond lastOffset
assertEquals("Should not change offset but should log error", lastOffset, log.logEndOffset)
assertEquals("Should not change log size", size, log.size)
log.truncateTo(msgPerSeg/2) // truncate somewhere in between
assertEquals("Should change offset", log.logEndOffset, msgPerSeg/2)
assertTrue("Should change log size", log.size < size)
log.truncateTo(0) // truncate the entire log
assertEquals("Should change offset", 0, log.logEndOffset)
assertEquals("Should change log size", 0, log.size)
for (i<- 1 to msgPerSeg)
log.append(set)
assertEquals("Should be back to original offset", log.logEndOffset, lastOffset)
assertEquals("Should be back to original size", log.size, size)
log.truncateFullyAndStartAt(log.logEndOffset - (msgPerSeg - 1))
assertEquals("Should change offset", log.logEndOffset, lastOffset - (msgPerSeg - 1))
assertEquals("Should change log size", log.size, 0)
for (i<- 1 to msgPerSeg)
log.append(set)
assertTrue("Should be ahead of to original offset", log.logEndOffset > msgPerSeg)
assertEquals("log size should be same as before", size, log.size)
log.truncateTo(0) // truncate before first start offset in the log
assertEquals("Should change offset", 0, log.logEndOffset)
assertEquals("Should change log size", log.size, 0)
}
/**
* Verify that when we truncate a log the index of the last segment is resized to the max index size to allow more appends
*/
@Test
def testIndexResizingAtTruncation() {
val set = TestUtils.singleMessageSet("test".getBytes())
val setSize = set.sizeInBytes
val msgPerSeg = 10
val segmentSize = msgPerSeg * setSize // each segment will be 10 messages
val config = logConfig.copy(segmentSize = segmentSize)
val log = new Log(logDir, config, recoveryPoint = 0L, scheduler = time.scheduler, time = time)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
for (i<- 1 to msgPerSeg)
log.append(set)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
for (i<- 1 to msgPerSeg)
log.append(set)
assertEquals("There should be exactly 2 segment.", 2, log.numberOfSegments)
assertEquals("The index of the first segment should be trimmed to empty", 0, log.logSegments.toList(0).index.maxEntries)
log.truncateTo(0)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
assertEquals("The index of segment 1 should be resized to maxIndexSize", log.config.maxIndexSize/8, log.logSegments.toList(0).index.maxEntries)
for (i<- 1 to msgPerSeg)
log.append(set)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
}
/**
* When we open a log any index segments without an associated log segment should be deleted.
*/
@Test
def testBogusIndexSegmentsAreRemoved() {
val bogusIndex1 = Log.indexFilename(logDir, 0)
val bogusIndex2 = Log.indexFilename(logDir, 5)
val set = TestUtils.singleMessageSet("test".getBytes())
val log = new Log(logDir,
logConfig.copy(segmentSize = set.sizeInBytes * 5,
maxIndexSize = 1000,
indexInterval = 1),
recoveryPoint = 0L,
time.scheduler,
time)
assertTrue("The first index file should have been replaced with a larger file", bogusIndex1.length > 0)
assertFalse("The second index file should have been deleted.", bogusIndex2.exists)
// check that we can append to the log
for(i <- 0 until 10)
log.append(set)
log.delete()
}
/**
* Verify that truncation works correctly after re-opening the log
*/
@Test
def testReopenThenTruncate() {
val set = TestUtils.singleMessageSet("test".getBytes())
val config = logConfig.copy(segmentSize = set.sizeInBytes * 5,
maxIndexSize = 1000,
indexInterval = 10000)
// create a log
var log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
// add enough messages to roll over several segments then close and re-open and attempt to truncate
for(i <- 0 until 100)
log.append(set)
log.close()
log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
log.truncateTo(3)
assertEquals("All but one segment should be deleted.", 1, log.numberOfSegments)
assertEquals("Log end offset should be 3.", 3, log.logEndOffset)
}
/**
* Test that deleted files are deleted after the appropriate time.
*/
@Test
def testAsyncDelete() {
val set = TestUtils.singleMessageSet("test".getBytes())
val asyncDeleteMs = 1000
val config = logConfig.copy(segmentSize = set.sizeInBytes * 5,
fileDeleteDelayMs = asyncDeleteMs,
maxIndexSize = 1000,
indexInterval = 10000)
val log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
// append some messages to create some segments
for(i <- 0 until 100)
log.append(set)
// files should be renamed
val segments = log.logSegments.toArray
val oldFiles = segments.map(_.log.file) ++ segments.map(_.index.file)
log.deleteOldSegments((s) => true)
assertEquals("Only one segment should remain.", 1, log.numberOfSegments)
assertTrue("All log and index files should end in .deleted", segments.forall(_.log.file.getName.endsWith(Log.DeletedFileSuffix)) &&
segments.forall(_.index.file.getName.endsWith(Log.DeletedFileSuffix)))
assertTrue("The .deleted files should still be there.", segments.forall(_.log.file.exists) &&
segments.forall(_.index.file.exists))
assertTrue("The original file should be gone.", oldFiles.forall(!_.exists))
// when enough time passes the files should be deleted
val deletedFiles = segments.map(_.log.file) ++ segments.map(_.index.file)
time.sleep(asyncDeleteMs + 1)
assertTrue("Files should all be gone.", deletedFiles.forall(!_.exists))
}
/**
* Any files ending in .deleted should be removed when the log is re-opened.
*/
@Test
def testOpenDeletesObsoleteFiles() {
val set = TestUtils.singleMessageSet("test".getBytes())
val config = logConfig.copy(segmentSize = set.sizeInBytes * 5, maxIndexSize = 1000)
var log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
// append some messages to create some segments
for(i <- 0 until 100)
log.append(set)
log.deleteOldSegments((s) => true)
log.close()
log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
assertEquals("The deleted segments should be gone.", 1, log.numberOfSegments)
}
@Test
def testAppendMessageWithNullPayload() {
val log = new Log(logDir,
LogConfig(),
recoveryPoint = 0L,
time.scheduler,
time)
log.append(new ByteBufferMessageSet(new Message(bytes = null)))
val messageSet = log.read(0, 4096, None).messageSet
assertEquals(0, messageSet.head.offset)
assertTrue("Message payload should be null.", messageSet.head.message.isNull)
}
@Test
def testCorruptLog() {
// append some messages to create some segments
val config = logConfig.copy(indexInterval = 1, maxMessageSize = 64*1024, segmentSize = 1000)
val set = TestUtils.singleMessageSet("test".getBytes())
val recoveryPoint = 50L
for(iteration <- 0 until 50) {
// create a log and write some messages to it
logDir.mkdirs()
var log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
val numMessages = 50 + TestUtils.random.nextInt(50)
for(i <- 0 until numMessages)
log.append(set)
val messages = log.logSegments.flatMap(_.log.iterator.toList)
log.close()
// corrupt index and log by appending random bytes
TestUtils.appendNonsenseToFile(log.activeSegment.index.file, TestUtils.random.nextInt(1024) + 1)
TestUtils.appendNonsenseToFile(log.activeSegment.log.file, TestUtils.random.nextInt(1024) + 1)
// attempt recovery
log = new Log(logDir, config, recoveryPoint, time.scheduler, time)
assertEquals(numMessages, log.logEndOffset)
assertEquals("Messages in the log after recovery should be the same.", messages, log.logSegments.flatMap(_.log.iterator.toList))
Utils.rm(logDir)
}
}
@Test
def testCleanShutdownFile() {
// append some messages to create some segments
val config = logConfig.copy(indexInterval = 1, maxMessageSize = 64*1024, segmentSize = 1000)
val set = TestUtils.singleMessageSet("test".getBytes())
val parentLogDir = logDir.getParentFile
assertTrue("Data directory %s must exist", parentLogDir.isDirectory)
val cleanShutdownFile = new File(parentLogDir, Log.CleanShutdownFile)
cleanShutdownFile.createNewFile()
assertTrue(".kafka_cleanshutdown must exist", cleanShutdownFile.exists())
var recoveryPoint = 0L
// create a log and write some messages to it
var log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
for(i <- 0 until 100)
log.append(set)
log.close()
// check if recovery was attempted. Even if the recovery point is 0L, recovery should not be attempted as the
// clean shutdown file exists.
recoveryPoint = log.logEndOffset
log = new Log(logDir, config, 0L, time.scheduler, time)
assertEquals(recoveryPoint, log.logEndOffset)
cleanShutdownFile.delete()
}
@Test
def testParseTopicPartitionName() {
val topic: String = "test_topic"
val partition:String = "143"
val dir: File = new File(logDir + topicPartitionName(topic, partition))
    val topicAndPartition = Log.parseTopicPartitionName(dir)
assertEquals(topic, topicAndPartition.asTuple._1)
assertEquals(partition.toInt, topicAndPartition.asTuple._2)
}
@Test
def testParseTopicPartitionNameForEmptyName() {
try {
val dir: File = new File("")
      val topicAndPartition = Log.parseTopicPartitionName(dir)
      fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
    } catch {
      case e: Exception => // it's good!
}
}
@Test
def testParseTopicPartitionNameForNull() {
try {
val dir: File = null
      val topicAndPartition = Log.parseTopicPartitionName(dir)
      fail("KafkaException should have been thrown for dir: " + dir)
    } catch {
      case e: Exception => // it's good!
}
}
@Test
def testParseTopicPartitionNameForMissingSeparator() {
val topic: String = "test_topic"
val partition:String = "1999"
val dir: File = new File(logDir + File.separator + topic + partition)
try {
      val topicAndPartition = Log.parseTopicPartitionName(dir)
      fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
    } catch {
      case e: Exception => // it's good!
}
}
@Test
def testParseTopicPartitionNameForMissingTopic() {
val topic: String = ""
val partition:String = "1999"
val dir: File = new File(logDir + topicPartitionName(topic, partition))
try {
      val topicAndPartition = Log.parseTopicPartitionName(dir)
      fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
    } catch {
      case e: Exception => // it's good!
}
}
@Test
def testParseTopicPartitionNameForMissingPartition() {
val topic: String = "test_topic"
val partition:String = ""
val dir: File = new File(logDir + topicPartitionName(topic, partition))
try {
      val topicAndPartition = Log.parseTopicPartitionName(dir)
      fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
    } catch {
      case e: Exception => // it's good!
}
}
def topicPartitionName(topic: String, partition: String): String = {
File.separator + topic + "-" + partition
}
}
|
roadboy/KafkaACL
|
core/src/test/scala/unit/kafka/log/LogTest.scala
|
Scala
|
apache-2.0
| 34,228 |
package com.github.truerss.base
import utest._
object EnclosureTypeTest extends TestSuite {
override val tests: Tests = Tests {
test("decode audio formats") {
test("audio/mpeg") {
assert(EnclosureType.withName("audio/mpeg") == EnclosureType.Audio.`audio/mpeg`)
}
test("audio/x-m4a") {
assert(EnclosureType.withName("audio/x-m4a") == EnclosureType.Audio.`audio/x-m4a`)
}
}
test("decode video formats") {
test("video/mp4") {
assert(EnclosureType.withName("video/mp4") == EnclosureType.Video.`video/mp4`)
}
test("video/quicktime") {
assert(EnclosureType.withName("video/quicktime") == EnclosureType.Video.`video/quicktime`)
}
test("video/wmv") {
assert(EnclosureType.withName("video/wmv") == EnclosureType.Video.`video/wmv`)
}
}
test("fail to decode non-existing formats") {
test("audio/m4a") {
assert(EnclosureType.withNameEither("audio/m4a").isLeft)
}
test("video/mkv") {
assert(EnclosureType.withNameEither("video/mkv").isLeft)
}
}
}
}
|
truerss/plugins
|
base/src/test/scala/com/github/truerss/base/EnclosureTypeTest.scala
|
Scala
|
mit
| 1,108 |
// Project: surf (https://github.com/jokade/surf)
// Module: rest / shared
// Description: Interface for instances that resolve a RESTService for a given path.
// Copyright (c) 2016 Johannes Kastner <[email protected]>
// Distributed under the MIT license (see included LICENSE file)
package surf.rest
import surf.{Service, ServiceRefFactory, ServiceProps, ServiceRef}
/**
* Resolves RESTActions into [[RESTService]]s.
*/
trait RESTResolver {
/**
* Returns a ServiceRef to the RESTService to be used for the specified URL path,
* or None if there is no RESTService registered to handle the specified action.
* The second argument in the returned tuple represents the RESTAction to be used with
* the resolved service (which may differ from the provided `action`)
*
* @param action RESTAction for which the handling service shall be resolved.
* @return Tuple containing the resolved service ref and the updated RESTAction to be sent to this service.
*/
def resolveRESTService(action: RESTAction) : Option[(ServiceRef,RESTAction)]
}
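/*
 * A minimal illustrative implementation (a sketch, not part of the library API): route
 * every action to one fixed service and pass the action through unchanged.
 *
 *   class EchoResolver(service: ServiceRef) extends RESTResolver {
 *     override def resolveRESTService(action: RESTAction) = Some((service, action))
 *   }
 */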
object RESTResolver {
/**
* Creates a RESTResolver that delegates to a collection of resolvers until a resolver is found
* that can handle the specified RESTAction.
*
* @param resolvers sub-resolvers to which all calls are delegated
*/
def fromResolvers(resolvers: Iterable[RESTResolver]): RESTResolver = new WrapperResolver(resolvers)
/**
* Creates a RESTResolver that uses the specified list of mappings to handle path prefixes by sub-resolvers.
*
* @param prefixResolvers Mappings from path prefixes to sub-resolvers
*/
def fromPrefixResolvers(prefixResolvers: (Path,RESTResolver)*): RESTResolver = new PrefixWrapperResolver(prefixResolvers.toMap)
def fromPrefixMappings(prefixMappings: (Path,ServiceProps)*)(implicit f: ServiceRefFactory): RESTResolver =
new MappingResolver(prefixMappings.toMap)
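  /*
   * Usage sketch (hypothetical Path values and services, assuming an implicit
   * ServiceRefFactory is in scope):
   *
   *   val resolver = RESTResolver.fromPrefixResolvers(
   *     usersPath -> RESTResolver.fromService(new UsersService),
   *     adminPath -> RESTResolver.fromPrefixMappings(reportsPath -> reportProps))
   */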
/**
* Creates a RESTResolver that sends all requests to the specified service.
*
   * @param props the ServiceProps from which the handling service is created
   * @param f the factory used to instantiate the service
*/
def fromService(props: ServiceProps)(implicit f: ServiceRefFactory): RESTResolver = new SingleResolver(f.serviceOf(props))
def fromService(service: =>Service)(implicit f: ServiceRefFactory): RESTResolver = fromService(ServiceProps(service))
def fromHandler(handler: RESTHandler)(implicit f: ServiceRefFactory): RESTResolver = new SingleResolver(handler)
class SingleResolver(handler: ServiceRef) extends RESTResolver {
def this(handler: RESTHandler)(implicit f: ServiceRefFactory) = this(f.serviceOf(new RESTService {
      override def handle: RESTHandler = handler
}))
override def resolveRESTService(action: RESTAction): Option[(ServiceRef, RESTAction)] = Some((handler,action))
}
trait Wrapper extends RESTResolver {
def subresolvers: collection.IterableView[RESTResolver,Iterable[RESTResolver]]
// TODO: better algorithm?
override def resolveRESTService(action: RESTAction) = subresolvers.map( _.resolveRESTService(action) ).collectFirst{
case Some(m) => m
}
}
class WrapperResolver(resolvers: Iterable[RESTResolver]) extends Wrapper {
override val subresolvers = resolvers.view
}
trait PrefixWrapper extends RESTResolver {
def prefixes: Iterable[(Path,RESTResolver)]
override def resolveRESTService(action: RESTAction) = prefixes.view.
map( p => (Path.matchPrefix(p._1,action.path),p._2) ).
find( _._1.isDefined ).
flatMap( p => p._2.resolveRESTService(action.withPath(p._1.get)) )
}
class PrefixWrapperResolver(val prefixes: Map[Path,RESTResolver]) extends PrefixWrapper
trait Mapping extends RESTResolver {
def mappings: collection.IterableView[(Path,ServiceRef),Iterable[(Path,ServiceRef)]]
override def resolveRESTService(action: RESTAction) = mappings.
map( p => (RESTAction.matchPrefix(p._1,action),p._2) ).
collectFirst{
case (Some(act),service) => (service,act)
}
}
class MappingResolver(map: Map[Path,ServiceRef]) extends Mapping {
def this(map: Map[Path,ServiceProps])(implicit f: ServiceRefFactory) =
this( map.map( p => p.copy(_2 = f.serviceOf(p._2))) )
override val mappings = map.view
}
}
|
jokade/surf
|
rest/shared/src/main/scala/surf/rest/RESTResolver.scala
|
Scala
|
mit
| 4,222 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.declaration
import jto.validation.{Invalid, Path, Valid, ValidationError}
import org.scalatestplus.play.PlaySpec
import play.api.libs.json.{JsPath, JsSuccess, Json}
class BusinessNominatedOfficerSpec extends PlaySpec {
"Form Validation" must {
"successfully validate" when {
"successfully validate given a valid person name" in {
val data = Map("value" -> Seq("PersonName"))
val result = BusinessNominatedOfficer.formRule.validate(data)
result mustBe Valid(BusinessNominatedOfficer("PersonName"))
}
}
"fail validation" when {
"fail validation for missing data represented by an empty Map" in {
val result = BusinessNominatedOfficer.formRule.validate(Map.empty)
result mustBe Invalid(Seq((Path \\ "value", Seq(ValidationError("error.required.declaration.nominated.officer")))))
}
}
"write correct data from true value" in {
val result = BusinessNominatedOfficer.formWrites.writes(BusinessNominatedOfficer("PersonName"))
result must be(Map("value" -> Seq("PersonName")))
}
}
"JSON validation" must {
"successfully validate given an model value" in {
val json = Json.obj("value" -> "PersonName")
Json.fromJson[BusinessNominatedOfficer](json) must
be(JsSuccess(BusinessNominatedOfficer("PersonName"), JsPath))
}
"successfully validate json read write" in {
Json.toJson(BusinessNominatedOfficer("PersonName")) must
be(Json.obj("value" -> "PersonName"))
}
}
}
|
hmrc/amls-frontend
|
test/models/declaration/BusinessNominatedOfficerSpec.scala
|
Scala
|
apache-2.0
| 2,141 |
package org.zalando.grafter.macros
import scala.annotation.StaticAnnotation
import scala.language.experimental.macros
object ReadersMacro {
def impl(c: scala.reflect.macros.whitebox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
import c.universe._
val inputs : (Tree, Tree, Option[Tree]) =
annottees.toList match {
case classDecl :: companion :: rest =>
(classDecl.tree, c.typecheck(classDecl.tree), Option(companion.tree))
case classDecl :: rest =>
(classDecl.tree, c.typecheck(classDecl.tree), None)
case Nil => c.abort(c.enclosingPosition, "no target")
}
val outputs: List[Tree] = inputs match {
case (original, ClassDef(_, className, _, Template(_, _, fields)), companion) =>
def readerInstances =
fields.
collect { case field @ ValDef(_, fieldName, fieldType, _) => (fieldName, fieldType) }.
groupBy(_._2.tpe.typeSymbol.name.decodedName.toString).values.map(_.head).toList.
map { case (fieldName, fieldType) =>
val readerName = TermName(fieldName.toString.trim+"Reader")
val fieldAccessor = TermName(fieldName.toString.trim)
c.Expr[Any](
q"""
implicit def $readerName: cats.data.Reader[$className, $fieldType] =
cats.data.Reader(_.$fieldAccessor)""")
}
def readerIdentity =
c.Expr[Any] {
q"""
implicit def ${TermName(className.toString.uncapitalize+"Reader")}: cats.data.Reader[$className, $className] =
cats.data.Reader(identity)
"""
}
val companionObject =
companion match {
case Some(q"""$mod object $companionName extends { ..$earlydefns } with ..$parents { ..$body }""") =>
q"""$mod object $companionName extends { ..$earlydefns } with ..$parents {
..$body
..$readerInstances
..$readerIdentity
}"""
case None =>
q"""object ${TermName(className.decodedName.toString)} {
..$readerInstances
..$readerIdentity
}"""
}
original :: companionObject :: Nil
case other => c.abort(c.enclosingPosition, "The @readers annotation can only annotate a simple case class with no extension or type parameters")
}
c.Expr[Any](Block(outputs, Literal(Constant(()))))
}
implicit class StringOps(s: String) {
def uncapitalize: String =
s.take(1).map(_.toLower)++s.drop(1)
}
}
class readers extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro ReadersMacro.impl
}
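/*
 * Illustrative expansion (a sketch of the intended result, not generated output copied
 * verbatim): given
 *
 *   @readers
 *   case class AppConfig(db: Database, http: HttpConfig)
 *
 * the macro produces in the companion object one Reader per field plus an identity Reader:
 *
 *   implicit def dbReader: cats.data.Reader[AppConfig, Database] = cats.data.Reader(_.db)
 *   implicit def httpReader: cats.data.Reader[AppConfig, HttpConfig] = cats.data.Reader(_.http)
 *   implicit def appConfigReader: cats.data.Reader[AppConfig, AppConfig] = cats.data.Reader(identity)
 */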
|
jcranky/grafter
|
macros/src/main/scala/org/zalando/grafter/macros/ReadersMacro.scala
|
Scala
|
mit
| 2,696 |
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.core.samplers
import com.comcast.money.api.SpanId
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class AlwaysOnSamplerSpec extends AnyWordSpec with Matchers {
"AlwaysOnSampler" should {
"always return a record result with sampling" in {
val spanId = SpanId.createNew()
AlwaysOnSampler.shouldSample(spanId, None, "name") should matchPattern { case RecordResult(true, Nil) => }
}
}
}
|
Comcast/money
|
money-core/src/test/scala/com/comcast/money/core/samplers/AlwaysOnSamplerSpec.scala
|
Scala
|
apache-2.0
| 1,104 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.receiver.scribe
import com.twitter.finagle.CancelledRequestException
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.logging.{BareFormatter, Logger, StringHandler}
import com.twitter.scrooge.BinaryThriftStructSerializer
import com.twitter.util.{Await, Future}
import com.twitter.zipkin.collector.QueueFullException
import com.twitter.zipkin.common._
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.thriftscala.{LogEntry, ResultCode, Span => ThriftSpan}
import org.scalatest.FunSuite
import java.util.concurrent.CancellationException
class ScribeSpanReceiverTest extends FunSuite {
val serializer = new BinaryThriftStructSerializer[ThriftSpan] {
def codec = ThriftSpan
}
val category = "zipkin"
val validSpan = Span(123, "boo", 456, None, List(new Annotation(1, "bah", None)))
val validList = List(LogEntry(category, serializer.toString(validSpan.toThrift)))
val base64 = "CgABAAAAAAAAAHsLAAMAAAADYm9vCgAEAAAAAAAAAcgPAAYMAAAAAQoAAQAAAAAAAAABCwACAAAAA2JhaAAPAAgMAAAAAAIACQAA"
test("processes entries") {
var recvdSpan: Option[Seq[ThriftSpan]] = None
val receiver = new ScribeReceiver(Set(category), { s =>
recvdSpan = Some(s)
Future.Done
})
assert(Await.result(receiver.log(Seq(validList.head, validList.head))) === ResultCode.Ok)
assert(!recvdSpan.isEmpty)
assert(recvdSpan.get.map(_.toSpan) === Seq(validSpan, validSpan))
}
test("ok when scribe client cancels their request") {
val cancelled = new CancellationException()
cancelled.initCause(new CancelledRequestException())
val receiver = new ScribeReceiver(Set(category), { _ => Future.exception(cancelled) })
assert(Await.result(receiver.log(validList)) === ResultCode.Ok)
}
test("pushes back on QueueFullException, but doesn't log or increment errors") {
val stats = new InMemoryStatsReceiver()
val log = logHandle(classOf[ScribeReceiver])
val receiver = new ScribeReceiver(Set(category), { _ => Future.exception(new QueueFullException(1)) }, stats)
assert(Await.result(receiver.log(validList)) === ResultCode.TryLater)
assert(stats.counters(List("pushBack")) === 1)
assert(!stats.counters.contains(List("processingError", classOf[QueueFullException].getName)))
assert(log.get.trim === "")
}
test("logs and increments processingError on Exception with message") {
val stats = new InMemoryStatsReceiver()
val log = logHandle(classOf[ScribeReceiver])
val receiver = new ScribeReceiver(Set(category), { _ =>
Future.exception(new NullPointerException("foo was null")) }, stats)
assert(Await.result(receiver.log(validList)) === ResultCode.TryLater)
assert(stats.counters(List("processingError", classOf[NullPointerException].getName)) === 1)
assert(log.get.trim === "Sending TryLater due to NullPointerException(foo was null)")
}
test("don't print null when exception hasn't any message") {
val log = logHandle(classOf[ScribeReceiver])
val receiver = new ScribeReceiver(Set(category), { _ => Future.exception(new Exception) })
Await.result(receiver.log(validList))
assert(log.get.trim === "Sending TryLater due to Exception()")
}
def logHandle(clazz: Class[_]): StringHandler = {
val handler = new StringHandler(BareFormatter, None)
    val logger = Logger.get(clazz)
logger.clearHandlers()
logger.addHandler(handler)
handler
}
test("ignores bad categories") {
var recvdSpan: Option[ThriftSpan] = None
val receiver = new ScribeReceiver(Set("othercat"), { s =>
recvdSpan = Some(s.head)
Future.Done
})
assert(Await.result(receiver.log(validList)) === ResultCode.Ok)
assert(recvdSpan.isEmpty)
}
test("ignores bad messages") {
var recvdSpan: Option[ThriftSpan] = None
val receiver = new ScribeReceiver(Set(category), { s =>
recvdSpan = Some(s.head)
Future.Done
})
assert(Await.result(receiver.log(Seq(LogEntry(category, "badencoding")))) === ResultCode.Ok)
assert(recvdSpan.isEmpty)
}
}
|
jfeltesse-mdsol/zipkin
|
zipkin-receiver-scribe/src/test/scala/com/twitter/zipkin/receiver/scribe/ScribeSpanReceiverTest.scala
|
Scala
|
apache-2.0
| 4,694 |
package mesosphere.marathon.api.v2
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.state.AppDefinition
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.{ MarathonConf, MarathonSpec }
import mesosphere.marathon.tasks.TaskQueue
import org.scalatest.Matchers
import play.api.libs.json.{ JsObject, Json }
class QueueResourceTest extends MarathonSpec with Matchers {
// regression test for #1210
test("return well formatted JSON") {
val queue = new TaskQueue
val app1 = AppDefinition(id = "app1".toRootPath)
val app2 = AppDefinition(id = "app2".toRootPath)
val resource = new QueueResource(queue, mock[MarathonConf])
queue.add(app1, 4)
queue.add(app2, 2)
for (_ <- 0 until 10)
queue.rateLimiter.addDelay(app2)
val json = Json.parse(resource.index().getEntity.toString)
val queuedApp1 = Json.obj(
"count" -> 4,
"delay" -> Json.obj(
"overdue" -> true
),
"app" -> app1
)
val queuedApp2 = Json.obj(
"count" -> 2,
"delay" -> Json.obj(
"overdue" -> false
),
"app" -> app2
)
val queuedApps = (json \\ "queue").as[Seq[JsObject]]
assert(queuedApps.contains(queuedApp1))
assert(queuedApps.contains(queuedApp2))
}
}
|
14Zen/marathon
|
src/test/scala/mesosphere/marathon/api/v2/QueueResourceTest.scala
|
Scala
|
apache-2.0
| 1,293 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.IOException
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogTable, CatalogTablePartition}
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode
import org.apache.spark.sql.util.SchemaUtils
/**
* A command for writing data to a [[HadoopFsRelation]]. Supports both overwriting and appending.
* Writing to dynamic partitions is also supported.
*
* @param staticPartitions partial partitioning spec for write. This defines the scope of partition
* overwrites: when the spec is empty, all partitions are overwritten.
* When it covers a prefix of the partition keys, only partitions matching
* the prefix are overwritten.
* @param ifPartitionNotExists If true, only write if the partition does not exist.
* Only valid for static partitions.
*/
case class InsertIntoHadoopFsRelationCommand(
outputPath: Path,
staticPartitions: TablePartitionSpec,
ifPartitionNotExists: Boolean,
partitionColumns: Seq[Attribute],
bucketSpec: Option[BucketSpec],
fileFormat: FileFormat,
options: Map[String, String],
query: LogicalPlan,
mode: SaveMode,
catalogTable: Option[CatalogTable],
fileIndex: Option[FileIndex],
outputColumnNames: Seq[String])
extends DataWritingCommand {
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.escapePathName
override def run(sparkSession: SparkSession, child: SparkPlan): Seq[Row] = {
    // Most formats don't do well with duplicate columns, so let's not allow that
SchemaUtils.checkColumnNameDuplication(
outputColumnNames,
s"when inserting into $outputPath",
sparkSession.sessionState.conf.caseSensitiveAnalysis)
val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(options)
val fs = outputPath.getFileSystem(hadoopConf)
val qualifiedOutputPath = outputPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
val partitionsTrackedByCatalog = sparkSession.sessionState.conf.manageFilesourcePartitions &&
catalogTable.isDefined &&
catalogTable.get.partitionColumnNames.nonEmpty &&
catalogTable.get.tracksPartitionsInCatalog
var initialMatchingPartitions: Seq[TablePartitionSpec] = Nil
var customPartitionLocations: Map[TablePartitionSpec, String] = Map.empty
var matchingPartitions: Seq[CatalogTablePartition] = Seq.empty
// When partitions are tracked by the catalog, compute all custom partition locations that
// may be relevant to the insertion job.
if (partitionsTrackedByCatalog) {
matchingPartitions = sparkSession.sessionState.catalog.listPartitions(
catalogTable.get.identifier, Some(staticPartitions))
initialMatchingPartitions = matchingPartitions.map(_.spec)
customPartitionLocations = getCustomPartitionLocations(
fs, catalogTable.get, qualifiedOutputPath, matchingPartitions)
}
val parameters = CaseInsensitiveMap(options)
val partitionOverwriteMode = parameters.get("partitionOverwriteMode")
// scalastyle:off caselocale
.map(mode => PartitionOverwriteMode.withName(mode.toUpperCase))
// scalastyle:on caselocale
.getOrElse(sparkSession.sessionState.conf.partitionOverwriteMode)
val enableDynamicOverwrite = partitionOverwriteMode == PartitionOverwriteMode.DYNAMIC
// This config only makes sense when we are overwriting a partitioned dataset with dynamic
// partition columns.
val dynamicPartitionOverwrite = enableDynamicOverwrite && mode == SaveMode.Overwrite &&
staticPartitions.size < partitionColumns.length
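    // Illustrative example (not from the original source): for a table
    // partitioned by (p1, p2), "INSERT OVERWRITE ... PARTITION (p1 = 1)"
    // leaves p2 dynamic, so staticPartitions.size (1) is less than
    // partitionColumns.length (2); with SaveMode.Overwrite and
    // PartitionOverwriteMode.DYNAMIC the flag above becomes true and only
    // the partitions actually written to are replaced.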
val committer = FileCommitProtocol.instantiate(
sparkSession.sessionState.conf.fileCommitProtocolClass,
jobId = java.util.UUID.randomUUID().toString,
outputPath = outputPath.toString,
dynamicPartitionOverwrite = dynamicPartitionOverwrite)
val doInsertion = if (mode == SaveMode.Append) {
true
} else {
val pathExists = fs.exists(qualifiedOutputPath)
(mode, pathExists) match {
case (SaveMode.ErrorIfExists, true) =>
throw new AnalysisException(s"path $qualifiedOutputPath already exists.")
case (SaveMode.Overwrite, true) =>
if (ifPartitionNotExists && matchingPartitions.nonEmpty) {
false
} else if (dynamicPartitionOverwrite) {
// For dynamic partition overwrite, do not delete partition directories ahead.
true
} else {
deleteMatchingPartitions(fs, qualifiedOutputPath, customPartitionLocations, committer)
true
}
case (SaveMode.Overwrite, _) | (SaveMode.ErrorIfExists, false) =>
true
case (SaveMode.Ignore, exists) =>
!exists
case (s, exists) =>
throw new IllegalStateException(s"unsupported save mode $s ($exists)")
}
}
if (doInsertion) {
def refreshUpdatedPartitions(updatedPartitionPaths: Set[String]): Unit = {
val updatedPartitions = updatedPartitionPaths.map(PartitioningUtils.parsePathFragment)
if (partitionsTrackedByCatalog) {
val newPartitions = updatedPartitions -- initialMatchingPartitions
if (newPartitions.nonEmpty) {
AlterTableAddPartitionCommand(
catalogTable.get.identifier, newPartitions.toSeq.map(p => (p, None)),
ifNotExists = true).run(sparkSession)
}
// For dynamic partition overwrite, we never remove partitions but only update existing
// ones.
if (mode == SaveMode.Overwrite && !dynamicPartitionOverwrite) {
val deletedPartitions = initialMatchingPartitions.toSet -- updatedPartitions
if (deletedPartitions.nonEmpty) {
AlterTableDropPartitionCommand(
catalogTable.get.identifier, deletedPartitions.toSeq,
ifExists = true, purge = false,
retainData = true /* already deleted */).run(sparkSession)
}
}
}
}
val updatedPartitionPaths =
FileFormatWriter.write(
sparkSession = sparkSession,
plan = child,
fileFormat = fileFormat,
committer = committer,
outputSpec = FileFormatWriter.OutputSpec(
qualifiedOutputPath.toString, customPartitionLocations, outputColumns),
hadoopConf = hadoopConf,
partitionColumns = partitionColumns,
bucketSpec = bucketSpec,
statsTrackers = Seq(basicWriteJobStatsTracker(hadoopConf)),
options = options)
// update metastore partition metadata
if (updatedPartitionPaths.isEmpty && staticPartitions.nonEmpty
&& partitionColumns.length == staticPartitions.size) {
        // Make sure a fully static, empty partition is still registered in
        // the catalog; otherwise it could not be loaded from the datasource table.
val staticPathFragment =
PartitioningUtils.getPathFragment(staticPartitions, partitionColumns)
refreshUpdatedPartitions(Set(staticPathFragment))
} else {
refreshUpdatedPartitions(updatedPartitionPaths)
}
// refresh cached files in FileIndex
fileIndex.foreach(_.refresh())
// refresh data cache if table is cached
sparkSession.catalog.refreshByPath(outputPath.toString)
if (catalogTable.nonEmpty) {
CommandUtils.updateTableStats(sparkSession, catalogTable.get)
}
} else {
logInfo("Skipping insertion into a relation that already exists.")
}
Seq.empty[Row]
}
/**
* Deletes all partition files that match the specified static prefix. Partitions with custom
* locations are also cleared based on the custom locations map given to this class.
*/
private def deleteMatchingPartitions(
fs: FileSystem,
qualifiedOutputPath: Path,
customPartitionLocations: Map[TablePartitionSpec, String],
committer: FileCommitProtocol): Unit = {
val staticPartitionPrefix = if (staticPartitions.nonEmpty) {
"/" + partitionColumns.flatMap { p =>
staticPartitions.get(p.name) match {
case Some(value) =>
Some(escapePathName(p.name) + "=" + escapePathName(value))
case None =>
None
}
}.mkString("/")
} else {
""
}
// first clear the path determined by the static partition keys (e.g. /table/foo=1)
val staticPrefixPath = qualifiedOutputPath.suffix(staticPartitionPrefix)
if (fs.exists(staticPrefixPath) && !committer.deleteWithJob(fs, staticPrefixPath, true)) {
throw new IOException(s"Unable to clear output " +
s"directory $staticPrefixPath prior to writing to it")
}
// now clear all custom partition locations (e.g. /custom/dir/where/foo=2/bar=4)
for ((spec, customLoc) <- customPartitionLocations) {
assert(
(staticPartitions.toSet -- spec).isEmpty,
"Custom partition location did not match static partitioning keys")
val path = new Path(customLoc)
if (fs.exists(path) && !committer.deleteWithJob(fs, path, true)) {
throw new IOException(s"Unable to clear partition " +
s"directory $path prior to writing to it")
}
}
}
/**
* Given a set of input partitions, returns those that have locations that differ from the
* Hive default (e.g. /k1=v1/k2=v2). These partitions were manually assigned locations by
* the user.
*
* @return a mapping from partition specs to their custom locations
*/
private def getCustomPartitionLocations(
fs: FileSystem,
table: CatalogTable,
qualifiedOutputPath: Path,
partitions: Seq[CatalogTablePartition]): Map[TablePartitionSpec, String] = {
partitions.flatMap { p =>
val defaultLocation = qualifiedOutputPath.suffix(
"/" + PartitioningUtils.getPathFragment(p.spec, table.partitionSchema)).toString
val catalogLocation = new Path(p.location).makeQualified(
fs.getUri, fs.getWorkingDirectory).toString
if (catalogLocation != defaultLocation) {
Some(p.spec -> catalogLocation)
} else {
None
}
}.toMap
}
}
|
caneGuy/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala
|
Scala
|
apache-2.0
| 11,508 |
/*
Copyright 2016 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.online
import com.twitter.bijection.twitter_util.UtilBijections
import com.twitter.summingbird.online.option.{ MaxFutureWaitTime, MaxWaitingFutures }
import com.twitter.util._
import org.scalacheck._
import org.scalacheck.Gen._
import org.scalacheck.Arbitrary._
import org.scalacheck.Prop._
import org.scalatest.WordSpec
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Seconds, Span }
import scala.util.{ Failure, Random, Success }
case class NonNegativeShort(get: Short) {
require(get >= 0)
}
class FutureQueueLaws extends Properties("FutureQueue") with Eventually {
def genTry[T](implicit arb: Arbitrary[T]): Gen[Try[T]] = Gen.oneOf(arb.arbitrary.map(Return(_)), Arbitrary.arbitrary[java.lang.Exception].map(Throw(_)))
implicit def arbTry[T: Arbitrary] = Arbitrary(genTry[T])
implicit val arbNonNegativeShort: Arbitrary[NonNegativeShort] = Arbitrary(
Arbitrary.arbitrary[Short].filter { _ >= 0 }.map { NonNegativeShort }
)
val twitterToScala = UtilBijections.twitter2ScalaTry[String]
property("waitN should wait for exactly n futures to finish") =
forAll { (futuresCount: NonNegativeShort, waitOn: NonNegativeShort, valueToFill: Try[Unit]) =>
val ps = 0.until(futuresCount.get).map { _ => Promise[Unit]() }.toArray
val t = new Thread {
@volatile var unblocked = false
override def run() = {
Await.result(FutureQueue.waitN(ps, waitOn.get))
unblocked = true
}
}
t.start
for (i <- 0 until Math.min(futuresCount.get, waitOn.get)) {
assert(t.unblocked == false)
valueToFill match {
case Return(v) =>
ps(i).setValue(v)
case Throw(e) =>
ps(i).setException(e)
}
}
eventually(timeout(Span(5, Seconds)))(assert(t.unblocked == true))
t.join
true
}
property("not block in dequeue if within bound") =
forAll { (futuresCount: NonNegativeShort, slackSpace: NonNegativeShort) =>
val fq = new FutureQueue[Unit, Unit](
MaxWaitingFutures(futuresCount.get + slackSpace.get),
MaxFutureWaitTime(Duration.fromSeconds(20))
)
fq.addAll((0 until futuresCount.get).map { _ =>
() -> Promise[Unit]
})
val start = Time.now
val res = fq.dequeue(futuresCount.get)
val end = Time.now
      res.isEmpty &&
        (end - start < Duration.fromSeconds(15)) &&
        fq.numPendingOutstandingFutures.get == futuresCount.get
}
property("preserves status of Future.const via addAll") =
forAll { inputs: Seq[(String, Try[String])] =>
val count = inputs.size
val fq = new FutureQueue[String, String](
MaxWaitingFutures(count + 1),
MaxFutureWaitTime(Duration.fromSeconds(20))
)
fq.addAll(inputs.map {
case (state, t) =>
state -> Future.const(t)
})
fq.dequeue(count) == inputs.map {
case (state, t) =>
state -> twitterToScala(t)
}
}
property("preserves status of Future.const via add") =
forAll { inputs: Seq[(String, Try[String])] =>
val count = inputs.size
val fq = new FutureQueue[String, String](
MaxWaitingFutures(count + 1),
MaxFutureWaitTime(Duration.fromSeconds(20))
)
inputs.foreach {
case (state, t) =>
fq.add(state, Future.const(t))
}
fq.dequeue(count) == inputs.map {
case (state, t) =>
state -> twitterToScala(t)
}
}
property("accounts for completed futures") =
forAll { (incomplete: NonNegativeShort, complete: NonNegativeShort) =>
val incompleteFuture = Promise[Unit]
val completeFuture = Promise[Unit]
val incompleteFutures = Seq.fill(incomplete.get)(incompleteFuture)
val completeFutures = Seq.fill(complete.get)(completeFuture)
val mixedFutures = Random.shuffle(incompleteFutures ++ completeFutures)
val fq = new FutureQueue[Unit, Unit](
MaxWaitingFutures(1),
MaxFutureWaitTime(Duration.fromSeconds(20))
)
fq.addAll(mixedFutures.map { () -> _ })
val initialPendingCount = fq.numPendingOutstandingFutures.get
completeFuture.setValue(())
initialPendingCount == (incomplete.get + complete.get) &&
fq.numPendingOutstandingFutures.get == incomplete.get
}
}
|
twitter/summingbird
|
summingbird-online/src/test/scala/com/twitter/summingbird/online/FutureQueueLaws.scala
|
Scala
|
apache-2.0
| 4,939 |
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.user.accountsettings.view.parameters
import com.esofthead.mycollab.module.user.domain.Role
import com.esofthead.mycollab.module.user.domain.criteria.RoleSearchCriteria
import com.esofthead.mycollab.vaadin.mvp.ScreenData
/**
* @author MyCollab Ltd.
* @since 5.1.0
*/
object RoleScreenData {
class Read(params: Integer) extends ScreenData[Integer](params) {}
class Add(params: Role) extends ScreenData[Role](params) {}
class Edit(params: Role) extends ScreenData[Role](params) {}
class Search(params: RoleSearchCriteria) extends ScreenData[RoleSearchCriteria](params) {}
}
|
maduhu/mycollab
|
mycollab-web/src/main/scala/com/esofthead/mycollab/module/user/accountsettings/view/parameters/RoleScreenData.scala
|
Scala
|
agpl-3.0
| 1,330 |
package uk.gov.digital.ho.proving.financialstatus.client
import java.util
import org.springframework.beans.factory.annotation.{Autowired, Value}
import org.springframework.http._
import org.springframework.retry.backoff.FixedBackOffPolicy
import org.springframework.retry.policy.SimpleRetryPolicy
import org.springframework.retry.support.RetryTemplate
import org.springframework.retry.{RetryCallback, RetryContext}
import org.springframework.stereotype.Service
import org.springframework.web.client.{HttpServerErrorException, ResourceAccessException, RestTemplate}
case class HttpClientResponse(httpStatus: HttpStatus, body: String)
@Service
class HttpUtils @Autowired()(rest: RestTemplate,
@Value("${retry.attempts}") maxAttempts: Int,
@Value("${retry.delay}") backOffPeriod: Long) {
private val emptyBody = ""
private val retryTemplate = createRetryTemplate(maxAttempts, backOffPeriod)
class RetryableCall(url: String, requestEntity: HttpEntity[String]) extends RetryCallback[ResponseEntity[String], RuntimeException] {
def doWithRetry(retryContext: RetryContext): ResponseEntity[String] = {
rest.exchange(this.url, HttpMethod.GET, requestEntity, classOf[String])
}
}
def performRequest(url: String, userId: String, requestId: String): HttpClientResponse = {
val defaultHeaders = new HttpHeaders()
defaultHeaders.add("userId", userId)
defaultHeaders.add("requestId", requestId)
val requestEntity = new HttpEntity[String](emptyBody, defaultHeaders)
val responseEntity = retryTemplate.execute(new RetryableCall(url, requestEntity))
HttpClientResponse(responseEntity.getStatusCode, responseEntity.getBody)
}
def exceptionsToRetry: java.util.Map[java.lang.Class[_ <: java.lang.Throwable], java.lang.Boolean] = {
val javaMap: java.util.Map[java.lang.Class[_ <: java.lang.Throwable], java.lang.Boolean] = new util.HashMap()
javaMap.put(classOf[ResourceAccessException], true)
javaMap.put(classOf[HttpServerErrorException], true)
javaMap
}
def createRetryTemplate(maxAttempts: Int, backOffPeriod: Long): RetryTemplate = {
val retryTemplate = new RetryTemplate()
val simpleRetryPolicy = new SimpleRetryPolicy(maxAttempts, exceptionsToRetry)
val fixedBackOffPolicy = new FixedBackOffPolicy()
fixedBackOffPolicy.setBackOffPeriod(backOffPeriod)
retryTemplate.setBackOffPolicy(fixedBackOffPolicy)
retryTemplate.setRetryPolicy(simpleRetryPolicy)
retryTemplate
}
}
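// Usage sketch (illustrative, not part of the original source):
//   val client = new HttpUtils(new RestTemplate(), maxAttempts = 3, backOffPeriod = 500L)
//   val response = client.performRequest(
//     "http://upstream/service", userId = "u-123", requestId = "r-456")
//   // the GET is retried up to maxAttempts times with a fixed back-off of
//   // backOffPeriod ms when it fails with ResourceAccessException or
//   // HttpServerErrorException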
|
UKHomeOffice/pttg-fs-api
|
src/main/scala/uk/gov/digital/ho/proving/financialstatus/client/HttpUtils.scala
|
Scala
|
mit
| 2,524 |
package controllers
import exceptions.{ StorageInsertionException, AccessKeyException }
import play.api.mvc._
import java.util.zip.{ GZIPOutputStream, ZipEntry, ZipOutputStream, GZIPInputStream }
import java.io._
import org.apache.commons.io.{ FileUtils, IOUtils }
import play.api.libs.iteratee.Enumerator
import play.libs.Akka
import akka.actor.Actor
import play.api.{ Play, Logger }
import core._
import scala.{ Either, Option }
import core.storage.FileStorage
import util.SimpleDataSetParser
import java.util.concurrent.TimeUnit
import models._
import play.api.libs.Files.TemporaryFile
import eu.delving.stats.Stats
import scala.collection.JavaConverters._
import java.util.Date
import models.statistics._
import HubMongoContext.hubFileStores
import xml.Node
import play.api.libs.concurrent.Promise
import org.apache.commons.lang.StringEscapeUtils
import scala.util.matching.Regex.Match
import java.util.regex.Matcher
import plugins.DataSetPlugin
import models.statistics.DataSetStatisticsContext
import models.statistics.FieldFrequencies
import models.statistics.FieldValues
import play.api.libs.MimeTypes
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.duration._
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.gridfs.{ GridFSDBFile, GridFS }
import com.escalatesoft.subcut.inject.BindingModule
import play.api.Play.current
/**
* This Controller is responsible for all the interaction with the SIP-Creator.
* Access control is done using OAuth2
*
* @author Manuel Bernhardt <[email protected]>
*/
class SipCreatorEndPoint(implicit val bindingModule: BindingModule) extends ApplicationController with Logging {
val organizationServiceLocator = HubModule.inject[DomainServiceLocator[OrganizationService]](name = None)
val DOT_PLACEHOLDER = "--"
private def basexStorage(implicit configuration: OrganizationConfiguration) = HubServices.basexStorages.getResource(configuration)
private var connectedUserObject: Option[HubUser] = None
def AuthenticatedAction[A](accessToken: Option[String])(action: Action[A]): Action[A] = MultitenantAction(action.parser) {
implicit request =>
{
if (accessToken.isEmpty && Play.isDev) {
connectedUserObject = HubUser.dao.findByUsername(request.queryString.get("userName").get.head)
action(request)
} else if (accessToken.isEmpty) {
Unauthorized("No access token provided")
} else if (!HubUser.isValidToken(accessToken.get)) {
Unauthorized("Access Key %s not accepted".format(accessToken.get))
} else {
connectedUserObject = HubUser.getUserByToken(accessToken.get)
action(request)
}
}
}
def OrganizationAction[A](orgId: String, accessToken: Option[String])(action: Action[A]): Action[A] = AuthenticatedAction(accessToken) {
MultitenantAction(action.parser) {
implicit request =>
if (orgId == null || orgId.isEmpty) {
BadRequest("No orgId provided")
} else {
if (!organizationServiceLocator.byDomain.exists(orgId)) {
NotFound("Unknown organization " + orgId)
} else {
action(request)
}
}
}
}
def getConnectedUser: HubUser = connectedUserObject.getOrElse({
log.warn("Attemtping to connect with an invalid access token")
throw new AccessKeyException("No access token provided")
})
def connectedUser = getConnectedUser.userName
def listAll(accessToken: Option[String]) = AuthenticatedAction(accessToken) {
MultitenantAction {
implicit request =>
val dataSets = DataSet.dao.findAllForUser(
connectedUserObject.get.userName,
configuration.orgId,
DataSetPlugin.ROLE_DATASET_EDITOR
)
val dataSetsXml = <data-set-list>{
dataSets.map {
ds =>
val creator = HubUser.dao.findByUsername(ds.getCreator)
val lockedBy = ds.getLockedBy
<data-set>
<spec>{ ds.spec }</spec>
<name>{ ds.details.name }</name>
<orgId>{ ds.orgId }</orgId>
{
if (creator.isDefined) {
<createdBy>
<username>{ creator.get.userName }</username>
<fullname>{ creator.get.fullname }</fullname>
<email>{ creator.get.email }</email>
</createdBy>
} else {
<createdBy>
<username>{ ds.getCreator }</username>
</createdBy>
}
}{
if (lockedBy != None) {
<lockedBy>
<username>{ lockedBy.get.userName }</username>
<fullname>{ lockedBy.get.fullname }</fullname>
<email>{ lockedBy.get.email }</email>
</lockedBy>
}
}
<state>{ ds.state.name }</state>
<schemaVersions>
{
ds.getAllMappingSchemas.map { schema =>
<schemaVersion>
<prefix>{ schema.getPrefix }</prefix>
<version>{ schema.getVersion }</version>
</schemaVersion>
}
}
</schemaVersions>
<recordCount>{ ds.details.total_records }</recordCount>
</data-set>
}
}</data-set-list>
Ok(dataSetsXml)
}
}
def unlock(orgId: String, spec: String, accessToken: Option[String]): Action[AnyContent] =
OrganizationAction(orgId, accessToken) {
MultitenantAction {
implicit request =>
val dataSet = DataSet.dao.findBySpecAndOrgId(spec, orgId)
if (dataSet.isEmpty) {
val msg = "Unknown spec %s".format(spec)
NotFound(msg)
} else {
if (dataSet.get.lockedBy == None) {
Ok
} else if (dataSet.get.lockedBy.get == connectedUser) {
val updated = dataSet.get.copy(lockedBy = None)
DataSet.dao.save(updated)
Ok
} else {
Error("You cannot unlock a DataSet locked by someone else")
}
}
}
}
/**
* Takes a request of filenames and replies with the ones it is missing:
*
* 15E64004081B71EE5CA8D55EF735DE44__hints.txt
* 19EE613335AFBFFAD3F8BA271FBC4E96__mapping_icn.xml
* 45109F902FCE191BBBFC176287B9B2A4__source.xml.gz
* 19EE613335AFBFFAD3F8BA271FBC4E96__valid_icn.bit
*/
def acceptFileList(orgId: String, spec: String, accessToken: Option[String]): Action[AnyContent] =
OrganizationAction(orgId, accessToken) {
MultitenantAction {
implicit request =>
val dataSet = DataSet.dao.findBySpecAndOrgId(spec, orgId)
if (dataSet.isEmpty) {
val msg = "DataSet with spec %s not found".format(spec)
NotFound(msg)
} else {
val fileList: String = request.body.asText.getOrElse("")
log.debug("Receiving file upload request, possible files to receive are: \n" + fileList)
val lines = fileList.split('\n').map(_.trim).toList
def fileRequired(fileName: String): Option[String] = {
val Array(hash, name) = fileName split ("__")
val maybeHash = dataSet.get.hashes.get(name.replaceAll("\\.", DOT_PLACEHOLDER))
maybeHash match {
case Some(storedHash) if hash != storedHash => Some(fileName)
case Some(storedHash) if hash == storedHash => None
case None => Some(fileName)
}
}
val requiredFiles = (lines flatMap fileRequired).map(_.trim).mkString("\n")
Ok(requiredFiles)
}
}
}
def acceptFile(orgId: String, spec: String, fileName: String, accessToken: Option[String]) =
OrganizationAction(orgId, accessToken) {
MultitenantAction(parse.temporaryFile) {
implicit request =>
val dataSet = DataSet.dao.findBySpecAndOrgId(spec, orgId)
if (dataSet.isEmpty) {
val msg = "DataSet with spec %s not found".format(spec)
NotFound(msg)
} else {
val SipCreatorEndPoint.FileName(hash, kind, prefix, extension) = fileName
if (hash.isEmpty) {
val msg = "No hash available for file name " + fileName
Error(msg)
} else if (request.contentType == None) {
BadRequest("Request has no content type")
} else if (!DataSet.dao.canEdit(dataSet.get, connectedUser)) {
log.warn("User %s tried to edit dataSet %s without the necessary rights"
.format(connectedUser, dataSet.get.spec))
Forbidden("You are not allowed to modify this DataSet")
} else {
val inputStream = if (request.contentType == Some("application/x-gzip"))
new GZIPInputStream(new FileInputStream(request.body.file))
else
new FileInputStream(request.body.file)
val actionResult: Either[String, String] = kind match {
case "hints" if extension == "txt" =>
receiveHints(dataSet.get, inputStream)
case "mapping" if extension == "xml" =>
receiveMapping(dataSet.get, inputStream, spec, hash)
case "source" if extension == "xml.gz" => {
if (dataSet.get.state == DataSetState.PROCESSING) {
Left("%s: Cannot upload source while the set is being processed".format(spec))
} else {
val receiveActor = Akka.system.actorFor("akka://application/user/plugin-dataSet/dataSetParser")
receiveActor ! SourceStream(
dataSet.get, connectedUser, inputStream, request.body, configuration
)
DataSet.dao.updateState(dataSet.get, DataSetState.PARSING)
Right("Received it")
}
}
case "validation" if extension == "int" =>
receiveInvalidRecords(dataSet.get, prefix, inputStream)
case x if x.startsWith("stats-") =>
receiveSourceStats(dataSet.get, inputStream, prefix, fileName, request.body.file)
case "links" =>
receiveLinks(dataSet.get, prefix, fileName, request.body.file)
case "image" =>
FileStorage.storeFile(
request.body.file,
MimeTypes.forFileName(fileName).getOrElse("unknown/unknown"),
fileName,
dataSet.get.spec,
Some("sourceImage"),
Map("spec" -> dataSet.get.spec, "orgId" -> dataSet.get.orgId)
).map { f =>
Right("Ok")
}.getOrElse(
Left("Couldn't store file " + fileName)
)
case _ => {
val msg = "Unknown file type %s".format(kind)
Left(msg)
}
}
actionResult match {
case Right(ok) => {
DataSet.dao.addHash(dataSet.get, fileName.split("__")(1).replaceAll("\\.", DOT_PLACEHOLDER), hash)
log.info("Successfully accepted file %s for DataSet %s".format(fileName, spec))
Ok
}
case Left(houston) => {
Error("Error accepting file %s for DataSet %s: %s".format(fileName, spec, houston))
}
}
}
}
}
}
private def receiveInvalidRecords(dataSet: DataSet, prefix: String, inputStream: InputStream) = {
val dis = new DataInputStream(inputStream)
val howMany = dis.readInt()
val invalidIndexes: List[Int] = (for (i <- 1 to howMany) yield dis.readInt()).toList
DataSet.dao(dataSet.orgId).updateInvalidRecords(dataSet, prefix, invalidIndexes)
Right("All clear")
}
private def receiveMapping(dataSet: DataSet, inputStream: InputStream, spec: String, hash: String)(implicit configuration: OrganizationConfiguration): Either[String, String] = {
val mappingString = IOUtils.toString(inputStream, "UTF-8")
DataSet.dao(dataSet.orgId).updateMapping(dataSet, mappingString)
Right("Good news everybody")
}
private def receiveSourceStats(
dataSet: DataSet, inputStream: InputStream, schemaPrefix: String, fileName: String, file: File)(implicit configuration: OrganizationConfiguration): Either[String, String] = {
try {
val f = hubFileStores.getResource(configuration).createFile(file)
val stats = Stats.read(inputStream)
val context = DataSetStatisticsContext(
dataSet.orgId,
dataSet.spec,
schemaPrefix,
dataSet.details.facts.get("provider").toString,
dataSet.details.facts.get("dataProvider").toString,
if (dataSet.details.facts.containsField("providerUri"))
dataSet.details.facts.get("providerUri").toString
else
"",
if (dataSet.details.facts.containsField("dataProviderUri"))
dataSet.details.facts.get("dataProviderUri").toString
else
"",
new Date()
)
f.put("contentType", "application/x-gzip")
f.put("orgId", dataSet.orgId)
f.put("spec", dataSet.spec)
f.put("schema", schemaPrefix)
f.put("uploadDate", context.uploadDate)
f.put("hubFileType", "source-statistics")
f.put("filename", fileName)
f.save()
val dss = DataSetStatistics(
context = context,
recordCount = stats.recordStats.recordCount,
fieldCount = Histogram(stats.recordStats.fieldCount)
)
DataSetStatistics.dao.insert(dss).map {
dssId =>
{
stats.fieldValueMap.asScala.foreach {
fv =>
val fieldValues = FieldValues(
parentId = dssId,
context = context,
path = fv._1.toString,
valueStats = ValueStats(fv._2)
)
DataSetStatistics.dao.values.insert(fieldValues)
}
stats.recordStats.frequencies.asScala.foreach {
ff =>
val frequencies = FieldFrequencies(
parentId = dssId,
context = context,
path = ff._1.toString,
histogram = Histogram(ff._2)
)
DataSetStatistics.dao.frequencies.insert(frequencies)
}
Right("Good")
}
}.getOrElse {
Left("Could not store DataSetStatistics")
}
} catch {
case t: Throwable =>
t.printStackTrace()
Left("Error receiving source statistics: " + t.getMessage)
}
}
private def receiveHints(dataSet: DataSet, inputStream: InputStream) = {
val freshHints = Stream.continually(inputStream.read).takeWhile(-1 != _).map(_.toByte).toArray
val updatedDataSet = dataSet.copy(hints = freshHints)
DataSet.dao(dataSet.orgId).save(updatedDataSet)
Right("Allright")
}
private def receiveLinks(dataSet: DataSet, schemaPrefix: String, fileName: String, file: File)(implicit configuration: OrganizationConfiguration) = {
val store = hubFileStores.getResource(configuration)
import com.mongodb.casbah.gridfs.Imports._
// remove previous version
findLinksFile(dataSet.orgId, dataSet.spec, schemaPrefix, store) foreach { previous =>
store.remove(previous._id.get)
}
val f = store.createFile(file)
try {
f.put("contentType", "application/x-gzip")
f.put("orgId", dataSet.orgId)
f.put("spec", dataSet.spec)
f.put("schema", schemaPrefix)
f.put("uploadDate", new Date())
f.put("hubFileType", "links")
f.put("filename", fileName)
f.save()
Right("Ok")
} catch {
case t: Throwable =>
log.error("SipCreatorEndPoint: Could not store links", t)
Left("Error while receiving links: " + t.getMessage)
}
}
private def findLinksFile(orgId: String, spec: String, schemaPrefix: String, store: GridFS): Option[GridFSDBFile] = {
store.findOne(MongoDBObject("orgId" -> orgId, "spec" -> spec, "schema" -> schemaPrefix, "hubFileType" -> "links"))
}
def fetchSIP(orgId: String, spec: String, accessToken: Option[String]) = OrganizationAction(orgId, accessToken) {
MultitenantAction {
implicit request =>
Async {
Promise.pure {
val maybeDataSet = DataSet.dao.findBySpecAndOrgId(spec, orgId)
if (maybeDataSet.isEmpty) {
Left(NotFound("Unknown spec %s".format(spec)))
} else if (maybeDataSet.isDefined && maybeDataSet.get.state == DataSetState.PARSING) {
Left(Error("DataSet %s is being uploaded at the moment, so you cannot download it at the same time"
.format(spec)))
} else {
val dataSet = maybeDataSet.get
// lock it right away
DataSet.dao.lock(dataSet, connectedUser)
val updatedDataSet = DataSet.dao.findBySpecAndOrgId(spec, orgId).get
DataSet.dao.save(updatedDataSet)
Right(Enumerator.outputStream(outputStream => writeSipStream(dataSet, outputStream)))
}
}.map {
result =>
if (result.isLeft) {
result.left.get
} else {
Ok.stream(result.right.get >>> Enumerator.eof).withHeaders("Content-Type" -> "application/zip")
}
}
}
}
}
def writeSipStream(dataSet: DataSet, outputStream: OutputStream)(implicit configuration: OrganizationConfiguration) {
val zipOut = new ZipOutputStream(outputStream)
val store = hubFileStores.getResource(configuration)
writeEntry("dataset_facts.txt", zipOut) {
out => IOUtils.write(dataSet.details.getFactsAsText, out)
}
writeEntry("hints.txt", zipOut) {
out => IOUtils.copy(new ByteArrayInputStream(dataSet.hints), out)
}
store.find(MongoDBObject("orgId" -> dataSet.orgId, "spec" -> dataSet.spec, "hubFileType" -> "links")).toSeq.foreach { links =>
val prefix: String = links.get("schema").toString
writeEntry(s"links_$prefix.csv.gz", zipOut) {
out => IOUtils.copy(links.getInputStream, out)
}
}
val recordCount = basexStorage.count(dataSet)
if (recordCount > 0) {
writeEntry("source.xml", zipOut) {
out => writeDataSetSource(dataSet, out)
}
}
for (mapping <- dataSet.mappings) {
if (mapping._2.recordMapping != None) {
writeEntry("mapping_%s.xml".format(mapping._1), zipOut) {
out => writeContent(mapping._2.recordMapping.get, out)
}
}
}
zipOut.flush()
outputStream.close()
}
private def writeDataSetSource(dataSet: DataSet, outputStream: OutputStream)(implicit configuration: OrganizationConfiguration) {
val now = System.currentTimeMillis()
val tagContentMatcher = """>([^<]+)<""".r
val inputTagMatcher = """<input (.*) id="(.*)">""".r
def buildNamespaces(attrs: Map[String, String]): String = {
val attrBuilder = new StringBuilder
attrs.filterNot(_._1.isEmpty).toSeq.sortBy(_._1).foreach(
ns => attrBuilder.append("""xmlns:%s="%s"""".format(ns._1, ns._2)).append(" ")
)
attrBuilder.mkString.trim
}
def buildAttributes(attrs: Map[String, String]): String = {
attrs.map(a => (a._1 -> a._2)).toList.sortBy(_._1).map(
a => """%s="%s"""".format(a._1, escapeXml(a._2))
).mkString(" ")
}
def serializeElement(n: Node): String = {
n match {
case e if !e.child.filterNot(
e => e.isInstanceOf[scala.xml.Text] || e.isInstanceOf[scala.xml.PCData]
).isEmpty =>
val content = e.child.filterNot(_.label == "#PCDATA").map(serializeElement(_)).mkString("\n")
"""<%s %s>%s</%s>""".format(
e.label, buildAttributes(e.attributes.asAttrMap), content + "\n", e.label
)
case e if e.child.isEmpty => """<%s/>""".format(e.label)
case e if !e.attributes.isEmpty => """<%s %s>%s</%s>""".format(
e.label, buildAttributes(e.attributes.asAttrMap), escapeXml(e.text), e.label
)
case e if e.attributes.isEmpty => """<%s>%s</%s>""".format(e.label, escapeXml(e.text), e.label)
case _ => "" // nope
}
}
// do not use StringEscapeUtils.escapeXml because it also escapes UTF-8 characters, which are however valid and would break source identity
def escapeXml(s: String): String = {
s.
replaceAll("&", "&").
replaceAll("<", "<").
replaceAll(">", ">").
replaceAll("\"", """).
replaceAll("'", "'")
}
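    // e.g. (illustrative): escapeXml("a < b & \"c\"") yields
    // "a &lt; b &amp; &quot;c&quot;"; multi-byte UTF-8 characters pass
    // through untouched, preserving source identity.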
val pw = new PrintWriter(new OutputStreamWriter(outputStream, "utf-8"))
val builder = new StringBuilder
builder.append("<?xml version='1.0' encoding='UTF-8'?>").append("\n")
builder.append("<delving-sip-source ")
builder.append("%s".format(buildNamespaces(dataSet.getNamespaces)))
builder.append(">")
write(builder.toString(), pw, outputStream)
basexStorage.withSession(dataSet) {
implicit session =>
val total = basexStorage.count
var count = 0
basexStorage.findAllCurrentDocuments foreach {
record =>
// the output coming from BaseX differs from the original source as follows:
// - the <input> tags contain the namespace declarations
// - the formatted XML escapes all entities including UTF-8 characters
// the following lines fix this
val noNamespaces = inputTagMatcher.replaceSomeIn(record, {
m => Some("""<input id="%s">""".format(m.group(2)))
})
def cleanup: Match => String = {
s => ">" + escapeXml(StringEscapeUtils.unescapeXml(s.group(1))) + "<"
}
val escapeForRegex = Matcher.quoteReplacement(noNamespaces)
try {
val cleaned = tagContentMatcher.replaceAllIn(escapeForRegex, cleanup)
pw.println(cleaned)
} catch {
case t: Throwable =>
log.error(
"Error while trying to sanitize following record:\n\n" + escapeForRegex
)
throw t
}
if (count % 10000 == 0) pw.flush()
if (count % 10000 == 0) {
log.info("%s: Prepared %s of %s records for download".format(dataSet
.spec, count, total))
}
count += 1
}
pw.print("</delving-sip-source>")
log.info(s"Done preparing DataSet ${dataSet.spec} for download, it took ${System.currentTimeMillis() - now} ms")
pw.flush()
}
}
private def writeEntry(name: String, out: ZipOutputStream)(f: ZipOutputStream => Unit) {
out.putNextEntry(new ZipEntry(name))
f(out)
out.flush()
out.closeEntry()
}
private def writeContent(content: String, out: OutputStream) {
val printWriter = new PrintWriter(new OutputStreamWriter(out, "utf-8"))
write(content, printWriter, out)
}
private def write(content: String, pw: PrintWriter, out: OutputStream) {
pw.println(content)
pw.flush()
}
}
object SipCreatorEndPoint {
// HASH__type[_prefix].extension
val FileName = """([^_]*)__([^._]*)_?([^.]*).(.*)""".r
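  // Illustrative destructuring (derived from the pattern above, not part
  // of the original source):
  //   val FileName(hash, kind, prefix, extension) =
  //     "19EE613335AFBFFAD3F8BA271FBC4E96__mapping_icn.xml"
  //   // hash = "19EE613335AFBFFAD3F8BA271FBC4E96", kind = "mapping",
  //   //   prefix = "icn", extension = "xml"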
private def basexStorage(implicit configuration: OrganizationConfiguration) = HubServices.basexStorages.getResource(configuration)
def loadSourceData(dataSet: DataSet, source: InputStream)(implicit configuration: OrganizationConfiguration): Long = {
    // until we have a better concept of how to deal with per-collection versions, do not make use of them here, but drop the data instead
val mayCollection = basexStorage.openCollection(dataSet)
val collection = if (mayCollection.isDefined) {
basexStorage.deleteCollection(mayCollection.get)
basexStorage.createCollection(dataSet)
} else {
basexStorage.createCollection(dataSet)
}
val parser = new SimpleDataSetParser(source, dataSet)
// use the uploaded statistics to know how many records we expect. For that purpose, use the mappings to know what prefixes we have...
// TODO we should have a more direct route to know what to expect here.
val totalRecords = dataSet.mappings.keySet.headOption.flatMap {
schema => DataSetStatistics.dao.getMostRecent(dataSet.orgId, dataSet.spec, schema).map(_.recordCount)
}
val modulo = if (totalRecords.isDefined) math.round(totalRecords.get / 100) else 100
def onRecordInserted(count: Long) {
if (count % (if (modulo == 0) 100 else modulo) == 0) DataSet.dao.updateRecordCount(dataSet, count)
}
basexStorage.store(collection, parser, parser.namespaces, onRecordInserted)
}
}
class ReceiveSource extends Actor {
var tempFileRef: TemporaryFile = null
def receive = {
case SourceStream(dataSet, userName, inputStream, tempFile, conf) =>
implicit val configuration = conf
val now = System.currentTimeMillis()
// explicitly reference the TemporaryFile so it can't get garbage collected as long as this actor is around
tempFileRef = tempFile
try {
receiveSource(dataSet, userName, inputStream) match {
case Left(t) =>
DataSet.dao.invalidateHashes(dataSet)
val message = if (t.isInstanceOf[StorageInsertionException]) {
Some("""Error while inserting record:
|
|%s
|
|Cause:
|
|%s
| """.stripMargin.format(t.getMessage, t.getCause.getMessage)
)
} else {
Some(t.getMessage)
}
DataSet.dao.updateState(dataSet, DataSetState.ERROR, Some(userName), message)
Logger("CultureHub").error(
"Error while parsing records for spec %s of org %s".format(
dataSet.spec, dataSet.orgId
),
t
)
ErrorReporter.reportError(
"DataSet Source Parser", t,
"Error occured while parsing records for spec %s of org %s".format(
dataSet.spec, dataSet.orgId
)
)
case Right(inserted) =>
val duration = Duration(System.currentTimeMillis() - now, TimeUnit.MILLISECONDS)
Logger("CultureHub").info(
"Finished parsing source for DataSet %s of organization %s. %s records inserted in %s seconds."
.format(
dataSet.spec, dataSet.orgId, inserted, duration.toSeconds
)
)
}
} catch {
case t: Throwable =>
Logger("CultureHub").error(
"Exception while processing uploaded source %s for DataSet %s".format(
tempFile.file.getAbsolutePath, dataSet.spec
),
t
)
DataSet.dao.invalidateHashes(dataSet)
DataSet.dao.updateState(
dataSet, DataSetState.ERROR, Some(userName),
Some("Error while parsing uploaded source: " + t.getMessage)
)
} finally {
tempFileRef = null
}
}
private def receiveSource(dataSet: DataSet, userName: String, inputStream: InputStream)(implicit configuration: OrganizationConfiguration): Either[Throwable, Long] = {
try {
val uploadedRecords = SipCreatorEndPoint.loadSourceData(dataSet, inputStream)
DataSet.dao.updateRecordCount(dataSet, uploadedRecords)
DataSet.dao.updateState(dataSet, DataSetState.UPLOADED, Some(userName))
Right(uploadedRecords)
} catch {
      case t: Exception => Left(t)
}
}
}
case class SourceStream(
dataSet: DataSet,
userName: String,
stream: InputStream,
temporaryFile: TemporaryFile,
configuration: OrganizationConfiguration)
|
delving/culture-hub
|
modules/dataset/app/controllers/SipCreatorEndPoint.scala
|
Scala
|
apache-2.0
| 28,513 |
/*
* Copyright (c) 2014 Ben Whitehead.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.benwhitehead.gw2.api.model
import com.fasterxml.jackson.annotation.JsonProperty
import java.util.UUID
/**
* @author Ben Whitehead
*/
case class EventName(
@JsonProperty("id") id: UUID,
@JsonProperty("name") name: String
)
case class Event(
@JsonProperty("world_id") worldId: Int,
@JsonProperty("map_id") mapId: Int,
@JsonProperty("event_id") eventId: UUID,
@JsonProperty("state") state: String
)
case class EventDetails(
@JsonProperty("name") name: String,
@JsonProperty("level") level: Int,
@JsonProperty("map_id") mapId: Int,
@JsonProperty("flags") flags: List[String],
@JsonProperty("location") location: Location
)
case class Location(
@JsonProperty("type") shape: String,
@JsonProperty("center") center: List[Double],
@JsonProperty("height") height: Double,
@JsonProperty("radius") radius: Double,
@JsonProperty("rotation") rotation: Double,
@JsonProperty("z_range") zRange: Double,
@JsonProperty("points") points: List[List[Double]]
)
|
BenWhitehead/finagle-gw2-client
|
src/main/scala/io/github/benwhitehead/gw2/api/model/Event.scala
|
Scala
|
apache-2.0
| 1,603 |
package net.lshift.diffa.kernel.config
import net.lshift.diffa.kernel.frontend.DomainEndpointDef
class NoopEndpointLifecycleListener extends EndpointLifecycleListener {
/**
* Indicates that the given endpoint has become available (or has been updated).
*/
def onEndpointAvailable(e: DomainEndpointDef) {}
/**
* Indicates that the endpoint with the given domain and name is no longer available within the system.
*/
def onEndpointRemoved(space: Long, endpoint: String) {}
}
|
0x6e6562/diffa
|
kernel/src/main/scala/net/lshift/diffa/kernel/config/NoopEndpointLifecycleListener.scala
|
Scala
|
apache-2.0
| 496 |
/*------------------------------------------------------------------------------
MiniLight Scala : minimal global illumination renderer
Harrison Ainsworth / HXA7241 : 2008-2013
http://www.hxa.name/minilight
------------------------------------------------------------------------------*/
package hxa7241.general
/**
* Simple, fast, good random number generator.
*
* @implementation
*
* 'Maximally Equidistributed Combined Tausworthe Generators'; L'Ecuyer; 1996.
* http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
* http://www.iro.umontreal.ca/~simardr/rng/lfsr113.c
*
* 'Conversion of High-Period Random Numbers to Floating Point'; Doornik; 2006.
* http://www.doornik.com/research/randomdouble.pdf
*/
final class Random() extends NotNull
{
/// queries --------------------------------------------------------------------
def int32 :Int =
{
z_m(0) = ((z_m(0) & -2) << 18) ^ (((z_m(0) << 6) ^ z_m(0)) >>> 13)
z_m(1) = ((z_m(1) & -8) << 2) ^ (((z_m(1) << 2) ^ z_m(1)) >>> 27)
z_m(2) = ((z_m(2) & -16) << 7) ^ (((z_m(2) << 13) ^ z_m(2)) >>> 21)
z_m(3) = ((z_m(3) & -128) << 13) ^ (((z_m(3) << 3) ^ z_m(3)) >>> 12)
z_m(0) ^ z_m(1) ^ z_m(2) ^ z_m(3)
}
/**
* Single precision, [0,1) interval (never returns 1).
*/
//def real32 =
// (int32 & 0xFFFFFF00).toFloat * (1.0f / 4294967296.0f) + 0.5f
/**
* Double precision, [0,1) interval (never returns 1).
*/
def real64 =
int32.toDouble * (1.0 / 4294967296.0) + 0.5 +
(int32 & 0x001FFFFF).toDouble * (1.0 / 9007199254740992.0)
/**
* Double precision, (0,1) interval (never returns 0 or 1).
*/
//def real64_ =
// int32.toDouble * (1.0 / 4294967296.0) +
// (0.5 + (1.0 / 4503599627370496.0) * 0.5) +
// (int32 & 0x000FFFFF).toDouble * (1.0 / 4503599627370496.0)
/// fields ---------------------------------------------------------------------
/*private val z_m :Array[Int] =
{
// get UUID in 32-bit chunks
val u32s :Array[Long] =
{
val u = java.util.UUID.randomUUID
val ul = Array( u.getLeastSignificantBits, u.getMostSignificantBits )
Array.tabulate( 4 )( i => (ul(i >> 1) >> (32 * (i & 1))) & 0xFFFFFFFFL)
}
// *** VERY IMPORTANT ***
// The initial seeds z1, z2, z3, z4 MUST be larger
// than 1, 7, 15, and 127 respectively.
Array.tabulate( 4 )( i =>
if (u32s(i) >= Random.SEED_MINS(i)) u32s(i).toInt else Random.SEED )
}*/
/* *** VERY IMPORTANT ***
The initial seeds z1, z2, z3, z4 MUST be larger
than 1, 7, 15, and 127 respectively. */
private val z_m :Array[Int] = Array.fill( 4 )( Random.SEED )
//val id = "%08X".format( z_m(3) )
}
object Random
{
/// constants ------------------------------------------------------------------
// default seed and seed minimums
private final val SEED :Int = 987654321
//private final val SEED_MINS = Array( 2, 8, 16, 128 )
}
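// Usage sketch (illustrative, not part of the original source):
//   val rng = new Random()
//   val i: Int    = rng.int32    // combined Tausworthe 32-bit output
//   val u: Double = rng.real64   // uniform in [0,1), never returns 1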
|
hxa7241/minilight-scala
|
src/hxa7241/general/Random.scala
|
Scala
|
cc0-1.0
| 3,021 |
/*
* Copyright (C) 2017
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package code
package model
import net.liftweb.common._
import net.liftweb.mapper._
import net.liftweb.util._
import net.liftweb.http._
class CommitteeTopic extends LongKeyedMapper[CommitteeTopic] with IdPK {
def getSingleton = CommitteeTopic
object committee extends MappedLongForeignKey(this, Committee)
object topic extends MappedLongForeignKey(this, Topic)
object dateIntroduced extends MappedDateTime(this)
object vote extends MappedInt(this) {
override def defaultValue = 0
}
}
object CommitteeTopic extends CommitteeTopic with LongKeyedMetaMapper[CommitteeTopic] {
override def dbTableName = "committeetopic"
def join (committee: Committee, topic: Topic) = this.create.committee(committee).topic(topic).dateIntroduced(new java.util.Date()).save
}
|
EasterTheBunny/ourdistrict
|
src/main/scala/code/model/CommitteeTopic.scala
|
Scala
|
gpl-3.0
| 1,445 |
package main
class WorldMap {
private var myRegions = Set.empty[Region]
private var mySuperRegions = Set.empty[SuperRegion]
def add(region: Region): Unit = {
myRegions += region
}
def add(region: SuperRegion): Unit = {
mySuperRegions += region
}
def getRegion(id: Int): Option[Region] = {
regions find (_.id == id)
}
def getSuperRegion(id: Int): Option[SuperRegion] = {
superRegions find (_.id == id)
}
def regions = myRegions
def superRegions = mySuperRegions
def removeRegion(region: Region): Unit = {
myRegions = myRegions - region
}
def getMapString: String = {
val regionsAsStrings = regions.map { r =>
Seq(r.id, r.playerName, r.armies).mkString(";")
}
regionsAsStrings.mkString(" ")
}
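  // e.g. (illustrative): regions (1, "player1", 2) and (2, "neutral", 5)
  // render as "1;player1;2 2;neutral;5"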
def copy: WorldMap = {
val newMap = new WorldMap
for (sr <- superRegions) {
newMap.add(new SuperRegion(sr.id, sr.reward))
}
for (r <- regions) {
val superRegionOpt = newMap.getSuperRegion(r.superRegion.id)
superRegionOpt foreach { superRegion =>
val newRegion = new Region(r.id, superRegion, r.playerName, r.armies)
newMap.add(newRegion)
}
}
for (r <- regions) {
val newRegionOpt = newMap.getRegion(r.id)
for {
newRegion <- newRegionOpt
neighbor <- r.neighbors
neighborRegion <- newMap.getRegion(neighbor.id)
} newRegion.addNeighbor(neighborRegion)
}
newMap
}
}
|
ramn/warlight-starterbot-scala
|
src/main/scala/main/WorldMap.scala
|
Scala
|
apache-2.0
| 1,453 |
// Copyright 2012 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.hfile.common
trait HFileMetadataKeys {
val ThriftClassKey: String = "key.thrift.class"
val ThriftClassValue: String = "value.thrift.class"
val ThriftEncodingKey: String = "thrift.protocol.factory.class"
val TimestampKey: String = "generation.millis"
val TaskIdKey: String = "generation.taskId"
val LastKeyKey: String = "hfile.LASTKEY"
val AverageKeyLengthKey = "hfile.AVG_KEY_LEN"
val AverageValueLengthKey = "hfile.AVG_VALUE_LEN"
val ComparatorKey = "hfile.COMPARATOR"
val NumEntries = "hfile.NUM_ENTRIES"
val NumUniqueKeys = "hfile.NUM_UNIQUE_KEYS"
val TotalKeyLength = "hfile.TOTAL_KEY_LENGTH"
val TotalValueLength = "hfile.TOTAL_VALUE_LENGTH"
}
object HFileMetadataKeys extends HFileMetadataKeys
|
foursquare/fsqio
|
src/jvm/io/fsq/hfile/common/HFileMetadataKeys.scala
|
Scala
|
apache-2.0
| 805 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.jdbc
import com.spotify.scio.values.SCollection
import com.spotify.scio.ScioContext
import com.spotify.scio.io.{EmptyTap, EmptyTapOf, ScioIO, Tap, TestIO}
import org.apache.beam.sdk.io.{jdbc => beam}
import java.sql.{PreparedStatement, ResultSet}
import com.spotify.scio.coders.{Coder, CoderMaterializer}
import com.spotify.scio.io.TapT
sealed trait JdbcIO[T] extends ScioIO[T]
object JdbcIO {
final def apply[T](opts: JdbcIoOptions): JdbcIO[T] =
new JdbcIO[T] with TestIO[T] {
final override val tapT = EmptyTapOf[T]
override def testId: String = s"JdbcIO(${jdbcIoId(opts)})"
}
private[jdbc] def jdbcIoId(opts: JdbcIoOptions): String = opts match {
case JdbcReadOptions(connOpts, query, _, _, _) => jdbcIoId(connOpts, query)
case JdbcWriteOptions(connOpts, statement, _, _) =>
jdbcIoId(connOpts, statement)
}
private[jdbc] def jdbcIoId(opts: JdbcConnectionOptions, query: String): String = {
val user = opts.password
.fold(s"${opts.username}")(password => s"${opts.username}:$password")
s"$user@${opts.connectionUrl}:$query"
}
private[jdbc] def dataSourceConfiguration(
opts: JdbcConnectionOptions
): beam.JdbcIO.DataSourceConfiguration =
opts.password match {
case Some(pass) =>
beam.JdbcIO.DataSourceConfiguration
.create(opts.driverClass.getCanonicalName, opts.connectionUrl)
.withUsername(opts.username)
.withPassword(pass)
case None =>
beam.JdbcIO.DataSourceConfiguration
.create(opts.driverClass.getCanonicalName, opts.connectionUrl)
.withUsername(opts.username)
}
}
final case class JdbcSelect[T: Coder](readOptions: JdbcReadOptions[T]) extends JdbcIO[T] {
override type ReadP = Unit
override type WriteP = Nothing
final override val tapT: TapT.Aux[T, Nothing] = EmptyTapOf[T]
override def testId: String = s"JdbcIO(${JdbcIO.jdbcIoId(readOptions)})"
override protected def read(sc: ScioContext, params: ReadP): SCollection[T] = {
var transform = beam.JdbcIO
.read[T]()
.withCoder(CoderMaterializer.beam(sc, Coder[T]))
.withDataSourceConfiguration(JdbcIO.dataSourceConfiguration(readOptions.connectionOptions))
.withQuery(readOptions.query)
.withRowMapper(new beam.JdbcIO.RowMapper[T] {
override def mapRow(resultSet: ResultSet): T =
readOptions.rowMapper(resultSet)
})
if (readOptions.statementPreparator != null) {
transform = transform
.withStatementPreparator(new beam.JdbcIO.StatementPreparator {
override def setParameters(preparedStatement: PreparedStatement): Unit =
readOptions.statementPreparator(preparedStatement)
})
}
if (readOptions.fetchSize != JdbcIoOptions.BeamDefaultFetchSize) {
// override default fetch size.
transform = transform.withFetchSize(readOptions.fetchSize)
}
sc.applyTransform(transform)
}
override protected def write(data: SCollection[T], params: WriteP): Tap[Nothing] =
throw new UnsupportedOperationException("jdbc.Select is read-only")
override def tap(params: ReadP): Tap[Nothing] =
EmptyTap
}
final case class JdbcWrite[T](writeOptions: JdbcWriteOptions[T]) extends JdbcIO[T] {
override type ReadP = Nothing
override type WriteP = Unit
final override val tapT: TapT.Aux[T, Nothing] = EmptyTapOf[T]
override def testId: String = s"JdbcIO(${JdbcIO.jdbcIoId(writeOptions)})"
override protected def read(sc: ScioContext, params: ReadP): SCollection[T] =
throw new UnsupportedOperationException("jdbc.Write is write-only")
override protected def write(data: SCollection[T], params: WriteP): Tap[Nothing] = {
var transform = beam.JdbcIO
.write[T]()
.withDataSourceConfiguration(JdbcIO.dataSourceConfiguration(writeOptions.connectionOptions))
.withStatement(writeOptions.statement)
if (writeOptions.preparedStatementSetter != null) {
transform = transform
.withPreparedStatementSetter(new beam.JdbcIO.PreparedStatementSetter[T] {
override def setParameters(element: T, preparedStatement: PreparedStatement): Unit =
writeOptions.preparedStatementSetter(element, preparedStatement)
})
}
if (writeOptions.batchSize != JdbcIoOptions.BeamDefaultBatchSize) {
// override default batch size.
transform = transform.withBatchSize(writeOptions.batchSize)
}
data.applyInternal(transform)
EmptyTap
}
override def tap(params: ReadP): Tap[Nothing] =
EmptyTap
}
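// Usage sketch (illustrative; the named fields of JdbcConnectionOptions and
// JdbcReadOptions, and the sc.read call shape, are assumptions inferred from
// how they are used above, not shown in this file):
//   val connOpts = JdbcConnectionOptions(
//     username = "user",
//     password = Some("secret"),
//     connectionUrl = "jdbc:postgresql://localhost/db",
//     driverClass = classOf[org.postgresql.Driver])
//   val names: SCollection[String] = sc.read(JdbcSelect(JdbcReadOptions(
//     connectionOptions = connOpts,
//     query = "SELECT name FROM users",
//     rowMapper = rs => rs.getString(1))))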
|
regadas/scio
|
scio-jdbc/src/main/scala/com/spotify/scio/jdbc/JdbcIO.scala
|
Scala
|
apache-2.0
| 5,165 |
package com.socrata.soda.server.highlevel
import com.socrata.soda.clients.datacoordinator.{FeedbackSecondaryManifestClient, DataCoordinatorClient}
import com.socrata.soda.server.copy.{Published, Unpublished}
import com.socrata.soda.server.id.ResourceName
import com.socrata.soda.server.persistence.NameAndSchemaStore
import org.scalamock.proxy.ProxyMockFactory
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSuiteLike, Matchers}
import scala.util.Random
class DatasetDAOSpec extends FunSuiteLike with Matchers with MockFactory with ProxyMockFactory {
test("current copy is latest published") {
val dataset = new ResourceName("dataset")
val expectedCopyNum = Some(42)
val dc = mock[DataCoordinatorClient]
val fbm = new FeedbackSecondaryManifestClient(dc, Map.empty)
val ns = mock[NameAndSchemaStore]
ns.expects('lookupCopyNumber)(dataset, None).returning(Some(1)).anyNumberOfTimes()
ns.expects('lookupCopyNumber)(dataset, Some(Published)).returning(expectedCopyNum)
val col = new ColumnSpecUtils(Random)
val instance = () => "test"
val dao: DatasetDAO = new DatasetDAOImpl(dc, fbm, ns, col, instance)
val copynum = dao.getCurrentCopyNum(dataset)
copynum should be(expectedCopyNum)
}
test("current copy is latest unpublished") {
val dataset = new ResourceName("dataset")
val expectedCopyNum = Some(42)
val dc = mock[DataCoordinatorClient]
val fbm = new FeedbackSecondaryManifestClient(dc, Map.empty)
val ns = mock[NameAndSchemaStore]
ns.expects('lookupCopyNumber)(dataset, Some(Published)).returning(None)
ns.expects('lookupCopyNumber)(dataset, Some(Unpublished)).returning(expectedCopyNum)
val col = new ColumnSpecUtils(Random)
val instance = () => "test"
val dao: DatasetDAO = new DatasetDAOImpl(dc, fbm, ns, col, instance)
val copynum = dao.getCurrentCopyNum(dataset)
copynum should be(expectedCopyNum)
}
}
|
socrata-platform/soda-fountain
|
soda-fountain-lib/src/test/scala/com/socrata/soda/server/highlevel/DatasetDAOSpec.scala
|
Scala
|
apache-2.0
| 1,945 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package std
import scala.annotation.tailrec
import scala.reflect.macros._
import sbt.util.OptJsonWriter
private[sbt] object KeyMacro {
def settingKeyImpl[T: c.WeakTypeTag](
c: blackbox.Context
)(description: c.Expr[String]): c.Expr[SettingKey[T]] =
keyImpl2[T, SettingKey[T]](c) { (name, mf, ojw) =>
c.universe.reify { SettingKey[T](name.splice, description.splice)(mf.splice, ojw.splice) }
}
def taskKeyImpl[T: c.WeakTypeTag](
c: blackbox.Context
)(description: c.Expr[String]): c.Expr[TaskKey[T]] =
keyImpl[T, TaskKey[T]](c) { (name, mf) =>
c.universe.reify { TaskKey[T](name.splice, description.splice)(mf.splice) }
}
def inputKeyImpl[T: c.WeakTypeTag](
c: blackbox.Context
)(description: c.Expr[String]): c.Expr[InputKey[T]] =
keyImpl[T, InputKey[T]](c) { (name, mf) =>
c.universe.reify { InputKey[T](name.splice, description.splice)(mf.splice) }
}
def keyImpl[T: c.WeakTypeTag, S: c.WeakTypeTag](c: blackbox.Context)(
f: (c.Expr[String], c.Expr[Manifest[T]]) => c.Expr[S]
): c.Expr[S] =
f(getName(c), getImplicit[Manifest[T]](c))
private def keyImpl2[T: c.WeakTypeTag, S: c.WeakTypeTag](c: blackbox.Context)(
f: (c.Expr[String], c.Expr[Manifest[T]], c.Expr[OptJsonWriter[T]]) => c.Expr[S]
): c.Expr[S] =
f(getName(c), getImplicit[Manifest[T]](c), getImplicit[OptJsonWriter[T]](c))
private def getName[S: c.WeakTypeTag, T: c.WeakTypeTag](c: blackbox.Context): c.Expr[String] = {
import c.universe._
val enclosingValName = definingValName(
c,
methodName =>
s"""$methodName must be directly assigned to a val, such as `val x = $methodName[Int]("description")`."""
)
c.Expr[String](Literal(Constant(enclosingValName)))
}
private def getImplicit[T: c.WeakTypeTag](c: blackbox.Context): c.Expr[T] = {
import c.universe._
c.Expr[T](c.inferImplicitValue(weakTypeOf[T]))
}
def definingValName(c: blackbox.Context, invalidEnclosingTree: String => String): String = {
import c.universe.{ Apply => ApplyTree, _ }
val methodName = c.macroApplication.symbol.name
def processName(n: Name): String =
n.decodedName.toString.trim // trim is not strictly correct, but macros don't expose the API necessary
@tailrec def enclosingVal(trees: List[c.Tree]): String = {
trees match {
case ValDef(_, name, _, _) :: _ => processName(name)
case (_: ApplyTree | _: Select | _: TypeApply) :: xs => enclosingVal(xs)
// lazy val x: X = <methodName> has this form for some reason (only when the explicit type is present, though)
case Block(_, _) :: DefDef(mods, name, _, _, _, _) :: _ if mods.hasFlag(Flag.LAZY) =>
processName(name)
case _ =>
c.error(c.enclosingPosition, invalidEnclosingTree(methodName.decodedName.toString))
"<error>"
}
}
enclosingVal(enclosingTrees(c).toList)
}
def enclosingTrees(c: blackbox.Context): Seq[c.Tree] =
c.asInstanceOf[reflect.macros.runtime.Context]
.callsiteTyper
.context
.enclosingContextChain
.map(_.tree.asInstanceOf[c.Tree])
}
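// Illustrative effect (not from the original source): through the
// user-facing wrappers that call these macros, writing
//   val compileTimeout = settingKey[Int]("max seconds to wait")
// yields a key named "compileTimeout": definingValName walks the
// enclosing trees to find the val the macro call is assigned to.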
|
xuwei-k/xsbt
|
main-settings/src/main/scala/sbt/std/KeyMacro.scala
|
Scala
|
apache-2.0
| 3,326 |
package streams
import common._
/**
* This component implements the solver for the Bloxorz game
*/
trait Solver extends GameDef {
/**
* Returns `true` if the block `b` is at the final position
*/
def done(b: Block): Boolean = b == Block(goal, goal)
/**
* This function takes two arguments: the current block `b` and
* a list of moves `history` that was required to reach the
* position of `b`.
*
* The `head` element of the `history` list is the latest move
* that was executed, i.e. the last move that was performed for
* the block to end up at position `b`.
*
* The function returns a stream of pairs: the first element of
* the each pair is a neighboring block, and the second element
* is the augmented history of moves required to reach this block.
*
* It should only return valid neighbors, i.e. block positions
* that are inside the terrain.
*/
def neighborsWithHistory(b: Block, history: List[Move]): Stream[(Block, List[Move])] = {
val n = for {
neighbor <- b.legalNeighbors
} yield (neighbor._1, neighbor._2 :: history)
n.toStream
}
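  // Illustrative (not from the original source): with history
  // List(Left, Up), a legal neighbor reached by moving Right is emitted
  // as (thatBlock, List(Right, Left, Up)), newest move first.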
/**
* This function returns the list of neighbors without the block
* positions that have already been explored. We will use it to
* make sure that we don't explore circular paths.
*/
def newNeighborsOnly(neighbors: Stream[(Block, List[Move])],
explored: Set[Block]): Stream[(Block, List[Move])] = neighbors.filter((t) => !explored.contains(t._1))
/**
* The function `from` returns the stream of all possible paths
* that can be followed, starting at the `head` of the `initial`
* stream.
*
* The blocks in the stream `initial` are sorted by ascending path
* length: the block positions with the shortest paths (length of
* move list) are at the head of the stream.
*
* The parameter `explored` is a set of block positions that have
* been visited before, on the path to any of the blocks in the
 * stream `initial`. When the search reaches a block that has
 * already been explored, that position should not be included a
 * second time, to avoid cycles.
*
* The resulting stream should be sorted by ascending path length,
 * i.e. the block positions that can be reached with the fewest
 * moves should appear first in the stream.
*
* Note: the solution should not look at or compare the lengths
* of different paths - the implementation should naturally
* construct the correctly sorted stream.
*/
def from(initial: Stream[(Block, List[Move])],
explored: Set[Block]): Stream[(Block, List[Move])] = {
if (initial.isEmpty) Stream.Empty else {
val m = for {
path <- initial
next <- newNeighborsOnly(neighborsWithHistory(path._1, path._2), explored)
} yield next
initial ++ from(m, explored ++ (m map (_._1)))
}
}
/**
* The stream of all paths that begin at the starting block.
*/
lazy val pathsFromStart: Stream[(Block, List[Move])] = from(Stream((startBlock, List())), Set(startBlock))
/**
* Returns a stream of all possible pairs of the goal block along
 * with the history of how it was reached.
*/
lazy val pathsToGoal: Stream[(Block, List[Move])] = pathsFromStart.filter((p) => done(p._1))
/**
* The (or one of the) shortest sequence(s) of moves to reach the
* goal. If the goal cannot be reached, the empty list is returned.
*
* Note: the `head` element of the returned list should represent
* the first move that the player should perform from the starting
* position.
*/
lazy val solution: List[Move] =
pathsToGoal match {
case Stream.Empty => List()
case h #:: _ => h._2.reverse
}
}
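// Illustrative usage sketch (assumes the course's `StringParserTerrain`, which
// parses the terrain, start, and goal positions from a string; not part of this file):
//   object Level1 extends Solver with StringParserTerrain {
//     val level =
//       """ooo-------
//         |oSoooo----
//         |ooooooooo-
//         |-ooooooooo
//         |-----ooToo
//         |------ooo-""".stripMargin
//   }
//   Level1.solution   // shortest move list from S to T, or Nil if unreachable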
|
syhan/coursera
|
progfun2/streams/src/main/scala/streams/Solver.scala
|
Scala
|
gpl-3.0
| 3,763 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_10.scalatest1_9_2
import org.jetbrains.plugins.scala.DependencyManagerBase._
import org.jetbrains.plugins.scala.base.libraryLoaders.{IvyManagedLoader, LibraryLoader}
import org.jetbrains.plugins.scala.debugger.{ScalaVersion, Scala_2_10}
import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTestTestCase
/**
* @author Roman.Shein
* @since 11.02.2015.
*/
abstract class Scalatest2_10_1_9_2_Base extends ScalaTestTestCase {
override implicit val version: ScalaVersion = Scala_2_10
override protected def additionalLibraries: Seq[LibraryLoader] =
IvyManagedLoader("org.scalatest" %% "scalatest" % "1.9.2") :: Nil
}
|
jastice/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_10/scalatest1_9_2/Scalatest2_10_1_9_2_Base.scala
|
Scala
|
apache-2.0
| 709 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.collection.immutable.ListSet
import Suite._
import Spec.isTestMethod
import Spec.equalIfRequiredCompactify
import org.scalatest.events._
import scala.reflect.NameTransformer._
import java.lang.reflect.{Method, Modifier, InvocationTargetException}
/**
* Facilitates a “behavior-driven” style of development (BDD), in which tests
* are methods, optionally nested inside singleton objects defining textual scopes.
*
* <table><tr><td class="usage">
* <strong>Recommended Usage</strong>:
* Class <code>Spec</code> allows you to define tests as methods, which saves one function literal per test compared to style classes that represent tests as functions.
* Fewer function literals translates into faster compile times and fewer generated class files, which can help minimize build times.
* As a result, using <code>Spec</code> can be a good choice in large projects where build times are a concern as well as when generating large numbers of
 * tests programmatically via static code generators.
* </td></tr></table>
*
* <p>
* Here's an example <code>Spec</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec
*
* import org.scalatest.Spec
*
* class SetSpec extends Spec {
*
* object `A Set` {
* object `when empty` {
* def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* def `should produce NoSuchElementException when head is invoked` {
* intercept[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* A <code>Spec</code> can contain <em>scopes</em> and tests. You define a scope
* with a nested singleton object, and a test with a method. The names of both <em>scope objects</em> and <em>test methods</em>
 * must be expressed in backticks and contain at least one space character.
 * </p>
*
* <p>
* A space placed in backticks is encoded by the Scala compiler as <code>$u0020</code>, as
* illustrated here:
* </p>
*
* <pre class="stREPL">
* scala> def `an example` = ()
* an$u0020example: Unit
* </pre>
*
* <p>
* <code>Spec</code> uses reflection to discover scope objects and test methods.
* During discovery, <code>Spec</code> will consider any nested singleton object whose name
* includes <code>$u0020</code> a scope object, and any method whose name includes <code>$u0020</code> a test method.
* It will ignore any singleton objects or methods that do not include a <code>$u0020</code> character. Thus, <code>Spec</code> would
* not consider the following singleton object a scope object:
* </p>
*
* <pre class="stHighlight">
* object `Set` { // Not discovered, because no space character
* }
* </pre>
*
* <p>
* You can make such a scope discoverable by placing a space at the end, like this:
* </p>
*
* <pre class="stHighlight">
* object `Set ` { // Discovered, because of the trailing space character
* }
* </pre>
*
* <p>
* Rather than performing this discovery during construction, when instance variables used by scope objects may as yet be uninitialized,
* <code>Spec</code> performs discovery lazily, the first time a method needing the results of discovery is invoked.
* For example, methods <code>run</code>, <code>runTests</code>, <code>tags</code>, <code>expectedTestCount</code>,
* <code>runTest</code>, and <code>testNames</code> all ensure that scopes and tests have already been discovered prior to doing anything
* else. Discovery is performed, and the results recorded, only once for each <code>Spec</code> instance.
* </p>
*
* <p>
* A scope names, or gives more information about, the <em>subject</em> (class or other entity) you are specifying
* and testing. In the previous example, <code>`A Set`</code>
* is the subject under specification and test. With each test name you provide a string (the <em>test text</em>) that specifies
* one bit of behavior of the subject, and a block of code (the body of the test method) that verifies that behavior.
* </p>
*
* <p>
* When you execute a <code>Spec</code>, it will send <a href="events/Formatter.html"><code>Formatter</code></a>s in the events it sends to the
* <a href="Reporter.html"><code>Reporter</code></a>. ScalaTest's built-in reporters will report these events in such a way
* that the output is easy to read as an informal specification of the <em>subject</em> being tested.
* For example, were you to run <code>SetSpec</code> from within the Scala interpreter:
* </p>
*
* <pre class="stREPL">
* scala> new SetSpec execute
* </pre>
*
* <p>
* You would see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stGreen"> - should have size 0</span>
* <span class="stGreen"> - should produce NoSuchElementException when head is invoked</span>
* </pre>
*
* <p>
* Or, to run just the test named <code>A Set when empty should have size 0</code>, you could pass that test's name, or any unique substring of the
* name, such as <code>"size 0"</code> or even just <code>"0"</code>. Here's an example:
* </p>
*
* <pre class="stREPL">
 * scala> new SetSpec execute "size 0"
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stGreen"> - should have size 0</span>
* </pre>
*
* <p>
* You can also pass to <code>execute</code> a <a href="ConfigMap.html"><em>config map</em></a> of key-value
* pairs, which will be passed down into suites and tests, as well as other parameters that configure the run itself.
* For more information on running in the Scala interpreter, see the documentation for the
* <a href="Shell.html">ScalaTest shell</a>.
* </p>
*
* <p>
* The <code>execute</code> method invokes a <code>run</code> method that takes two
* parameters. This <code>run</code> method, which actually executes the suite, will usually be invoked by a test runner, such
* as <a href="run$.html"><code>run</code></a>, <a href="tools/Runner$.html"><code>tools.Runner</code></a>, a build tool, or an IDE.
* </p>
*
* <p>
* The test methods shown in this example are parameterless. This is recommended even for test methods with obvious side effects. In production code
* you would normally declare no-arg, side-effecting methods as <em>empty-paren</em> methods, and call them with
* empty parentheses, to make it more obvious to readers of the code that they have a side effect. Whether or not a test method has
* a side effect, however, is a less important distinction than it is for methods in production code. Moreover, test methods are not
* normally invoked directly by client code, but rather through reflection by running the <code>Suite</code> that contains them, so a
* lack of parentheses on an invocation of a side-effecting test method would not normally appear in any client code. Given the empty
* parentheses do not add much value in the test methods case, the recommended style is to simply always leave them off.
* </p>
*
* <p>
* <em>Note: The approach of using backticks around test method names to make it easier to write descriptive test names was
* inspired by the <a href="http://github.com/SimpleFinance/simplespec" target="_blank"><code>SimpleSpec</code></a> test framework, originally created by Coda Hale.</em>
* </p>
*
* <a name="ignoredTests"></a><h2>Ignored tests</h2></a>
*
* <p>
* To support the common use case of temporarily disabling a test in a <code>Spec</code>, with the
* good intention of resurrecting the test at a later time, you can annotate the test method with <code>@Ignore</code>.
 * For example, to temporarily disable the test method with the name <code>`should have size 0`</code>, just annotate
* it with <code>@Ignore</code>, like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.ignore
*
* import org.scalatest._
*
* class SetSpec extends Spec {
*
* object `A Set` {
* object `when empty` {
* @Ignore def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* def `should produce NoSuchElementException when head is invoked` {
* intercept[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* If you run this version of <code>SetSpec</code> with:
* </p>
*
* <pre class="stREPL">
* scala> new SetSpec execute
* </pre>
*
* <p>
* It will run only the second test and report that the first test was ignored:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stYellow"> - should have size 0 !!! IGNORED !!!</span>
* <span class="stGreen"> - should produce NoSuchElementException when head is invoked</span>
* </pre>
*
* <p>
* If you wish to temporarily ignore an entire suite of tests, you can annotate the test class with <code>@Ignore</code>, like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.ignoreall
*
* import org.scalatest._
*
* @Ignore
* class SetSpec extends Spec {
*
* object `A Set` {
* object `when empty` {
* def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* def `should produce NoSuchElementException when head is invoked` {
* intercept[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* When you mark a test class with a tag annotation, ScalaTest will mark each test defined in that class with that tag.
* Thus, marking the <code>SetSpec</code> in the above example with the <code>@Ignore</code> tag annotation means that both tests
* in the class will be ignored. If you run the above <code>SetSpec</code> in the Scala interpreter, you'll see:
* </p>
*
* <pre class="stREPL">
* scala> new SetSpec execute
* <span class="stGreen">SetSpec:
* A Set
* when empty</span>
* <span class="stYellow"> - should have size 0 !!! IGNORED !!!</span>
* <span class="stYellow"> - should produce NoSuchElementException when head is invoked !!! IGNORED !!!</span>
* </pre>
*
* <p>
* Note that marking a test class as ignored won't prevent it from being discovered by ScalaTest. Ignored classes
* will be discovered and run, and all their tests will be reported as ignored. This is intended to keep the ignored
* class visible, to encourage the developers to eventually fix and “un-ignore” it. If you want to
* prevent a class from being discovered at all, use the <a href="DoNotDiscover.html"><code>DoNotDiscover</code></a> annotation instead.
* </p>
*
*
* <a name="informers"></a><h2>Informers</h2></a>
*
* <p>
 * One of the parameters to <code>Spec</code>'s <code>run</code> method is a <code>Reporter</code>, which
* will collect and report information about the running suite of tests.
* Information about suites and tests that were run, whether tests succeeded or failed,
* and tests that were ignored will be passed to the <code>Reporter</code> as the suite runs.
* Most often the reporting done by default by <code>Spec</code>'s methods will be sufficient, but
* occasionally you may wish to provide custom information to the <code>Reporter</code> from a test.
* For this purpose, an <a href="Informer.html"><code>Informer</code></a> that will forward information to the current <code>Reporter</code>
* is provided via the <code>info</code> parameterless method.
* You can pass the extra information to the <code>Informer</code> via one of its <code>apply</code> methods.
* The <code>Informer</code> will then pass the information to the <code>Reporter</code> via an <a href="events/InfoProvided.html"><code>InfoProvided</code></a> event.
* Here's an example in which the <code>Informer</code> returned by <code>info</code> is used implicitly by the
* <code>Given</code>, <code>When</code>, and <code>Then</code> methods of trait <a href="GivenWhenThen.html"><code>GivenWhenThen</code></a>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.info
*
* import collection.mutable
* import org.scalatest._
*
* class SetSpec extends Spec with GivenWhenThen {
*
* object `A mutable Set` {
* def `should allow an element to be added` {
* Given("an empty mutable Set")
* val set = mutable.Set.empty[String]
*
* When("an element is added")
* set += "clarity"
*
* Then("the Set should have size 1")
* assert(set.size === 1)
*
* And("the Set should contain the added element")
* assert(set.contains("clarity"))
*
* info("That's all folks!")
* }
* }
* }
* </pre>
*
 * <p>
 * If you run this <code>Spec</code> from the interpreter, you will see the following output:
 * </p>
*
* <pre class="stREPL">
* scala> new SetSpec execute
* <span class="stGreen">A mutable Set
* - should allow an element to be added
* + Given an empty mutable Set
* + When an element is added
* + Then the Set should have size 1
* + And the Set should contain the added element
* + That's all folks! </span>
* </pre>
*
* <a name="documenters"></a><h2>Documenters</h2></a>
*
* <p>
* <code>Spec</code> also provides a <code>markup</code> method that returns a <a href="Documenter.html"><code>Documenter</code></a>, which allows you to send
* to the <code>Reporter</code> text formatted in <a href="http://daringfireball.net/projects/markdown/" target="_blank">Markdown syntax</a>.
* You can pass the extra information to the <code>Documenter</code> via its <code>apply</code> method.
* The <code>Documenter</code> will then pass the information to the <code>Reporter</code> via an <a href="events/MarkupProvided.html"><code>MarkupProvided</code></a> event.
* </p>
*
* <p>
* Here's an example <code>Spec</code> that uses <code>markup</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.markup
*
* import collection.mutable
* import org.scalatest._
*
* class SetSpec extends Spec with GivenWhenThen {
*
* markup { """
*
* Mutable Set
* -----------
*
* A set is a collection that contains no duplicate elements.
*
* To implement a concrete mutable set, you need to provide implementations
* of the following methods:
*
* def contains(elem: A): Boolean
* def iterator: Iterator[A]
* def += (elem: A): this.type
* def -= (elem: A): this.type
*
* If you wish that methods like `take`,
* `drop`, `filter` return the same kind of set,
* you should also override:
*
* def empty: This
*
 * It is also a good idea to override methods `foreach` and
* `size` for efficiency.
*
* """ }
*
* object `A mutable Set` {
* def `should allow an element to be added` {
* Given("an empty mutable Set")
* val set = mutable.Set.empty[String]
*
* When("an element is added")
* set += "clarity"
*
* Then("the Set should have size 1")
* assert(set.size === 1)
*
* And("the Set should contain the added element")
* assert(set.contains("clarity"))
*
* markup("This test finished with a **bold** statement!")
* }
* }
* }
* </pre>
*
* <p>
* Although all of ScalaTest's built-in reporters will display the markup text in some form,
* the HTML reporter will format the markup information into HTML. Thus, the main purpose of <code>markup</code> is to
* add nicely formatted text to HTML reports. Here's what the above <code>SetSpec</code> would look like in the HTML reporter:
* </p>
*
* <img class="stScreenShot" src="../../lib/spec.gif">
*
* <a name="notifiersAlerters"></a><h2>Notifiers and alerters</h2></a>
*
* <p>
* ScalaTest records text passed to <code>info</code> and <code>markup</code> during tests, and sends the recorded text in the <code>recordedEvents</code> field of
* test completion events like <code>TestSucceeded</code> and <code>TestFailed</code>. This allows string reporters (like the standard out reporter) to show
* <code>info</code> and <code>markup</code> text <em>after</em> the test name in a color determined by the outcome of the test. For example, if the test fails, string
* reporters will show the <code>info</code> and <code>markup</code> text in red. If a test succeeds, string reporters will show the <code>info</code>
* and <code>markup</code> text in green. While this approach helps the readability of reports, it means that you can't use <code>info</code> to get status
* updates from long running tests.
* </p>
*
* <p>
* To get immediate (<em>i.e.</em>, non-recorded) notifications from tests, you can use <code>note</code> (a <a href="Notifier.html"><code>Notifier</code></a>) and <code>alert</code>
* (an <a href="Alerter.html"><code>Alerter</code></a>). Here's an example showing the differences:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.note
*
* import collection.mutable
* import org.scalatest._
*
* class SetSpec extends Spec {
*
* object `A mutable Set` {
* def `should allow an element to be added` {
*
* info("info is recorded")
* markup("markup is *also* recorded")
* note("notes are sent immediately")
* alert("alerts are also sent immediately")
*
* val set = mutable.Set.empty[String]
* set += "clarity"
* assert(set.size === 1)
* assert(set.contains("clarity"))
* }
* }
* }
* </pre>
*
* <p>
* Because <code>note</code> and <code>alert</code> information is sent immediately, it will appear <em>before</em> the test name in string reporters, and its color will
* be unrelated to the ultimate outcome of the test: <code>note</code> text will always appear in green, <code>alert</code> text will always appear in yellow.
* Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> new SetSpec execute
* <span class="stGreen">SetSpec:
* A mutable Set
* + notes are sent immediately</span>
* <span class="stYellow">+ alerts are also sent immediately</span>
* <span class="stGreen">- should allow an element to be added
* + info is recorded
* + markup is *also* recorded</span>
* </pre>
*
* <p>
* In summary, use <code>info</code> and <code>markup</code> for text that should form part of the specification output. Use
* <code>note</code> and <code>alert</code> to send status notifications. (Because the HTML reporter is intended to produce a
* readable, printable specification, <code>info</code> and <code>markup</code> text will appear in the HTML report, but
* <code>note</code> and <code>alert</code> text will not.)
* </p>
*
* <a name="pendingTests"></a><h2>Pending tests</h2></a>
*
* <p>
* A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of
* pending tests is to facilitate a style of testing in which documentation of behavior is sketched
* out before tests are written to verify that behavior (and often, before the behavior of
* the system being tested is itself implemented). Such sketches form a kind of specification of
* what tests and functionality to implement later.
* </p>
*
* <p>
* To support this style of testing, a test can be given a name that specifies one
* bit of behavior required by the system being tested. The test can also include some code that
* sends more information about the behavior to the reporter when the tests run. At the end of the test,
* it can call method <code>pending</code>, which will cause it to complete abruptly with <a href="exceptions/TestPendingException.html"><code>TestPendingException</code></a>.
* </p>
*
* <p>
* Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information
* sent to the reporter when running the test can appear in the report of a test run.
* (The code of a pending test is executed just like any other test.) However, because the test completes abruptly
* with <code>TestPendingException</code>, the test will be reported as pending, to indicate
* the actual test, and possibly the functionality, has not yet been implemented.
* </p>
*
* <p>
* You can mark a test as pending in <code>Spec</code> by using "<code>{ pending }</code>" as the body of the test method,
* like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.pending
*
* import org.scalatest._
*
* class SetSpec extends Spec {
*
* object `A Set` {
* object `when empty` {
* def `should have size 0` { pending }
*
* def `should produce NoSuchElementException when head is invoked` {
* intercept[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* (Note: “<code>pending</code>” is the body of the test. Thus the test contains just one statement, an invocation
* of the <code>pending</code> method, which throws <code>TestPendingException</code>.)
* If you run this version of <code>SetSpec</code> with:
* </p>
*
* <pre class="stREPL">
* scala> new SetSpec execute
* </pre>
*
* <p>
* It will run both tests, but report that test "<code>should have size 0</code>" is pending. You'll see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">A Set</span>
* <span class="stGreen"> when empty</span>
* <span class="stYellow"> - should have size 0 (pending)</span>
* <span class="stGreen"> - should produce NoSuchElementException when head is invoked</span>
* </pre>
*
* <a name="taggingTests"></a><h2>Tagging tests</h2>
*
* <p>
* A <code>Spec</code>'s tests may be classified into groups by <em>tagging</em> them with string names. When executing
* a <code>Spec</code>, groups of tests can optionally be included and/or excluded. In this
* trait's implementation, tags are indicated by annotations attached to the test method. To
* create a new tag type to use in <code>Spec</code>s, simply define a new Java annotation that itself is annotated with
* the <code>org.scalatest.TagAnnotation</code> annotation.
* (Currently, for annotations to be
* visible in Scala programs via Java reflection, the annotations themselves must be written in Java.) For example,
* to create tags named <code>SlowTest</code> and <code>DbTest</code>, you would
* write in Java:
* </p>
*
* <pre>
* package org.scalatest.examples.spec.tagging;
* import java.lang.annotation.*;
* import org.scalatest.TagAnnotation;
*
* @TagAnnotation
* @Retention(RetentionPolicy.RUNTIME)
* @Target({ElementType.METHOD, ElementType.TYPE})
* public @interface SlowTest {}
*
* @TagAnnotation
* @Retention(RetentionPolicy.RUNTIME)
* @Target({ElementType.METHOD, ElementType.TYPE})
* public @interface DbTest {}
* </pre>
*
* <p>
* Given these annotations, you could tag <code>Spec</code> tests like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.tagging
*
* import org.scalatest.Spec
*
* class SetSpec extends Spec {
*
* object `A Set` {
* object `when empty` {
* @SlowTest
* def `should have size 0` {
* assert(Set.empty.size === 0)
* }
*
* @SlowTest @DbTest
* def `should produce NoSuchElementException when head is invoked` {
* intercept[NoSuchElementException] {
* Set.empty.head
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* The <code>run</code> method takes a <a href="Filter.html"><code>Filter</code></a>, whose constructor takes an optional
* <code>Set[String]</code> called <code>tagsToInclude</code> and a <code>Set[String]</code> called
* <code>tagsToExclude</code>. If <code>tagsToInclude</code> is <code>None</code>, all tests will be run
 * except those with tags listed in the
* <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is defined, only tests
* with tags mentioned in the <code>tagsToInclude</code> set, and not mentioned in <code>tagsToExclude</code>,
* will be run.
* </p>
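 *
 * <p>
 * For example (a sketch; the exact command line depends on your classpath and chosen reporters), you
 * could exclude the slow tests by passing the tag name to <code>Runner</code>'s <code>-l</code> (exclude)
 * option, or run only the database tests with <code>-n</code> (include):
 * </p>
 *
 * <pre class="stREPL">
 * $ scala -cp scalatest.jar org.scalatest.tools.Runner -R target -o -l org.scalatest.examples.spec.tagging.SlowTest
 * $ scala -cp scalatest.jar org.scalatest.tools.Runner -R target -o -n org.scalatest.examples.spec.tagging.DbTest
 * </pre>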
*
* <p>
* A tag annotation also allows you to tag all the tests of a <code>Spec</code> in
* one stroke by annotating the class. For more information and examples, see the
* <a href="Tag.html">documentation for class <code>Tag</code></a>.
* </p>
*
* <a name="sharedFixtures"></a>
* <h2>Shared fixtures</h2>
*
* <p>
* A test <em>fixture</em> is composed of the objects and other artifacts (files, sockets, database
* connections, <em>etc.</em>) tests use to do their work.
* When multiple tests need to work with the same fixtures, it is important to try and avoid
* duplicating the fixture code across those tests. The more code duplication you have in your
* tests, the greater drag the tests will have on refactoring the actual production code.
* </p>
*
* <p>
* ScalaTest recommends three techniques to eliminate such code duplication:
* </p>
*
* <ul>
* <li>Refactor using Scala</li>
* <li>Override <code>withFixture</code></li>
* <li>Mix in a <em>before-and-after</em> trait</li>
* </ul>
*
 * <p>
 * Each technique is geared towards helping you reduce code duplication without introducing
 * instance <code>var</code>s, shared mutable objects, or other dependencies between tests. Eliminating shared
 * mutable state across tests will make your test code easier to reason about and more amenable to parallel
 * test execution.
 * </p>
 *
 * <p>
 * The following sections describe these techniques, including explaining the recommended usage
 * for each. But first, here's a table summarizing the options:
 * </p>
*
* <table style="border-collapse: collapse; border: 1px solid black">
*
* <tr>
* <td colspan="2" style="background-color: #CCCCCC; border-width: 1px; padding: 3px; padding-top: 7px; border: 1px solid black; text-align: left">
* <strong>Refactor using Scala when different tests need different fixtures.</strong>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#getFixtureMethods">get-fixture methods</a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
 * The <em>extract method</em> refactor helps you create fresh instances of mutable fixture objects in each test
* that needs them, but doesn't help you clean them up when you're done.
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#fixtureContextObjects">fixture-context objects</a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* By placing fixture methods and fields into traits, you can easily give each test just the newly created
* fixtures it needs by mixing together traits. Use this technique when you need <em>different combinations
* of mutable fixture objects in different tests</em>, and don't need to clean up after.
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#loanFixtureMethods">loan-fixture methods</a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
 * Factor out duplicate code with the <em>loan pattern</em> when different tests need different fixtures <em>that must be cleaned up afterwards</em>.
* </td>
* </tr>
*
* <tr>
* <td colspan="2" style="background-color: #CCCCCC; border-width: 1px; padding: 3px; padding-top: 7px; border: 1px solid black; text-align: left">
* <strong>Override <code>withFixture</code> when most or all tests need the same fixture.</strong>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#withFixtureNoArgTest">
* <code>withFixture(NoArgTest)</code></a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* <p>
* The recommended default approach when most or all tests need the same fixture treatment. This general technique
* allows you, for example, to perform side effects at the beginning and end of all or most tests,
 * transform the outcome of tests, retry tests, and make decisions based on test names, tags, or other test data.
* Use this technique unless:
* </p>
* <ul>
* <li>Different tests need different fixtures (refactor using Scala instead)</li>
* <li>An exception in fixture code should abort the suite, not fail the test (use a <em>before-and-after</em> trait instead)</li>
* <li>You have objects to pass into tests (override <code>withFixture(<em>One</em>ArgTest)</code> instead)</li>
* </ul>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#withFixtureOneArgTest">
* <code>withFixture(OneArgTest)</code>
* </a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* Use when you want to pass the same fixture object or objects as a parameter into all or most tests.
* </td>
* </tr>
*
* <tr>
* <td colspan="2" style="background-color: #CCCCCC; border-width: 1px; padding: 3px; padding-top: 7px; border: 1px solid black; text-align: left">
* <strong>Mix in a before-and-after trait when you want an aborted suite, not a failed test, if the fixture code fails.</strong>
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#beforeAndAfter"><code>BeforeAndAfter</code></a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* Use this boilerplate-buster when you need to perform the same side-effects before and/or after tests, rather than at the beginning or end of tests.
* </td>
* </tr>
*
* <tr>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: right">
* <a href="#composingFixtures"><code>BeforeAndAfterEach</code></a>
* </td>
* <td style="border-width: 1px; padding: 3px; border: 1px solid black; text-align: left">
* Use when you want to <em>stack traits</em> that perform the same side-effects before and/or after tests, rather than at the beginning or end of tests.
* </td>
* </tr>
*
* </table>
*
* <a name="getFixtureMethods"></a>
* <h4>Calling get-fixture methods</h4>
*
* <p>
* If you need to create the same mutable fixture objects in multiple tests, and don't need to clean them up after using them, the simplest approach is to write one or
* more <em>get-fixture</em> methods. A get-fixture method returns a new instance of a needed fixture object (or a holder object containing
* multiple fixture objects) each time it is called. You can call a get-fixture method at the beginning of each
* test that needs the fixture, storing the returned object or objects in local variables. Here's an example:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.getfixture
*
* import org.scalatest.Spec
* import collection.mutable.ListBuffer
*
* class ExampleSpec extends Spec {
*
* def fixture =
* new {
* val builder = new StringBuilder("ScalaTest is ")
* val buffer = new ListBuffer[String]
* }
*
* object `Testing ` {
* def `should be easy` {
* val f = fixture
* f.builder.append("easy!")
* assert(f.builder.toString === "ScalaTest is easy!")
* assert(f.buffer.isEmpty)
* f.buffer += "sweet"
* }
*
* def `should be fun` {
* val f = fixture
* f.builder.append("fun!")
* assert(f.builder.toString === "ScalaTest is fun!")
* assert(f.buffer.isEmpty)
* }
* }
* }
* </pre>
*
* <p>
* The “<code>f.</code>” in front of each use of a fixture object provides a visual indication of which objects
 * are part of the fixture, but if you prefer, you can import the members with “<code>import f._</code>” and use the names directly.
* </p>
*
* <p>
* If you need to configure fixture objects differently in different tests, you can pass configuration into the get-fixture method. For example, you could pass
* in an initial value for a mutable fixture object as a parameter to the get-fixture method.
* </p>
*
* <a name="fixtureContextObjects"></a>
* <h4>Instantiating fixture-context objects </h4>
*
* <p>
* An alternate technique that is especially useful when different tests need different combinations of fixture objects is to define the fixture objects as instance variables
* of <em>fixture-context objects</em> whose instantiation forms the body of tests. Like get-fixture methods, fixture-context objects are only
* appropriate if you don't need to clean up the fixtures after using them.
* </p>
*
 * <p>
 * To use this technique, you define instance variables initialized with fixture objects in traits and/or classes, then in each test instantiate an object that
* contains just the fixture objects needed by the test. Traits allow you to mix together just the fixture objects needed by each test, whereas classes
* allow you to pass data in via a constructor to configure the fixture objects. Here's an example in which fixture objects are partitioned into two traits
* and each test just mixes together the traits it needs:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.fixturecontext
*
* import collection.mutable.ListBuffer
* import org.scalatest.Spec
*
* class ExampleSpec extends Spec {
*
* trait Builder {
* val builder = new StringBuilder("ScalaTest is ")
* }
*
* trait Buffer {
* val buffer = ListBuffer("ScalaTest", "is")
* }
*
* object `Testing ` {
* // This test needs the StringBuilder fixture
* def `should be productive` {
* new Builder {
* builder.append("productive!")
* assert(builder.toString === "ScalaTest is productive!")
* }
* }
* }
*
* object `Test code` {
* // This test needs the ListBuffer[String] fixture
* def `should be readable` {
* new Buffer {
* buffer += ("readable!")
* assert(buffer === List("ScalaTest", "is", "readable!"))
* }
* }
*
* // This test needs both the StringBuilder and ListBuffer
* def `should be clear and concise` {
* new Builder with Buffer {
* builder.append("clear!")
* buffer += ("concise!")
* assert(builder.toString === "ScalaTest is clear!")
* assert(buffer === List("ScalaTest", "is", "concise!"))
* }
* }
* }
* }
* </pre>
*
* <a name="withFixtureNoArgTest"></a>
* <h4>Overriding <code>withFixture(NoArgTest)</code></h4>
*
* <p>
* Although the get-fixture method and fixture-context object approaches take care of setting up a fixture at the beginning of each
* test, they don't address the problem of cleaning up a fixture at the end of the test. If you just need to perform a side-effect at the beginning or end of
* a test, and don't need to actually pass any fixture objects into the test, you can override <code>withFixture(NoArgTest)</code>, one of ScalaTest's
* lifecycle methods defined in trait <a href="Suite.html#lifecycle-methods"><code>Suite</code></a>.
* </p>
*
* <p>
* Trait <code>Suite</code>'s implementation of <code>runTest</code> passes a no-arg test function to <code>withFixture(NoArgTest)</code>. It is <code>withFixture</code>'s
* responsibility to invoke that test function. <code>Suite</code>'s implementation of <code>withFixture</code> simply
* invokes the function, like this:
* </p>
*
* <pre class="stHighlight">
* // Default implementation in trait Suite
* protected def withFixture(test: NoArgTest) = {
* test()
* }
* </pre>
*
* <p>
* You can, therefore, override <code>withFixture</code> to perform setup before and/or cleanup after invoking the test function. If
* you have cleanup to perform, you should invoke the test function inside a <code>try</code> block and perform the cleanup in
* a <code>finally</code> clause, in case an exception propagates back through <code>withFixture</code>. (If a test fails because of an exception,
 * the test function invoked by <code>withFixture</code> will result in a <a href="Failed.html"><code>Failed</code></a> wrapping the exception. Nevertheless,
 * best practice is to perform cleanup in a <code>finally</code> clause just in case an exception occurs.)
* </p>
*
* <p>
* The <code>withFixture</code> method is designed to be stacked, and to enable this, you should always call the <code>super</code> implementation
* of <code>withFixture</code>, and let it invoke the test function rather than invoking the test function directly. In other words, instead of writing
* “<code>test()</code>”, you should write “<code>super.withFixture(test)</code>”, like this:
* </p>
*
* <pre class="stHighlight">
* // Your implementation
* override def withFixture(test: NoArgTest) = {
* // Perform setup
* try super.withFixture(test) // Invoke the test function
* finally {
* // Perform cleanup
* }
* }
* </pre>
*
* <p>
* Here's an example in which <code>withFixture(NoArgTest)</code> is used to take a snapshot of the working directory if a test fails, and
 * send that information to the reporter:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.noargtest
*
* import java.io.File
* import org.scalatest._
*
* class ExampleSpec extends Spec {
*
* override def withFixture(test: NoArgTest) = {
*
* super.withFixture(test) match {
* case failed: Failed =>
* val currDir = new File(".")
* val fileNames = currDir.list()
* info("Dir snapshot: " + fileNames.mkString(", "))
* failed
* case other => other
* }
* }
*
* object `This test` {
* def `should succeed` {
* assert(1 + 1 === 2)
* }
*
* def `should fail` {
* assert(1 + 1 === 3)
* }
* }
* }
* </pre>
*
* <p>
 * Running this version of <code>ExampleSpec</code> in the interpreter in a directory with two files, <code>hello.txt</code> and <code>world.txt</code>
* would give the following output:
* </p>
*
* <pre class="stREPL">
 * scala> new ExampleSpec execute
 * <span class="stGreen">ExampleSpec:
 * This test
 * - should succeed</span>
 * <span class="stRed">- should fail *** FAILED ***
 *   2 did not equal 3 (<console>:33)
 *   + Dir snapshot: hello.txt, world.txt </span>
* </pre>
*
* <p>
* Note that the <a href="Suite$NoArgTest.html"><code>NoArgTest</code></a> passed to <code>withFixture</code>, in addition to
* an <code>apply</code> method that executes the test, also includes the test name and the <a href="ConfigMap.html">config
* map</a> passed to <code>runTest</code>. Thus you can also use the test name and configuration objects in your <code>withFixture</code>
* implementation.
* </p>
*
* <a name="loanFixtureMethods"></a>
* <h4>Calling loan-fixture methods</h4>
*
* <p>
* If you need to both pass a fixture object into a test <em>and</em> perform cleanup at the end of the test, you'll need to use the <em>loan pattern</em>.
* If different tests need different fixtures that require cleanup, you can implement the loan pattern directly by writing <em>loan-fixture</em> methods.
* A loan-fixture method takes a function whose body forms part or all of a test's code. It creates a fixture, passes it to the test code by invoking the
* function, then cleans up the fixture after the function returns.
* </p>
*
* <p>
* The following example shows three tests that use two fixtures, a database and a file. Both require cleanup after, so each is provided via a
* loan-fixture method. (In this example, the database is simulated with a <code>StringBuffer</code>.)
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.loanfixture
*
* import java.util.concurrent.ConcurrentHashMap
*
* object DbServer { // Simulating a database server
* type Db = StringBuffer
* private val databases = new ConcurrentHashMap[String, Db]
* def createDb(name: String): Db = {
* val db = new StringBuffer
* databases.put(name, db)
* db
* }
* def removeDb(name: String) {
* databases.remove(name)
* }
* }
*
* import org.scalatest.Spec
* import DbServer._
* import java.util.UUID.randomUUID
* import java.io._
*
* class ExampleSpec extends Spec {
*
* def withDatabase(testCode: Db => Any) {
* val dbName = randomUUID.toString
* val db = createDb(dbName) // create the fixture
* try {
* db.append("ScalaTest is ") // perform setup
* testCode(db) // "loan" the fixture to the test
* }
* finally removeDb(dbName) // clean up the fixture
* }
*
* def withFile(testCode: (File, FileWriter) => Any) {
* val file = File.createTempFile("hello", "world") // create the fixture
* val writer = new FileWriter(file)
* try {
* writer.write("ScalaTest is ") // set up the fixture
* testCode(file, writer) // "loan" the fixture to the test
* }
* finally writer.close() // clean up the fixture
* }
*
* object `Testing ` {
* // This test needs the file fixture
* def `should be productive` {
* withFile { (file, writer) =>
* writer.write("productive!")
* writer.flush()
* assert(file.length === 24)
* }
* }
* }
*
* object `Test code` {
* // This test needs the database fixture
* def `should be readable` {
* withDatabase { db =>
* db.append("readable!")
* assert(db.toString === "ScalaTest is readable!")
* }
* }
*
* // This test needs both the file and the database
* def `should be clear and concise` {
* withDatabase { db =>
* withFile { (file, writer) => // loan-fixture methods compose
* db.append("clear!")
* writer.write("concise!")
* writer.flush()
* assert(db.toString === "ScalaTest is clear!")
* assert(file.length === 21)
* }
* }
* }
* }
* }
* </pre>
*
* <p>
* As demonstrated by the last test, loan-fixture methods compose. Not only do loan-fixture methods allow you to
* give each test the fixture it needs, they allow you to give a test multiple fixtures and clean everything up afterwards.
* </p>
*
* <p>
* Also demonstrated in this example is the technique of giving each test its own "fixture sandbox" to play in. When your fixtures
* involve external side-effects, like creating files or databases, it is a good idea to give each file or database a unique name as is
* done in this example. This keeps tests completely isolated, allowing you to run them in parallel if desired.
* </p>
*
* <a name="withFixtureOneArgTest"></a>
* <h4>Overriding <code>withFixture(OneArgTest)</code></h4>
*
* <p>
* If all or most tests need the same fixture, you can avoid some of the boilerplate of the loan-fixture method approach by using a <code>fixture.Spec</code>
* and overriding <code>withFixture(OneArgTest)</code>.
* Each test in a <code>fixture.Spec</code> takes a fixture as a parameter, allowing you to pass the fixture into
* the test. You must indicate the type of the fixture parameter by specifying <code>FixtureParam</code>, and implement a
* <code>withFixture</code> method that takes a <code>OneArgTest</code>. This <code>withFixture</code> method is responsible for
* invoking the one-arg test function, so you can perform fixture set up before, and clean up after, invoking and passing
* the fixture into the test function.
* </p>
*
* <p>
* To enable the stacking of traits that define <code>withFixture(NoArgTest)</code>, it is a good idea to let
* <code>withFixture(NoArgTest)</code> invoke the test function instead of invoking the test
* function directly. To do so, you'll need to convert the <code>OneArgTest</code> to a <code>NoArgTest</code>. You can do that by passing
* the fixture object to the <code>toNoArgTest</code> method of <code>OneArgTest</code>. In other words, instead of
* writing “<code>test(theFixture)</code>”, you'd delegate responsibility for
* invoking the test function to the <code>withFixture(NoArgTest)</code> method of the same instance by writing:
* </p>
*
* <pre>
* withFixture(test.toNoArgTest(theFixture))
* </pre>
*
* <p>
* Here's a complete example:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.oneargtest
*
* import org.scalatest.fixture
* import java.io._
*
* class ExampleSpec extends fixture.Spec {
*
* case class FixtureParam(file: File, writer: FileWriter)
*
* def withFixture(test: OneArgTest) = {
*
* // create the fixture
* val file = File.createTempFile("hello", "world")
* val writer = new FileWriter(file)
* val theFixture = FixtureParam(file, writer)
*
* try {
* writer.write("ScalaTest is ") // set up the fixture
* withFixture(test.toNoArgTest(theFixture)) // "loan" the fixture to the test
* }
* finally writer.close() // clean up the fixture
* }
*
* object `Testing ` {
* def `should be easy` { f: FixtureParam =>
* f.writer.write("easy!")
* f.writer.flush()
* assert(f.file.length === 18)
* }
*
* def `should be fun` { f: FixtureParam =>
* f.writer.write("fun!")
* f.writer.flush()
* assert(f.file.length === 17)
* }
* }
* }
* </pre>
*
* <p>
* In this example, the tests actually required two fixture objects, a <code>File</code> and a <code>FileWriter</code>. In such situations you can
* simply define the <code>FixtureParam</code> type to be a tuple containing the objects, or as is done in this example, a case class containing
* the objects. For more information on the <code>withFixture(OneArgTest)</code> technique, see the <a href="fixture/Spec.html">documentation for <code>fixture.Spec</code></a>.
* </p>
*
* <a name="beforeAndAfter"></a>
* <h4>Mixing in <code>BeforeAndAfter</code></h4>
*
* <p>
* In all the shared fixture examples shown so far, the activities of creating, setting up, and cleaning up the fixture objects have been
* performed <em>during</em> the test. This means that if an exception occurs during any of these activities, it will be reported as a test failure.
* Sometimes, however, you may want setup to happen <em>before</em> the test starts, and cleanup <em>after</em> the test has completed, so that if an
* exception occurs during setup or cleanup, the entire suite aborts and no more tests are attempted. The simplest way to accomplish this in ScalaTest is
* to mix in trait <a href="BeforeAndAfter.html"><code>BeforeAndAfter</code></a>. With this trait you can denote a bit of code to run before each test
 * with <code>before</code> and/or after each test with <code>after</code>, like this:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.beforeandafter
*
* import org.scalatest.Spec
* import org.scalatest.BeforeAndAfter
* import collection.mutable.ListBuffer
*
* class ExampleSpec extends Spec with BeforeAndAfter {
*
* val builder = new StringBuilder
* val buffer = new ListBuffer[String]
*
* before {
* builder.append("ScalaTest is ")
* }
*
* after {
* builder.clear()
* buffer.clear()
* }
*
* object `Testing ` {
* def `should be easy` {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def `should be fun` {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* }
* </pre>
*
* <p>
* Note that the only way <code>before</code> and <code>after</code> code can communicate with test code is via some side-effecting mechanism, commonly by
* reassigning instance <code>var</code>s or by changing the state of mutable objects held from instance <code>val</code>s (as in this example). If using
* instance <code>var</code>s or mutable objects held from instance <code>val</code>s you wouldn't be able to run tests in parallel in the same instance
* of the test class unless you synchronized access to the shared, mutable state. This is why ScalaTest's <code>ParallelTestExecution</code> trait extends
* <a href="OneInstancePerTest.html"><code>OneInstancePerTest</code></a>. By running each test in its own instance of the class, each test has its own copy of the instance variables, so you
 * don't need to synchronize. If you mixed <code>ParallelTestExecution</code> into the <code>ExampleSpec</code> above, the tests would run in parallel just fine
* without any synchronization needed on the mutable <code>StringBuilder</code> and <code>ListBuffer[String]</code> objects.
* </p>
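 *
 * <p>
 * For example (illustrative only), parallel execution is just a matter of mixing in the trait:
 * </p>
 *
 * <pre class="stHighlight">
 * class ParallelExampleSpec extends Spec with BeforeAndAfter with ParallelTestExecution
 * </pre>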
*
* <p>
* Although <code>BeforeAndAfter</code> provides a minimal-boilerplate way to execute code before and after tests, it isn't designed to enable stackable
* traits, because the order of execution would be non-obvious. If you want to factor out before and after code that is common to multiple test suites, you
 * should use trait <code>BeforeAndAfterEach</code> instead, as shown in the next section,
 * <a href="#composingFixtures">composing fixtures by stacking traits</a>.
* </p>
*
* <a name="composingFixtures"></a><h2>Composing fixtures by stacking traits</h2>
*
* <p>
* In larger projects, teams often end up with several different fixtures that test classes need in different combinations,
* and possibly initialized (and cleaned up) in different orders. A good way to accomplish this in ScalaTest is to factor the individual
* fixtures into traits that can be composed using the <em>stackable trait</em> pattern. This can be done, for example, by placing
* <code>withFixture</code> methods in several traits, each of which call <code>super.withFixture</code>. Here's an example in
* which the <code>StringBuilder</code> and <code>ListBuffer[String]</code> fixtures used in the previous examples have been
* factored out into two <em>stackable fixture traits</em> named <code>Builder</code> and <code>Buffer</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.composingwithfixture
*
* import org.scalatest._
* import collection.mutable.ListBuffer
*
* trait Builder extends SuiteMixin { this: Suite =>
*
* val builder = new StringBuilder
*
* abstract override def withFixture(test: NoArgTest) = {
* builder.append("ScalaTest is ")
* try super.withFixture(test) // To be stackable, must call super.withFixture
* finally builder.clear()
* }
* }
*
* trait Buffer extends SuiteMixin { this: Suite =>
*
* val buffer = new ListBuffer[String]
*
* abstract override def withFixture(test: NoArgTest) = {
* try super.withFixture(test) // To be stackable, must call super.withFixture
* finally buffer.clear()
* }
* }
*
* class ExampleSpec extends Spec with Builder with Buffer {
*
* object `Testing ` {
* def `should be easy` {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def `should be fun` {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* }
* </pre>
*
* <p>
* By mixing in both the <code>Builder</code> and <code>Buffer</code> traits, <code>ExampleSpec</code> gets both fixtures, which will be
* initialized before each test and cleaned up after. The order the traits are mixed together determines the order of execution.
* In this case, <code>Builder</code> is “super” to <code>Buffer</code>. If you wanted <code>Buffer</code> to be “super”
* to <code>Builder</code>, you need only switch the order you mix them together, like this:
* </p>
*
* <pre class="stHighlight">
* class Example2Spec extends Spec with Buffer with Builder
* </pre>
*
* <p>
 * And if you need only one fixture, you mix in only that trait:
* </p>
*
* <pre class="stHighlight">
* class Example3Spec extends Spec with Builder
* </pre>
*
* <p>
* Another way to create stackable fixture traits is by extending the <a href="BeforeAndAfterEach.html"><code>BeforeAndAfterEach</code></a>
* and/or <a href="BeforeAndAfterAll.html"><code>BeforeAndAfterAll</code></a> traits.
* <code>BeforeAndAfterEach</code> has a <code>beforeEach</code> method that will be run before each test (like JUnit's <code>setUp</code>),
* and an <code>afterEach</code> method that will be run after (like JUnit's <code>tearDown</code>).
* Similarly, <code>BeforeAndAfterAll</code> has a <code>beforeAll</code> method that will be run before all tests,
* and an <code>afterAll</code> method that will be run after all tests. Here's what the previously shown example would look like if it
* were rewritten to use the <code>BeforeAndAfterEach</code> methods instead of <code>withFixture</code>:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.spec.composingbeforeandaftereach
*
* import org.scalatest._
* import org.scalatest.BeforeAndAfterEach
* import collection.mutable.ListBuffer
*
* trait Builder extends BeforeAndAfterEach { this: Suite =>
*
* val builder = new StringBuilder
*
* override def beforeEach() {
* builder.append("ScalaTest is ")
* super.beforeEach() // To be stackable, must call super.beforeEach
* }
*
* override def afterEach() {
* try super.afterEach() // To be stackable, must call super.afterEach
* finally builder.clear()
* }
* }
*
* trait Buffer extends BeforeAndAfterEach { this: Suite =>
*
* val buffer = new ListBuffer[String]
*
* override def afterEach() {
* try super.afterEach() // To be stackable, must call super.afterEach
* finally buffer.clear()
* }
* }
*
* class ExampleSpec extends Spec with Builder with Buffer {
*
* object `Testing ` {
* def `should be easy` {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def `should be fun` {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* }
* </pre>
*
* <p>
* To get the same ordering as <code>withFixture</code>, place your <code>super.beforeEach</code> call at the end of each
* <code>beforeEach</code> method, and the <code>super.afterEach</code> call at the beginning of each <code>afterEach</code>
* method, as shown in the previous example. It is a good idea to invoke <code>super.afterEach</code> in a <code>try</code>
* block and perform cleanup in a <code>finally</code> clause, as shown in the previous example, because this ensures the
* cleanup code is performed even if <code>super.afterEach</code> throws an exception.
* </p>
*
* <p>
* The difference between stacking traits that extend <code>BeforeAndAfterEach</code> versus traits that implement <code>withFixture</code> is
* that setup and cleanup code happens before and after the test in <code>BeforeAndAfterEach</code>, but at the beginning and
* end of the test in <code>withFixture</code>. Thus if a <code>withFixture</code> method completes abruptly with an exception, it is
* considered a failed test. By contrast, if any of the <code>beforeEach</code> or <code>afterEach</code> methods of <code>BeforeAndAfterEach</code>
* complete abruptly, it is considered an aborted suite, which will result in a <a href="events/SuiteAborted.html"><code>SuiteAborted</code></a> event.
* </p>
*
* <a name="sharedTests"></a><h2>Shared tests</h2>
*
* <p>
* Because <code>Spec</code> represents tests as methods, you cannot share or otherwise dynamically generate tests. Instead, use static code generation
* if you want to generate tests in a <code>Spec</code>. In other words, write a program that statically generates the entire source file of
* a <code>Spec</code> subclass.
* </p>
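 *
 * <p>
 * Here's a minimal sketch of such a generator (the object and file names are illustrative, not a
 * ScalaTest API):
 * </p>
 *
 * <pre class="stHighlight">
 * object GenSetSpec extends App {
 *   // One generated test method per case
 *   val tests = (1 to 3).map(n => s"    def `should handle case $n` { assert($n > 0) }")
 *   val lines =
 *     Seq("import org.scalatest.Spec", "", "class GeneratedSetSpec extends Spec {",
 *         "  object `Generated cases` {") ++ tests ++ Seq("  }", "}")
 *   java.nio.file.Files.write(
 *     java.nio.file.Paths.get("GeneratedSetSpec.scala"),
 *     lines.mkString(System.lineSeparator()).getBytes("UTF-8"))
 * }
 * </pre>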
*
* @author Bill Venners
*/
@Finders(Array("org.scalatest.finders.SpecFinder"))
class Spec extends SpecLike {
/**
* Returns a user friendly string for this suite, composed of the
 * simple name of the class (possibly simplified further by removing dollar signs if added by the Scala interpreter) and, if this suite
* contains nested suites, the result of invoking <code>toString</code> on each
* of the nested suites, separated by commas and surrounded by parentheses.
*
* @return a user-friendly string for this suite
*/
override def toString: String = Suite.suiteToString(None, this)
}
private[scalatest] object Spec {
def isTestMethod(m: Method): Boolean = {
val isInstanceMethod = !Modifier.isStatic(m.getModifiers())
val hasNoParams = m.getParameterTypes.isEmpty
    // name must have at least one encoded space: "$u0020"
val includesEncodedSpace = m.getName.indexOf("$u0020") >= 0
val isOuterMethod = m.getName.endsWith("$$outer")
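    // e.g. rejects compiler-synthesized nested names such as "MySpec$$anonfun$1"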
val isNestedMethod = m.getName.matches(".+\\\\$\\\\$.+\\\\$[1-9]+")
//val isOuterMethod = m.getName.endsWith("$$$outer")
// def maybe(b: Boolean) = if (b) "" else "!"
// println("m.getName: " + m.getName + ": " + maybe(isInstanceMethod) + "isInstanceMethod, " + maybe(hasNoParams) + "hasNoParams, " + maybe(includesEncodedSpace) + "includesEncodedSpace")
isInstanceMethod && hasNoParams && includesEncodedSpace && !isOuterMethod && !isNestedMethod
}
import java.security.MessageDigest
import scala.io.Codec
// The following compactify code is written based on scala compiler source code at:-
// https://github.com/scala/scala/blob/master/src/reflect/scala/reflect/internal/StdNames.scala#L47
private val compactifiedMarker = "$$$$"
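  // A compactified name has the shape "<prefix>$$$$<md5-of-middle>$$$$<suffix>"; we recompute the hash and compare.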
def equalIfRequiredCompactify(value: String, compactified: String): Boolean = {
if (compactified.matches(".+\\\\$\\\\$\\\\$\\\\$.+\\\\$\\\\$\\\\$\\\\$.+")) {
      val firstDollarIdx = compactified.indexOf("$$$$")
      val lastDollarIdx = compactified.lastIndexOf("$$$$")
      val prefix = compactified.substring(0, firstDollarIdx)
      val suffix = compactified.substring(lastDollarIdx + 4)
val lastIndexOfDot = value.lastIndexOf(".")
val toHash =
if (lastIndexOfDot >= 0)
value.substring(0, value.length - 1).substring(value.lastIndexOf(".") + 1)
else
value
val bytes = Codec.toUTF8(toHash.toArray)
val md5 = MessageDigest.getInstance("MD5")
md5.update(bytes)
val md5chars = (md5.digest() map (b => (b & 0xFF).toHexString)).mkString
(prefix + compactifiedMarker + md5chars + compactifiedMarker + suffix) == compactified
}
else
value == compactified
}
}
|
travisbrown/scalatest
|
src/main/scala/org/scalatest/Spec.scala
|
Scala
|
apache-2.0
| 60,525 |
/*
*
* Copyright 2015 Gaëtan La Marca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
package controllers.permission
case class SearchPermissionForm(refName : Option[String], name : Option[String], applicationId : Option[Int])
|
glamarca/cuam
|
app/controllers/permission/PermissionForms.scala
|
Scala
|
apache-2.0
| 752 |
/*
* Copyright 2017 Workday, Inc.
*
* This software is available under the MIT license.
* Please see the LICENSE.txt file in this project.
*/
package com.workday.esclient
/**
* Utility object for [[com.workday.esclient]] traits.
*/
object EsNames {
val FilterPath = "filter_path"
  // Values for ES to return in its result set; we don't include hits.hits because it includes all values in the hits array.
  // As a side effect, though, if no hits are returned, hits.hits doesn't exist instead of being an empty array.
private[this] val WhitelistedParams = Seq(
"_scroll_id", "took", "timed_out",
"hits.total", "hits.max_score",
"hits.hits._id", "hits.hits._score", "hits.hits.matched_queries",
"aggregations"
)
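  // Joined into the single comma-separated filter_path value:
  // "_scroll_id,took,timed_out,hits.total,hits.max_score,hits.hits._id,hits.hits._score,hits.hits.matched_queries,aggregations"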
val FilterPathParams = WhitelistedParams.mkString(",")
val standardAnalyzerName = "standard"
// Number of documents to return on each page of scan and scroll searches
val EsScanAndScrollSize = 100
// Amount of time to keep the scroll snapshot (how long we can scan through it, https://www.elastic.co/guide/en/elasticsearch/guide/current/scan-scroll.html)
val EsScanAndScrollTime = "1m"
val NAME_QUERY_DELIMITER = "##"
val DOCS_SUFFIX = "docs"
val NAME_DELIMITER = "@"
// don't store strings greater than the length below for the raw field in the "other" dynamic mapping
val OtherMappingRawFieldIgnoreAboveLength = 1000
}
|
Workday/escalar
|
src/main/scala/com/workday/esclient/EsNames.scala
|
Scala
|
mit
| 1,388 |
import sbt._
import Keys._
import Helpers._
object Selenate extends Build {
lazy val root = top(
Seq(
Core.root
)
)
}
|
mlegac/selenate
|
code/scala/project/Selenate.scala
|
Scala
|
bsd-3-clause
| 140 |
package com.ajjpj.cassdriver.util
import java.nio.ByteBuffer
import com.ajjpj.cassdriver.AbstractCassDriverTest
class ParsableByteBuffersTest extends AbstractCassDriverTest {
private def parsableBuffer(bytes: Int*) = {
ParsableByteBuffers(Seq(buffer(bytes:_*)))
}
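  // Builds a read-ready ByteBuffer from int literals, truncating each to a signed byte.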
private def buffer(bytes: Int*) = {
val buffer = ByteBuffer.allocate(bytes.size)
for (b <- bytes) buffer.put(b.asInstanceOf[Byte])
buffer.flip()
buffer
}
"A Byte" should "be parsed" in {
parsableBuffer(0).readByte() should be (0)
parsableBuffer(1).readByte() should be (1)
parsableBuffer(128).readByte() should be (128)
parsableBuffer(255).readByte() should be (255)
val buf = parsableBuffer(10, 99)
buf.readByte() should be (10)
buf.readByte() should be (99)
}
"An unsigned Short" should "be parsed" in {
parsableBuffer(0,1).readUnsignedShort() should be (1)
parsableBuffer(0,2).readUnsignedShort() should be (2)
parsableBuffer(1,0).readUnsignedShort() should be (256)
parsableBuffer(2,0).readUnsignedShort() should be (512)
parsableBuffer(255,255).readUnsignedShort() should be (65535)
}
it should "be parsed if it spans two ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(1), buffer(2))).readUnsignedShort() should be (258)
}
"An Int" should "be parsed" in {
parsableBuffer(0, 0, 0, 1).readInt() should be (1)
parsableBuffer(1, 2, 3, 4).readInt() should be (0x01020304)
}
it should "be parsed as a signed int" in {
parsableBuffer(128, 0, 0, 0).readInt() should be (Integer.MIN_VALUE)
parsableBuffer(255,255,255,255).readInt() should be (-1)
}
it should "be parsed if it spans two ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(), buffer(1, 2, 3, 4))).readInt() should be (0x01020304)
ParsableByteBuffers(Seq(buffer(1), buffer(2, 3, 4))).readInt() should be (0x01020304)
ParsableByteBuffers(Seq(buffer(1, 2), buffer(3, 4))).readInt() should be (0x01020304)
ParsableByteBuffers(Seq(buffer(1, 2, 3), buffer(4))).readInt() should be (0x01020304)
ParsableByteBuffers(Seq(buffer(255, 255), buffer(255, 255))).readInt() should be (-1)
}
it should "be parsed if it spans three ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(1), buffer(2), buffer(3, 4))).readInt() should be (0x01020304)
ParsableByteBuffers(Seq(buffer(1), buffer(2, 3), buffer(4))).readInt() should be (0x01020304)
}
"A Long" should "be parsed" in {
parsableBuffer(0, 0, 0, 0, 0, 0, 0, 1).readLong() should be (1L)
parsableBuffer(1, 2, 3, 4, 5, 6, 7, 8).readLong() should be (0x0102030405060708L)
}
it should "be parsed as a signed long" in {
parsableBuffer(128, 0, 0, 0, 0, 0, 0, 0).readLong() should be (Long.MinValue)
parsableBuffer(255,255,255,255,255,255,255,255).readLong() should be (-1L)
}
it should "be parsed if it spans two ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(1, 2, 3), buffer(4, 5, 6, 7, 8))).readLong() should be (0x0102030405060708L)
ParsableByteBuffers(Seq(buffer(1, 2, 3, 4, 5, 6), buffer(7, 8))).readLong() should be (0x0102030405060708L)
ParsableByteBuffers(Seq(buffer(255,255,255), buffer(255,255,255,255,255))).readLong() should be (-1L)
}
it should "be parsed if it spans three ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(1, 2, 3), buffer(4), buffer(5, 6, 7, 8))).readLong() should be (0x0102030405060708L)
}
"A string" should "be parsed" in {
parsableBuffer(0, 1, 'A').readString() should be ("A")
parsableBuffer(0, 3, 'A', 'b', 'C').readString() should be ("AbC")
parsableBuffer(0, 2, 'A', 'b', 'C').readString() should be ("Ab")
}
it should "be parsed respecting its length" in {
val buf = parsableBuffer(0, 1, 'A', 0, 1, 'B')
buf.readString() should be ("A")
buf.readString() should be ("B")
}
it should "be parsed if it spans two ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(0, 4), buffer('a', 'b', 'c', 'd'))).readString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 4, 'a'), buffer('b', 'c', 'd'))).readString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 4, 'a', 'b'), buffer('c', 'd'))).readString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 4, 'a', 'b', 'c'), buffer('d'))).readString() should be ("abcd")
}
it should "be parsed if it spans three ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(0, 4), buffer('a', 'b'), buffer('c', 'd'))).readString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 4, 'a'), buffer('b'), buffer('c', 'd'))).readString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 4, 'a', 'b'), buffer('c'), buffer('d'))).readString() should be ("abcd")
}
it should "be parsed as UTF-8" in {
parsableBuffer(0, 6, 195, 164, 195, 182, 195, 188).readString() should be ("äöü")
}
"A LongString" should "be parsed" in {
parsableBuffer(0, 0, 0, 1, 'A').readLongString() should be ("A")
parsableBuffer(0, 0, 0, 3, 'A', 'b', 'C').readLongString() should be ("AbC")
parsableBuffer(0, 0, 0, 2, 'A', 'b', 'C').readLongString() should be ("Ab")
}
it should "be parsed respecting its length" in {
val buf = parsableBuffer(0, 0, 0, 1, 'A', 0, 0, 0, 1, 'B')
buf.readLongString() should be ("A")
buf.readLongString() should be ("B")
}
it should "be parsed if it spans two ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4), buffer('a', 'b', 'c', 'd'))).readLongString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4, 'a'), buffer('b', 'c', 'd'))).readLongString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4, 'a', 'b'), buffer('c', 'd'))).readLongString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4, 'a', 'b', 'c'), buffer('d'))).readLongString() should be ("abcd")
}
it should "be parsed if it spans three ByteBuffers" in {
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4), buffer('a', 'b'), buffer('c', 'd'))).readLongString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4, 'a'), buffer('b'), buffer('c', 'd'))).readLongString() should be ("abcd")
ParsableByteBuffers(Seq(buffer(0, 0, 0, 4, 'a', 'b'), buffer('c'), buffer('d'))).readLongString() should be ("abcd")
}
it should "be parsed as UTF-8" in {
parsableBuffer(0, 0, 0, 6, 195, 164, 195, 182, 195, 188).readLongString() should be ("äöü")
}
it should "be parsed if it is longer than 32767 bytes" in {
val byteArray = Array.ofDim[Byte](0x20000)
for (i <- 0 until 0x20000) byteArray(i) = 'A'
val str = ParsableByteBuffers(Seq(buffer(0, 2, 0, 0), ByteBuffer.wrap(byteArray))).readLongString()
str.length should be (0x20000)
str.replace("A", "") shouldBe empty
}
"CassBytes" should "be parsed" in {
parsableBuffer(0, 0, 0, 0).readBytes().b shouldBe empty
val b = parsableBuffer(0, 0, 0, 2, 99, 15).readBytes().b
b.length should be (2)
b(0) should be (99)
b(1) should be (15)
}
it should "be parsed if it spans two ByteBuffers" in {
val b = ParsableByteBuffers(Seq(buffer(0, 0, 0, 2, 99), buffer(15))).readBytes().b
b.length should be (2)
b(0) should be (99)
b(1) should be (15)
}
it should "be parsed if it spans three ByteBuffers" in {
val b = ParsableByteBuffers(Seq(buffer(0, 0, 0, 3, 99), buffer(15), buffer(123))).readBytes().b
b.length should be (3)
b(0) should be (99)
b(1) should be (15)
b(2) should be (123)
}
it should "be parsed if it is a big array" in {
val byteArray = Array.fill[Byte](0x20000)(25)
val b = ParsableByteBuffers(Seq(buffer(0, 2, 0, 0), ByteBuffer.wrap(byteArray))).readBytes().b
b.length should be (0x20000)
b.forall(_ == 25) should be (true)
}
it should "be parsed as NULL for length < 0" in {
parsableBuffer(255, 255, 255, 255).readBytes() shouldBe null.asInstanceOf[CassBytes]
parsableBuffer(128, 0, 0, 0).readBytes() shouldBe null.asInstanceOf[CassBytes]
parsableBuffer(200, 10, 99, 15).readBytes() shouldBe null.asInstanceOf[CassBytes]
}
"mark()" should "create a snapshot that can be restored by reset()" in {
val buf = parsableBuffer(99, 0, 0, 0, 1, 0, 0, 0, 2)
buf.readByte() should be (99)
buf.mark()
buf.readInt() should be (1)
buf.readInt() should be (2)
buf.reset()
buf.readInt() should be (1)
buf.readInt() should be (2)
buf.reset()
buf.readInt() should be (1)
buf.readInt() should be (2)
}
it should "work even if processing progressed to a different ByteBuffer" in {
val buf = ParsableByteBuffers(Seq(buffer(99, 0), buffer(0, 0), buffer(1, 0), buffer(0, 0, 2, 123)))
buf.readByte() should be (99)
buf.mark()
buf.readInt() should be (1)
buf.readInt() should be (2)
buf.reset()
buf.readInt() should be (1)
buf.readInt() should be (2)
buf.reset()
buf.readInt() should be (1)
buf.readInt() should be (2)
}
"remaining" should "return the total number of unprocessed bytes across all ByteBuffers" in {
val buf = ParsableByteBuffers(Seq(buffer(99, 0), buffer(0, 0), buffer(1, 0), buffer(0, 0, 2, 123)))
buf.remaining should be (10)
buf.readByte()
buf.remaining should be (9)
buf.readInt()
buf.remaining should be (5)
buf.readInt()
buf.remaining should be (1)
buf.readByte()
buf.remaining should be (0)
}
}
|
arnohaase/cass-driver
|
src/test/scala/com/ajjpj/cassdriver/util/ParsableByteBuffersTest.scala
|
Scala
|
apache-2.0
| 9,374 |
package net.cassite.jsonbind
import play.api.libs.json.{JsString, JsArray, JsObject, JsValue}
/**
* data and meta data of the json that is in parsing process
*/
class ParsingContext(val $scope: Scope, val appContext: AppContext) {
val ite = appContext.parsers.iterator
/**
* parse the current JsValue with the next parser
* @param current current JsValue to parse
* @return parsed JsValue
*/
def doNext(current: JsValue): JsValue = {
if (ite.hasNext) {
val parser = ite.next()
if (parser.canParse(current)) {
parser.parse(current, this)
} else {
doNext(current)
}
} else
      // no more parsers: parse the inner JsValues if JsObject/JsArray, or return directly; null keys/values are dropped
current match {
case JsObject(map) =>
new JsObject(map.map {
entry =>
val subContext = new ParsingContext($scope, appContext)
(entry._1, subContext.doNext(entry._2))
}.filter(p => p._1 != null && p._2 != null))
case JsArray(seq) =>
new JsArray(seq.map { v =>
val subContext = new ParsingContext($scope, appContext)
subContext.doNext(v)
}.filter(p => p != null))
case _ => current
}
}
/**
* parse expression and return the result in JsValue form
* @param str raw string
* @return JsValue result
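   * e.g., with value1 bound to "a" and value2 bound to "b" in $scope, parseExpression("{{value1}}-{{value2}}")
   * yields JsString("a-b"), whereas a lone "{{value1}}" may yield a non-string JsValue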
*/
def parseExpression(str: String): JsValue = {
var result = str
var jsResult: JsValue = null
val it = ParsingContext.regex.findAllMatchIn(str) // all matched {{...}}
// see whether the result is definitely a string
    // values taken from expressions that start with '{{' and end with '}}' might not be JsString,
    // but expressions with multiple '{{...}}' patterns or with text outside '{{...}}', such as '{{value1}}-{{value2}}', are definitely JsString
    val resultMightNotBeJsString = (ParsingContext.regex.findAllMatchIn(str).size == 1) && str.startsWith("{{") && str.endsWith("}}")
for (m <- it; // get Match object
string = m.matched; // get matched string
matched = string.substring(2, string.length - 2).trim // get expression inside '{{' and '}}'
) {
var last: JsValue = null
// foreach sub expressions
for (exp <- matched.split("\\\\|").map(_.trim)) {
// instantiate a PluginContext and do parsing
val pluginContext = new PluginContext(exp, $scope, appContext)
last = pluginContext.doNext(last)
}
if (resultMightNotBeJsString)
jsResult = last
else // fill the string with retrieved JsValues
result = ParsingContext.regex.replaceFirstIn(result, App.scalaObject(last).toString)
}
if (resultMightNotBeJsString)
jsResult
else
JsString(result)
}
}
object ParsingContext {
val regex = """\\{\\{[^\\}\\}]+\\}\\}""".r
}
|
wkgcass/JsonBind
|
src/main/scala/net/cassite/jsonbind/ParsingContext.scala
|
Scala
|
mit
| 2,912 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models._
import views._
/**
* Manage projects related operations.
*/
object Projects extends Controller with Secured {
/**
* Display the dashboard.
*/
def index = IsAuthenticated { username => _ =>
User.findByEmail(username).map { user =>
Ok(
html.dashboard(
Project.findInvolving(username),
Task.findTodoInvolving(username),
user
)
)
}.getOrElse(Forbidden)
}
// -- Projects
/**
* Add a project.
*/
def add = IsAuthenticated { username => implicit request =>
Form("group" -> nonEmptyText).bindFromRequest.fold(
errors => BadRequest,
folder => Ok(
views.html.projects.item(
Project.create(
Project(None, folder, "New project"),
Seq(username)
)
)
)
)
}
/**
* Delete a project.
*/
def delete(project: Long) = IsMemberOf(project) { username => _ =>
Project.delete(project)
Ok
}
/**
* Rename a project.
*/
def rename(project: Long) = IsMemberOf(project) { _ => implicit request =>
Form("name" -> nonEmptyText).bindFromRequest.fold(
errors => BadRequest,
newName => {
Project.rename(project, newName)
Ok(newName)
}
)
}
// -- Project groups
/**
* Add a new project group.
*/
def addGroup = IsAuthenticated { _ => _ =>
Ok(html.projects.group("New group"))
}
/**
* Delete a project group.
*/
def deleteGroup(folder: String) = IsAuthenticated { _ => _ =>
Project.deleteInFolder(folder)
Ok
}
/**
* Rename a project group.
*/
def renameGroup(folder: String) = IsAuthenticated { _ => implicit request =>
Form("name" -> nonEmptyText).bindFromRequest.fold(
errors => BadRequest,
newName => { Project.renameFolder(folder, newName); Ok(newName) }
)
}
// -- Members
/**
* Add a project member.
*/
def addUser(project: Long) = IsMemberOf(project) { _ => implicit request =>
Form("user" -> nonEmptyText).bindFromRequest.fold(
errors => BadRequest,
user => { Project.addMember(project, user); Ok }
)
}
/**
* Remove a project member.
*/
def removeUser(project: Long) = IsMemberOf(project) { _ => implicit request =>
Form("user" -> nonEmptyText).bindFromRequest.fold(
errors => BadRequest,
user => { Project.removeMember(project, user); Ok }
)
}
}
|
sbtrun-maven-plugin/sbtrun-maven-test-projects
|
play23/scala/zentasks/app/controllers/Projects.scala
|
Scala
|
apache-2.0
| 2,557 |
package com.twitter.finatra.http.internal.request
import com.twitter.finagle.http.ParamMap
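// A sketch of the precedence this class implements (values hypothetical):
// new RouteParamMap(request.params, Map("id" -> "123")).get("id") returns Some("123")
// even when the underlying request also carries an "id" param.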
private[http] class RouteParamMap(
paramMap: => ParamMap, //avoid constructing paramMap from Finagle request unless needed
params: Map[String, String])
extends ParamMap {
override def isValid = paramMap.isValid
override def get(name: String): Option[String] = {
params.get(name) orElse paramMap.get(name)
}
override def getAll(name: String): Iterable[String] = {
params.get(name).toIterable ++ paramMap.getAll(name)
}
override def iterator: Iterator[(String, String)] = {
params.iterator ++ paramMap.iterator
}
}
|
syamantm/finatra
|
http/src/main/scala/com/twitter/finatra/http/internal/request/RouteParamMap.scala
|
Scala
|
apache-2.0
| 642 |
package wandou.math.random
/**
* Deterministic random number generators are repeatable, which can prove
* useful for testing and validation. This interface defines an operation
* to return the seed data from a repeatable RNG. This seed value can then
* be reused to create a random source with identical output.
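 *
 * A minimal sketch of the round trip (the seed-accepting constructor is
 * hypothetical and depends on the concrete implementation):
 * {{{
 * val rng1 = new MersenneTwisterRNG()
 * val rng2 = new MersenneTwisterRNG(rng1.getSeed)
 * // rng1 and rng2 now produce identical output
 * }}}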
* @author Daniel Dyer
*/
trait RepeatableRNG {
/**
* @return The seed data used to initialise this pseudo-random
* number generator.
*/
def getSeed: Array[Byte]
}
|
wandoulabs/wandou-math
|
wandou-math/src/main/scala/wandou/math/random/RepeatableRNG.scala
|
Scala
|
apache-2.0
| 497 |
package ml.combust.mleap.core.feature
import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types.{ScalarType, StructType}
/**
* Created by hollinwilkins on 12/27/16.
*/
sealed trait UnaryOperation {
def name: String
}
object UnaryOperation {
case object Log extends UnaryOperation {
override def name: String = "log"
}
case object Exp extends UnaryOperation {
override def name: String = "exp"
}
case object Sqrt extends UnaryOperation {
override def name: String = "sqrt"
}
case object Sin extends UnaryOperation {
override def name: String = "sin"
}
case object Cos extends UnaryOperation {
override def name: String = "cos"
}
case object Tan extends UnaryOperation {
override def name: String = "tan"
}
case object Abs extends UnaryOperation {
override def name: String = "abs"
}
val all = Set(Log, Exp, Sqrt, Sin, Cos, Tan, Abs)
val forName: Map[String, UnaryOperation] = all.map(o => (o.name, o)).toMap
}
case class MathUnaryModel(operation: UnaryOperation) extends Model {
import UnaryOperation._
def apply(a: Double): Double = operation match {
case Log => Math.log(a)
case Exp => Math.exp(a)
case Sqrt => Math.sqrt(a)
case Sin => Math.sin(a)
case Cos => Math.cos(a)
case Tan => Math.tan(a)
case Abs => Math.abs(a)
case _ => throw new RuntimeException(s"unsupported unary operation: $operation")
}
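  // e.g. MathUnaryModel(Exp)(0.0) == 1.0 and MathUnaryModel(Abs)(-3.5) == 3.5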
override def inputSchema: StructType = StructType(
"input" -> ScalarType.Double.nonNullable).get
override def outputSchema: StructType = StructType(
"output" -> ScalarType.Double.nonNullable).get
}
|
combust-ml/mleap
|
mleap-core/src/main/scala/ml/combust/mleap/core/feature/MathUnaryModel.scala
|
Scala
|
apache-2.0
| 1,637 |
package com.microsoft.partnercatalyst.fortis.spark.sinks.cassandra.aggregators
import java.util.{Date, UUID}
import com.microsoft.partnercatalyst.fortis.spark.analyzer.timeseries.Period
import com.microsoft.partnercatalyst.fortis.spark.dba.ConfigurationManager
import com.microsoft.partnercatalyst.fortis.spark.dto.SiteSettings
import com.microsoft.partnercatalyst.fortis.spark.sinks.cassandra.dto._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.mockito.{ArgumentMatchers, Mockito}
import org.scalatest.{BeforeAndAfter, FlatSpec}
class HeatmapsOfflineAggregatorTestSpec extends FlatSpec with BeforeAndAfter {
private var configurationManager: ConfigurationManager = _
private var aggregator: HeatmapOfflineAggregator = _
private var siteSettings: SiteSettings = _
private val conf = new SparkConf()
.setAppName(this.getClass.getSimpleName)
.setMaster("local[*]")
.set("output.consistency.level", "LOCAL_ONE")
private var sc: SparkContext = _
before {
configurationManager = Mockito.mock(classOf[ConfigurationManager])
sc = new SparkContext(conf)
aggregator = new HeatmapOfflineAggregator(configurationManager)
siteSettings = new SiteSettings(
sitename = "Fortis",
geofence = Seq(1, 2, 3, 4),
defaultlanguage = Some("en"),
languages = Seq("en", "es", "fr"),
defaultzoom = 6,
featureservicenamespace = Some("somenamespace"),
title = "Fortis",
logo = "",
translationsvctoken = "",
cogspeechsvctoken = "",
cogvisionsvctoken = "",
cogtextsvctoken = "",
insertiontime = System.currentTimeMillis()
)
Mockito.when(configurationManager.fetchSiteSettings(ArgumentMatchers.any())).thenReturn(siteSettings)
}
after {
sc.stop()
}
it should "produce an all/all aggregates for single event" in {
val period = Period("day-2017-08-11")
val events: RDD[Event] = sc.parallelize(Seq(Event(
pipelinekey = "RSS",
computedfeatures = Features(
mentions = 1,
sentiment = Sentiment(1.0),
keywords = Seq("colombia", "fuerza aerea", "herido", "huracan", "verdad"),
places = Seq(Place("divipola-05001100000000", 6.24604, -75.58013),
Place("divipola-76823100000000", 4.60785, -76.07739),
Place("divipola-52001", 1.05578, -77.19551)),
entities = Seq[Entities]()
),
eventtime = period.startTime(),
eventlangcode = "en",
eventid = "http://www.cancilleria.gov.co/rss.xml",
sourceeventid = UUID.randomUUID().toString,
insertiontime = new Date().getTime,
body = "",
imageurl = None,
summary = "",
fulltext = "",
batchid = UUID.randomUUID().toString,
externalsourceid = "http://www.cancilleria.gov.co/newsroom/news/proximas-horas-llegara-pais-segundo-avion-colombianos-repatriados-puerto-rico",
topics = Seq[String](),
placeids = Seq[String](),
sourceurl = "",
title = ""
),
Event(
pipelinekey = "RSS",
computedfeatures = Features(
mentions = 1,
sentiment = Sentiment(1.0),
keywords = Seq("eln", "herido", "combate"),
places = Seq(Place("divipola-27", 5.94302, -76.94238),
Place("divipola-27372100000000", 7.10349, -77.76281)),
entities = Seq[Entities]()
),
eventtime = period.startTime(),
eventlangcode = "en",
eventid = " RSS.http://casanare.extra.com.co/noticias/judicial/traslado-aeromedico-para-guerrillero-del-eln-herido-en-comba-353329",
sourceeventid = UUID.randomUUID().toString,
insertiontime = new Date().getTime,
body = "",
imageurl = None,
summary = "",
fulltext = "",
batchid = UUID.randomUUID().toString,
externalsourceid = "http://casanare.extra.com.co/rss.xml",
topics = Seq[String](),
placeids = Seq[String](),
sourceurl = "",
title = ""
)))
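    // Explode each event into three rows so aggregates exist per source, per pipeline ("all" source), and globally ("all"/"all").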
val eventsExploded = events.flatMap(event=>{
Seq(
event,
event.copy(externalsourceid = "all"),
event.copy(pipelinekey = "all", externalsourceid = "all")
)
})
val heatmaptiles = aggregator.aggregate(eventsExploded)
val tileBuckets = aggregator.aggregateTileBuckets(heatmaptiles)
val heatmaptilescollection = heatmaptiles.collect()
val tileBucketsscollection = tileBuckets.collect()
    assert(heatmaptilescollection.size == 12015)
    assert(tileBucketsscollection.size == 11420)
val filteredTopics = heatmaptilescollection.filter(topic=>topic.pipelinekey == "all" && topic.externalsourceid == "all" && topic.periodtype == "day" && topic.tilez == 8)
assert(filteredTopics.size == 89)
}
}
|
CatalystCode/project-fortis-spark
|
src/test/scala/com/microsoft/partnercatalyst/fortis/spark/sinks/cassandra/aggregators/HeatmapsOfflineAggregatorTestSpec.scala
|
Scala
|
mit
| 5,009 |