Dataset schema:
  code        string  (5 to 1M chars)
  repo_name   string  (5 to 109 chars)
  path        string  (6 to 208 chars)
  language    string  (1 class)
  license     string  (15 classes)
  size        int64   (5 to 1M)
package com.mesosphere.cosmos.error

import com.mesosphere.cosmos.thirdparty.marathon.circe.Encoders._
import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
import io.circe.Encoder
import io.circe.JsonObject
import io.circe.generic.semiauto.deriveEncoder

final case class AmbiguousAppId(packageName: String, appIds: List[AppId]) extends CosmosError {
  override def data: Option[JsonObject] = CosmosError.deriveData(this)
  override def message: String = {
    s"Multiple apps named [$packageName] are installed: [${appIds.mkString(", ")}]"
  }
}

object AmbiguousAppId {
  implicit val encoder: Encoder[AmbiguousAppId] = deriveEncoder
}
takirala/cosmos
cosmos-common/src/main/scala/com/mesosphere/cosmos/error/AmbiguousAppId.scala
Scala
apache-2.0
646
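A quick sketch of how this error renders, assuming AppId accepts a Marathon app path string (the values below are hypothetical):

// Hypothetical values; assumes AppId("...") builds a Marathon app id.
val err = AmbiguousAppId("kafka", List(AppId("/kafka-one"), AppId("/kafka-two")))
println(err.message)
// Multiple apps named [kafka] are installed: [/kafka-one, /kafka-two]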
/*
 * Copyright 2014–2018 SlamData Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar.ejson

import slamdata.Predef.{Byte => SByte, Char => SChar, Int => _, Map => _, _}
import quasar.contrib.matryoshka.{project, totally}

import matryoshka._
import matryoshka.implicits._
import scalaz.Coproduct
import scalaz.std.list._
import scalaz.std.option._
import scalaz.syntax.traverse._

object EJson {
  def fromJson[A](f: String => A): Json[A] => EJson[A] =
    json => Coproduct(json.run.leftMap(Extension.fromObj(f)))

  def fromCommon[T](c: Common[T])(implicit T: Corecursive.Aux[T, EJson]): T =
    CommonEJson(c).embed

  def fromExt[T](e: Extension[T])(implicit T: Corecursive.Aux[T, EJson]): T =
    ExtEJson(e).embed

  def toJson[A](f: A => Option[String]): EJson[A] => Option[Json[A]] = {
    val handleExt: Extension[A] => Option[Json[A]] = {
      case Map(xs) =>
        xs.traverse {
          case (k, v) => f(k) strengthR v
        } map (kvs => Coproduct.leftc(Obj(ListMap(kvs: _*))))

      case Int(i) =>
        some(Coproduct.rightc(Dec(BigDecimal(i))))

      case _ => none
    }

    _.run.bitraverse(handleExt, c => some(Coproduct.right[Obj](c))) map (_.merge)
  }

  def arr[T](xs: T*)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromCommon(Arr(xs.toList))

  def bool[T](b: Boolean)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromCommon(Bool(b))

  def byte[T](b: SByte)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromExt(Byte(b))

  def char[T](c: SChar)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromExt(Char(c))

  def dec[T](d: BigDecimal)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromCommon(Dec(d))

  def int[T](d: BigInt)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromExt(Int(d))

  def map[T](xs: (T, T)*)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromExt(Map(xs.toList))

  def meta[T](v: T, m: T)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromExt(Meta(v, m))

  def nul[T](implicit T: Corecursive.Aux[T, EJson]): T =
    fromCommon(Null())

  def obj[T](xs: (String, T)*)(implicit T: Corecursive.Aux[T, EJson]): T =
    map((xs.map { case (s, t) => str[T](s) -> t }): _*)

  def str[T](s: String)(implicit T: Corecursive.Aux[T, EJson]): T =
    fromCommon(Str(s))

  def isNull[T](ej: T)(implicit T: Recursive.Aux[T, EJson]): Boolean =
    project[T, EJson].composePrism(optics.nul).nonEmpty(ej)

  /** Replaces `Meta` nodes with their value component. */
  def elideMetadata[T](implicit T: Recursive.Aux[T, EJson]): EJson[T] => EJson[T] =
    totally { case ExtEJson(Meta(v, _)) => v.project }
}
jedesah/Quasar
ejson/src/main/scala/quasar/ejson/EJson.scala
Scala
apache-2.0
3,120
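A sketch of the constructors above, assuming matryoshka's Fix as the fixed-point type (the Corecursive instance for Fix comes with matryoshka; the import path is an assumption):

import matryoshka.data._ // assumed: brings Fix and its (co)recursive instances

// Build { "name": "quasar", "tags": ["ejson"] } as an EJson value.
val doc: Fix[EJson] =
  EJson.obj[Fix[EJson]](
    "name" -> EJson.str[Fix[EJson]]("quasar"),
    "tags" -> EJson.arr[Fix[EJson]](EJson.str[Fix[EJson]]("ejson"))
  )
assert(!EJson.isNull(doc))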
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.ml.feature

import org.apache.hadoop.fs.Path

import org.apache.spark.annotation.Since
import org.apache.spark.internal.config.Kryo.KRYO_SERIALIZER_MAX_BUFFER_SIZE
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.linalg.{BLAS, Vector, Vectors, VectorUDT}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.mllib.feature
import org.apache.spark.mllib.linalg.VectorImplicits._
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.util.{Utils, VersionUtils}

/**
 * Params for [[Word2Vec]] and [[Word2VecModel]].
 */
private[feature] trait Word2VecBase extends Params
  with HasInputCol with HasOutputCol with HasMaxIter with HasStepSize with HasSeed {

  /**
   * The dimension of the code that you want to transform from words.
   * Default: 100
   * @group param
   */
  final val vectorSize = new IntParam(
    this, "vectorSize", "the dimension of codes after transforming from words (> 0)",
    ParamValidators.gt(0))

  /** @group getParam */
  def getVectorSize: Int = $(vectorSize)

  /**
   * The window size (context words from [-window, window]).
   * Default: 5
   * @group expertParam
   */
  final val windowSize = new IntParam(
    this, "windowSize", "the window size (context words from [-window, window]) (> 0)",
    ParamValidators.gt(0))

  /** @group expertGetParam */
  def getWindowSize: Int = $(windowSize)

  /**
   * Number of partitions for sentences of words.
   * Default: 1
   * @group param
   */
  final val numPartitions = new IntParam(
    this, "numPartitions", "number of partitions for sentences of words (> 0)",
    ParamValidators.gt(0))

  /** @group getParam */
  def getNumPartitions: Int = $(numPartitions)

  /**
   * The minimum number of times a token must appear to be included in the word2vec model's
   * vocabulary.
   * Default: 5
   * @group param
   */
  final val minCount = new IntParam(this, "minCount", "the minimum number of times a token must " +
    "appear to be included in the word2vec model's vocabulary (>= 0)", ParamValidators.gtEq(0))

  /** @group getParam */
  def getMinCount: Int = $(minCount)

  /**
   * Sets the maximum length (in words) of each sentence in the input data.
   * Any sentence longer than this threshold will be divided into chunks of
   * up to `maxSentenceLength` size.
   * Default: 1000
   * @group param
   */
  final val maxSentenceLength = new IntParam(this, "maxSentenceLength", "Maximum length " +
    "(in words) of each sentence in the input data. Any sentence longer than this threshold will " +
    "be divided into chunks up to the size (> 0)", ParamValidators.gt(0))

  /** @group getParam */
  def getMaxSentenceLength: Int = $(maxSentenceLength)

  setDefault(vectorSize -> 100, windowSize -> 5, numPartitions -> 1, minCount -> 5,
    maxSentenceLength -> 1000, stepSize -> 0.025, maxIter -> 1)

  /**
   * Validate and transform the input schema.
   */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    val typeCandidates = List(new ArrayType(StringType, true), new ArrayType(StringType, false))
    SchemaUtils.checkColumnTypes(schema, $(inputCol), typeCandidates)
    SchemaUtils.appendColumn(schema, $(outputCol), new VectorUDT)
  }
}

/**
 * Word2Vec trains a model of `Map(String, Vector)`, i.e. transforms a word into a code for further
 * natural language processing or machine learning process.
 */
@Since("1.4.0")
final class Word2Vec @Since("1.4.0") (
    @Since("1.4.0") override val uid: String)
  extends Estimator[Word2VecModel] with Word2VecBase with DefaultParamsWritable {

  @Since("1.4.0")
  def this() = this(Identifiable.randomUID("w2v"))

  /** @group setParam */
  @Since("1.4.0")
  def setInputCol(value: String): this.type = set(inputCol, value)

  /** @group setParam */
  @Since("1.4.0")
  def setOutputCol(value: String): this.type = set(outputCol, value)

  /** @group setParam */
  @Since("1.4.0")
  def setVectorSize(value: Int): this.type = set(vectorSize, value)

  /** @group expertSetParam */
  @Since("1.6.0")
  def setWindowSize(value: Int): this.type = set(windowSize, value)

  /** @group setParam */
  @Since("1.4.0")
  def setStepSize(value: Double): this.type = set(stepSize, value)

  /** @group setParam */
  @Since("1.4.0")
  def setNumPartitions(value: Int): this.type = set(numPartitions, value)

  /** @group setParam */
  @Since("1.4.0")
  def setMaxIter(value: Int): this.type = set(maxIter, value)

  /** @group setParam */
  @Since("1.4.0")
  def setSeed(value: Long): this.type = set(seed, value)

  /** @group setParam */
  @Since("1.4.0")
  def setMinCount(value: Int): this.type = set(minCount, value)

  /** @group setParam */
  @Since("2.0.0")
  def setMaxSentenceLength(value: Int): this.type = set(maxSentenceLength, value)

  @Since("2.0.0")
  override def fit(dataset: Dataset[_]): Word2VecModel = {
    transformSchema(dataset.schema, logging = true)
    val input = dataset.select($(inputCol)).rdd.map(_.getAs[Seq[String]](0))
    val wordVectors = new feature.Word2Vec()
      .setLearningRate($(stepSize))
      .setMinCount($(minCount))
      .setNumIterations($(maxIter))
      .setNumPartitions($(numPartitions))
      .setSeed($(seed))
      .setVectorSize($(vectorSize))
      .setWindowSize($(windowSize))
      .setMaxSentenceLength($(maxSentenceLength))
      .fit(input)
    copyValues(new Word2VecModel(uid, wordVectors).setParent(this))
  }

  @Since("1.4.0")
  override def transformSchema(schema: StructType): StructType = {
    validateAndTransformSchema(schema)
  }

  @Since("1.4.1")
  override def copy(extra: ParamMap): Word2Vec = defaultCopy(extra)
}

@Since("1.6.0")
object Word2Vec extends DefaultParamsReadable[Word2Vec] {

  @Since("1.6.0")
  override def load(path: String): Word2Vec = super.load(path)
}

/**
 * Model fitted by [[Word2Vec]].
 */
@Since("1.4.0")
class Word2VecModel private[ml] (
    @Since("1.4.0") override val uid: String,
    @transient private val wordVectors: feature.Word2VecModel)
  extends Model[Word2VecModel] with Word2VecBase with MLWritable {

  import Word2VecModel._

  /**
   * Returns a dataframe with two fields, "word" and "vector", with "word" being a String and
   * the vector being the DenseVector that it is mapped to.
   */
  @Since("1.5.0")
  @transient lazy val getVectors: DataFrame = {
    val spark = SparkSession.builder().getOrCreate()
    val wordVec = wordVectors.getVectors.mapValues(vec => Vectors.dense(vec.map(_.toDouble)))
    spark.createDataFrame(wordVec.toSeq).toDF("word", "vector")
  }

  /**
   * Find "num" number of words closest in similarity to the given word, not
   * including the word itself.
   * @return a dataframe with columns "word" and "similarity" of the word and the cosine
   * similarities between the synonyms and the given word.
   */
  @Since("1.5.0")
  def findSynonyms(word: String, num: Int): DataFrame = {
    val spark = SparkSession.builder().getOrCreate()
    spark.createDataFrame(findSynonymsArray(word, num)).toDF("word", "similarity")
  }

  /**
   * Find "num" number of words whose vector representation is most similar to the supplied vector.
   * If the supplied vector is the vector representation of a word in the model's vocabulary,
   * that word will be in the results.
   * @return a dataframe with columns "word" and "similarity" of the word and the cosine
   * similarities between the synonyms and the given word vector.
   */
  @Since("2.0.0")
  def findSynonyms(vec: Vector, num: Int): DataFrame = {
    val spark = SparkSession.builder().getOrCreate()
    spark.createDataFrame(findSynonymsArray(vec, num)).toDF("word", "similarity")
  }

  /**
   * Find "num" number of words whose vector representation is most similar to the supplied vector.
   * If the supplied vector is the vector representation of a word in the model's vocabulary,
   * that word will be in the results.
   * @return an array of the words and the cosine similarities between the synonyms and the given
   * word vector.
   */
  @Since("2.2.0")
  def findSynonymsArray(vec: Vector, num: Int): Array[(String, Double)] = {
    wordVectors.findSynonyms(vec, num)
  }

  /**
   * Find "num" number of words closest in similarity to the given word, not
   * including the word itself.
   * @return an array of the words and the cosine similarities between the synonyms and the given
   * word.
   */
  @Since("2.2.0")
  def findSynonymsArray(word: String, num: Int): Array[(String, Double)] = {
    wordVectors.findSynonyms(word, num)
  }

  /** @group setParam */
  @Since("1.4.0")
  def setInputCol(value: String): this.type = set(inputCol, value)

  /** @group setParam */
  @Since("1.4.0")
  def setOutputCol(value: String): this.type = set(outputCol, value)

  /**
   * Transform a sentence column to a vector column to represent the whole sentence. The transform
   * is performed by averaging all word vectors it contains.
   */
  @Since("2.0.0")
  override def transform(dataset: Dataset[_]): DataFrame = {
    val outputSchema = transformSchema(dataset.schema, logging = true)
    val vectors = wordVectors.getVectors
      .mapValues(vv => Vectors.dense(vv.map(_.toDouble)))
      .map(identity).toMap // mapValues doesn't return a serializable map (SI-7005)
    val bVectors = dataset.sparkSession.sparkContext.broadcast(vectors)
    val d = $(vectorSize)
    val emptyVec = Vectors.sparse(d, Array.emptyIntArray, Array.emptyDoubleArray)
    val word2Vec = udf { sentence: Seq[String] =>
      if (sentence.isEmpty) {
        emptyVec
      } else {
        val sum = Vectors.zeros(d)
        sentence.foreach { word =>
          bVectors.value.get(word).foreach { v =>
            BLAS.axpy(1.0, v, sum)
          }
        }
        BLAS.scal(1.0 / sentence.size, sum)
        sum
      }
    }

    dataset.withColumn($(outputCol), word2Vec(col($(inputCol))),
      outputSchema($(outputCol)).metadata)
  }

  @Since("1.4.0")
  override def transformSchema(schema: StructType): StructType = {
    var outputSchema = validateAndTransformSchema(schema)
    if ($(outputCol).nonEmpty) {
      outputSchema = SchemaUtils.updateAttributeGroupSize(outputSchema,
        $(outputCol), $(vectorSize))
    }
    outputSchema
  }

  @Since("1.4.1")
  override def copy(extra: ParamMap): Word2VecModel = {
    val copied = new Word2VecModel(uid, wordVectors)
    copyValues(copied, extra).setParent(parent)
  }

  @Since("1.6.0")
  override def write: MLWriter = new Word2VecModelWriter(this)

  @Since("3.0.0")
  override def toString: String = {
    s"Word2VecModel: uid=$uid, numWords=${wordVectors.wordIndex.size}, " +
      s"vectorSize=${$(vectorSize)}"
  }
}

@Since("1.6.0")
object Word2VecModel extends MLReadable[Word2VecModel] {

  private case class Data(word: String, vector: Array[Float])

  private[Word2VecModel]
  class Word2VecModelWriter(instance: Word2VecModel) extends MLWriter {

    override protected def saveImpl(path: String): Unit = {
      DefaultParamsWriter.saveMetadata(instance, path, sc)

      val wordVectors = instance.wordVectors.getVectors
      val dataPath = new Path(path, "data").toString
      val bufferSizeInBytes = Utils.byteStringAsBytes(
        sc.conf.get(KRYO_SERIALIZER_MAX_BUFFER_SIZE.key, "64m"))
      val numPartitions = Word2VecModelWriter.calculateNumberOfPartitions(
        bufferSizeInBytes, instance.wordVectors.wordIndex.size, instance.getVectorSize)
      val spark = sparkSession
      import spark.implicits._
      spark.createDataset[(String, Array[Float])](wordVectors.toSeq)
        .repartition(numPartitions)
        .map { case (word, vector) => Data(word, vector) }
        .toDF()
        .write
        .parquet(dataPath)
    }
  }

  private[feature]
  object Word2VecModelWriter {
    /**
     * Calculate the number of partitions to use in saving the model.
     * [SPARK-11994] - We want to partition the model in partitions smaller than
     * spark.kryoserializer.buffer.max
     * @param bufferSizeInBytes  Set to spark.kryoserializer.buffer.max
     * @param numWords  Vocab size
     * @param vectorSize  Vector length for each word
     */
    def calculateNumberOfPartitions(
        bufferSizeInBytes: Long,
        numWords: Int,
        vectorSize: Int): Int = {
      val floatSize = 4L // Use Long to help avoid overflow
      val averageWordSize = 15
      // Calculate the approximate size of the model.
      // Assuming an average word size of 15 bytes, the formula is:
      // (floatSize * vectorSize + 15) * numWords
      val approximateSizeInBytes = (floatSize * vectorSize + averageWordSize) * numWords
      val numPartitions = (approximateSizeInBytes / bufferSizeInBytes) + 1
      require(numPartitions < 10e8, s"Word2VecModel calculated that it needs $numPartitions " +
        s"partitions to save this model, which is too large. Try increasing " +
        s"spark.kryoserializer.buffer.max so that Word2VecModel can use fewer partitions.")
      numPartitions.toInt
    }
  }

  private class Word2VecModelReader extends MLReader[Word2VecModel] {

    private val className = classOf[Word2VecModel].getName

    override def load(path: String): Word2VecModel = {
      val spark = sparkSession
      import spark.implicits._

      val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
      val (major, minor) = VersionUtils.majorMinorVersion(metadata.sparkVersion)

      val dataPath = new Path(path, "data").toString

      val oldModel = if (major < 2 || (major == 2 && minor < 2)) {
        val data = spark.read.parquet(dataPath)
          .select("wordIndex", "wordVectors")
          .head()
        val wordIndex = data.getAs[Map[String, Int]](0)
        val wordVectors = data.getAs[Seq[Float]](1).toArray
        new feature.Word2VecModel(wordIndex, wordVectors)
      } else {
        val wordVectorsMap = spark.read.parquet(dataPath).as[Data]
          .collect()
          .map(wordVector => (wordVector.word, wordVector.vector))
          .toMap
        new feature.Word2VecModel(wordVectorsMap)
      }

      val model = new Word2VecModel(metadata.uid, oldModel)
      metadata.getAndSetParams(model)
      model
    }
  }

  @Since("1.6.0")
  override def read: MLReader[Word2VecModel] = new Word2VecModelReader

  @Since("1.6.0")
  override def load(path: String): Word2VecModel = super.load(path)
}
rednaxelafx/apache-spark
mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
Scala
apache-2.0
15,287
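Typical use of the estimator above, following the example in Spark's own documentation (spark is an active SparkSession):

import org.apache.spark.ml.feature.Word2Vec

// Each input row is a sentence, i.e. a Seq[String] of tokens.
val docs = spark.createDataFrame(Seq(
  "Hi I heard about Spark".split(" "),
  "Logistic regression models are neat".split(" ")
).map(Tuple1.apply)).toDF("text")

val model = new Word2Vec()
  .setInputCol("text")
  .setOutputCol("result")
  .setVectorSize(3)
  .setMinCount(0)
  .fit(docs)

model.transform(docs).show(false) // appends the averaged sentence vector per row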
package com.kakao.cuesheet.deps

import java.io.{File, FileInputStream}
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.Paths
import java.util.jar.JarFile
import java.util.zip.ZipInputStream

import com.kakao.mango.io.{AutoClosing, FileSystems, ZipStreams}
import com.kakao.mango.logging.Logging

import scala.io.Source
import scala.util.Try
import scala.xml.XML

sealed trait JarArtifactResolver[+T <: DependencyNode] extends Logging {
  /** Attempt to resolve the artifact information and its dependencies from a given JAR file.
    *
    * @param path the path to the JAR file
    * @return an Option containing the resolved node, or None
    */
  def resolve(path: String): Option[T]
}

/** Check if the Implementation-Title is "Java Runtime Environment". */
class JavaRuntimeResolver extends JarArtifactResolver[JavaRuntimeDependencyNode] {
  private val base = new File(sys.props("java.home")).getParent

  override def resolve(path: String): Option[JavaRuntimeDependencyNode] = {
    if (path.startsWith(base)) {
      return Some(JavaRuntimeDependencyNode(path))
    }
    val attributes = new JarFile(path).getManifest.getMainAttributes
    if (attributes.getValue("Implementation-Title") == "Java Runtime Environment") {
      return Some(JavaRuntimeDependencyNode(path))
    }
    None
  }
}

/** Resolve a dependency from the Maven metadata file stored in META-INF/maven/groupId/artifactId/pom.xml.
  * This works only for JARs that were packaged by Maven; SBT is a notable exception to this.
  */
class MavenMetadataArtifactResolver extends JarArtifactResolver[ManagedDependencyNode] {
  override def resolve(path: String): Option[ManagedDependencyNode] = {
    AutoClosing(new ZipInputStream(new FileInputStream(path), UTF_8)) { zip =>
      for (entry <- ZipStreams.entries(zip)) {
        val name = entry.getName
        if (name.startsWith("META-INF") && name.endsWith("pom.xml")) {
          val artifact = name.split("/").reverse.apply(1)
          if (path.contains(artifact)) {
            val pom = XML.load(zip)
            return new MavenDependencyEnumerator(pom, path).get
          }
        }
      }
      None
    }
  }
}

/** Resolve a dependency in the local Maven repo, "/.m2/repository/group/id/artifactId/version/artifactId-version.jar". */
class MavenPathArtifactResolver extends JarArtifactResolver[ManagedDependencyNode] {
  override def resolve(path: String): Option[ManagedDependencyNode] = {
    mavenPathFormat.findAllIn(path).matchData.toSeq.headOption.flatMap { m =>
      try {
        val pom = XML.loadFile(path.replaceAll("\\.jar$", ".pom"))
        new MavenDependencyEnumerator(pom, path).get
      } catch {
        case e: Throwable => throw new RuntimeException(s"XML parsing error from $path", e)
      }
    }
  }
}

/** Resolve a dependency in the local Gradle cache. */
class GradlePathArtifactResolver extends JarArtifactResolver[ManagedDependencyNode] {
  // TODO: may need to read ivy files as well
  override def resolve(path: String): Option[ManagedDependencyNode] = {
    gradlePathFormat.findAllIn(path).matchData.toSeq.headOption.flatMap { m =>
      try {
        // the xml should be under a sibling of the parent directory of this jar
        val base = Paths.get(path).getParent.getParent
        val found = FileSystems.entries(base, recursive = true).find(_.getFileName.toString == "pom.xml")
        found.flatMap { pomPath =>
          val pom = XML.loadFile(pomPath.toFile)
          new MavenDependencyEnumerator(pom, path).get
        }
      } catch {
        case e: Throwable => throw new RuntimeException(s"XML parsing error from $path", e)
      }
    }
  }
}

class IvyPathArtifactResolver extends JarArtifactResolver[ManagedDependencyNode] {
  override def resolve(path: String): Option[ManagedDependencyNode] = {
    ivyPathFormat.findAllIn(path).matchData.toSeq.headOption.flatMap { m =>
      val group = m.group(1).replace('/', '.')
      val artifact = m.group(2)
      val filename = m.group(3)
      val version = filename.substring(artifact.length + 1, filename.length - 4)
      val ivy = Paths.get(path).getParent.getParent.resolve(s"ivy-$version.xml").toFile
      if (ivy.isFile && ivy.canRead) {
        val lines = Source.fromFile(ivy).getLines().mkString
        val xml = XML.loadString(ivyNameSpaceFormat.replaceAllIn(lines, ""))
        new IvyDependencyEnumerator(xml, path).get
      } else {
        logger.warn(s"Could not find Ivy XML corresponding to $path")
        None
      }
    }
  }
}

class IvyOriginalPathArtifactResolver extends JarArtifactResolver[ManagedDependencyNode] {
  override def resolve(path: String): Option[ManagedDependencyNode] = {
    ivyPathFormat.findAllIn(path).matchData.toSeq.headOption.flatMap { m =>
      val group = m.group(1).replace('/', '.')
      val artifact = m.group(2)
      val filename = m.group(3)
      val version = filename.substring(artifact.length + 1, filename.length - 4)
      try {
        val ivy = Paths.get(path).getParent.getParent.resolve(s"ivy-$version.xml.original").toFile
        if (ivy.isFile && ivy.canRead) {
          new MavenDependencyEnumerator(XML.loadFile(ivy), path).get
        } else {
          logger.warn(s"Could not find Ivy original XML corresponding to $path")
          None
        }
      } catch {
        case e: Throwable => throw new RuntimeException(s"XML parsing error from $path", e)
      }
    }
  }
}

/** A catch-all resolver that resolves any JAR as unmanaged. */
class UnmanagedJarResolver extends JarArtifactResolver[UnmanagedDependencyNode] {
  override def resolve(path: String): Option[UnmanagedDependencyNode] = {
    Some(UnmanagedDependencyNode(path))
  }
}

/** Chains multiple artifact resolvers and returns the first successful one. */
class ChainedArtifactResolver[T <: DependencyNode](resolvers: JarArtifactResolver[T]*)
  extends JarArtifactResolver[T] {
  override def resolve(path: String): Option[T] = {
    resolvers.toStream.flatMap(r => Try(r.resolve(path)).toOption).collectFirst {
      case Some(node) => node
    }
  }
}
kakao/cuesheet
src/main/scala/com/kakao/cuesheet/deps/JarArtifactResolver.scala
Scala
apache-2.0
6,098
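A hypothetical wiring of the resolvers above; JarArtifactResolver is covariant in its node type, so mixing resolvers widens the result to DependencyNode:

// Order matters: the first resolver that succeeds wins; failures are swallowed by Try.
val resolver = new ChainedArtifactResolver(
  new JavaRuntimeResolver,
  new MavenPathArtifactResolver,
  new GradlePathArtifactResolver,
  new IvyPathArtifactResolver,
  new IvyOriginalPathArtifactResolver,
  new MavenMetadataArtifactResolver,
  new UnmanagedJarResolver // catch-all, always succeeds
)
val node: Option[DependencyNode] = resolver.resolve("/path/to/some.jar") // path is illustrative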
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources

import java.util.{Locale, TimeZone}

import org.apache.hadoop.fs.{FileStatus, GlobFilter}

import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.unsafe.types.UTF8String

trait PathFilterStrategy extends Serializable {
  def accept(fileStatus: FileStatus): Boolean
}

trait StrategyBuilder {
  def create(parameters: CaseInsensitiveMap[String]): Option[PathFilterStrategy]
}

class PathGlobFilter(filePattern: String) extends PathFilterStrategy {

  private val globFilter = new GlobFilter(filePattern)

  override def accept(fileStatus: FileStatus): Boolean =
    globFilter.accept(fileStatus.getPath)
}

object PathGlobFilter extends StrategyBuilder {
  val PARAM_NAME = "pathglobfilter"

  override def create(parameters: CaseInsensitiveMap[String]): Option[PathFilterStrategy] = {
    parameters.get(PARAM_NAME).map(new PathGlobFilter(_))
  }
}

/**
 * Provide modifiedAfter and modifiedBefore options when
 * filtering from a batch-based file data source.
 *
 * Example Usages
 * Load all CSV files modified after date:
 * {{{
 *   spark.read.format("csv").option("modifiedAfter","2020-06-15T05:00:00").load()
 * }}}
 *
 * Load all CSV files modified before date:
 * {{{
 *   spark.read.format("csv").option("modifiedBefore","2020-06-15T05:00:00").load()
 * }}}
 *
 * Load all CSV files modified between two dates:
 * {{{
 *   spark.read.format("csv").option("modifiedAfter","2019-01-15T05:00:00")
 *     .option("modifiedBefore","2020-06-15T05:00:00").load()
 * }}}
 */
abstract class ModifiedDateFilter extends PathFilterStrategy {

  def timeZoneId: String

  protected def localTime(micros: Long): Long =
    DateTimeUtils.fromUTCTime(micros, timeZoneId)
}

object ModifiedDateFilter {

  def getTimeZoneId(options: CaseInsensitiveMap[String]): String = {
    options.getOrElse(
      DateTimeUtils.TIMEZONE_OPTION.toLowerCase(Locale.ROOT),
      SQLConf.get.sessionLocalTimeZone)
  }

  def toThreshold(timeString: String, timeZoneId: String, strategy: String): Long = {
    val timeZone: TimeZone = DateTimeUtils.getTimeZone(timeZoneId)
    val ts = UTF8String.fromString(timeString)
    DateTimeUtils.stringToTimestamp(ts, timeZone.toZoneId).getOrElse {
      throw QueryCompilationErrors.invalidTimestampProvidedForStrategyError(strategy, timeString)
    }
  }
}

/**
 * Filter used to determine whether a file was modified before the provided timestamp.
 */
class ModifiedBeforeFilter(thresholdTime: Long, val timeZoneId: String)
  extends ModifiedDateFilter {

  override def accept(fileStatus: FileStatus): Boolean =
    // We standardize on microseconds wherever possible
    // getModificationTime returns in milliseconds
    thresholdTime - localTime(DateTimeUtils.millisToMicros(fileStatus.getModificationTime)) > 0
}

object ModifiedBeforeFilter extends StrategyBuilder {
  import ModifiedDateFilter._

  val PARAM_NAME = "modifiedbefore"

  override def create(parameters: CaseInsensitiveMap[String]): Option[PathFilterStrategy] = {
    parameters.get(PARAM_NAME).map { value =>
      val timeZoneId = getTimeZoneId(parameters)
      val thresholdTime = toThreshold(value, timeZoneId, PARAM_NAME)
      new ModifiedBeforeFilter(thresholdTime, timeZoneId)
    }
  }
}

/**
 * Filter used to determine whether a file was modified after the provided timestamp.
 */
class ModifiedAfterFilter(thresholdTime: Long, val timeZoneId: String)
  extends ModifiedDateFilter {

  override def accept(fileStatus: FileStatus): Boolean =
    // getModificationTime returns in milliseconds
    // We standardize on microseconds wherever possible
    localTime(DateTimeUtils.millisToMicros(fileStatus.getModificationTime)) - thresholdTime > 0
}

object ModifiedAfterFilter extends StrategyBuilder {
  import ModifiedDateFilter._

  val PARAM_NAME = "modifiedafter"

  override def create(parameters: CaseInsensitiveMap[String]): Option[PathFilterStrategy] = {
    parameters.get(PARAM_NAME).map { value =>
      val timeZoneId = getTimeZoneId(parameters)
      val thresholdTime = toThreshold(value, timeZoneId, PARAM_NAME)
      new ModifiedAfterFilter(thresholdTime, timeZoneId)
    }
  }
}

object PathFilterFactory {

  private val strategies = Seq(PathGlobFilter, ModifiedBeforeFilter, ModifiedAfterFilter)

  def create(parameters: CaseInsensitiveMap[String]): Seq[PathFilterStrategy] = {
    strategies.flatMap { _.create(parameters) }
  }
}
ueshin/apache-spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/pathFilters.scala
Scala
apache-2.0
5,359
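The options are exercised exactly as the scaladoc above shows; for instance (spark is an active SparkSession, the path is illustrative):

// Keep only files modified within a window; timestamps are interpreted
// in the session time zone (or the "timeZone" read option, if set).
val recent = spark.read
  .format("csv")
  .option("modifiedAfter", "2019-01-15T05:00:00")
  .option("modifiedBefore", "2020-06-15T05:00:00")
  .load("/data/events")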
/* Copyright (C) 2008-2010 Univ of Massachusetts Amherst, Computer Science Dept
   This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
   http://factorie.cs.umass.edu, http://code.google.com/p/factorie/
   This software is provided under the terms of the Eclipse Public License 1.0
   as published by http://www.opensource.org.  For further information,
   see the file `LICENSE.txt' included with this distribution. */

package cc.factorie.util

/*
 Copyright 2009 David Hall, Daniel Ramage

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

import java.lang.ref.SoftReference

import scala.collection.mutable.Map

/**
 * Provides a cache where both keys and values are only softly referenced,
 * allowing garbage collection of either at any time, backed by a HashMap
 * of SoftReferences.
 *
 * This is currently a direct port of a corresponding Java class from JavaNLP,
 * but could well be adapted to be a scala map at some point.
 *
 * @author dramage
 */
class MapCache[K, V] extends Map[K, V] {
  /** cache of values */
  protected val inner = new java.util.HashMap[HashableSoftReference, SoftReference[Option[V]]]

  /** queue of objects to remove */
  protected val removalQueue = new scala.collection.mutable.Queue[HashableSoftReference]

  /** Removes all objects in the removal queue */
  protected def dequeue() = {
    while (!removalQueue.isEmpty) {
      inner.remove(removalQueue.dequeue)
    }
  }

  /**
   * Resolves the soft reference, returning None if the reference
   * has disappeared, or Some(value) or Some(null) depending on whether
   * null was the stored value.
   */
  private def resolve(key: K, ref: SoftReference[Option[V]]): Option[V] = {
    val got = ref.get
    if (got == null) {
      // value has been gc'd, free key
      inner.remove(new HashableSoftReference(key))
      None
    } else {
      got match {
        case Some(value) => Some(value)
        case None => Some(null.asInstanceOf[V])
      }
    }
  }

  override def clear = {
    dequeue()
    removalQueue.clear
    inner.clear()
  }

  override def contains(key: K) = {
    dequeue()
    inner.containsKey(new HashableSoftReference(key))
  }

  /**
   * Returns the value currently associated with the given key if one
   * has been set with put and not been subsequently garbage collected.
   */
  override def get(key: K): Option[V] = {
    dequeue()
    val ref = inner.get(new HashableSoftReference(key))
    if (ref != null) {
      resolve(key, ref)
    } else {
      None
    }
  }

  /**
   * Returns the expected size of the cache. Note that this may over-report,
   * as objects may have been garbage collected.
   */
  override def size(): Int = {
    dequeue()
    inner.size
  }

  /**
   * Iterates the elements of the cache that are currently present.
   */
  override def iterator: Iterator[(K, V)] = {
    dequeue()
    for (pair <- JavaCollections.iScalaIterator(inner.entrySet.iterator);
         k = pair.getKey.get;
         v = resolve(k, pair.getValue);
         if k != null && v != None)
      yield (k, v.asInstanceOf[Some[V]].get)
  }

  /**
   * Associates the given key with a soft reference to the given value.
   * Either key or value or both may be garbage collected at any point.
   * Value must be non-null.
   */
  override def update(key: K, value: V): Unit = {
    dequeue()
    inner.put(new HashableSoftReference(key), new SoftReference(Some(value)))
  }

  /**
   * Removes the given key from the map.
   */
  override def -=(key: K): this.type = {
    dequeue()
    inner.remove(new HashableSoftReference(key))
    this
  }

  def +=(kv: (K, V)): this.type = {
    throw new Error // TODO Implement this
  }

  /**
   * A SoftReference with equality and hashcode based on the underlying
   * object. Automatically removes itself from the containing map if the
   * reference has been gc'd.
   *
   * @author dramage
   */
  class HashableSoftReference(ref: SoftReference[K], hash: Int) {
    def this(key: K) = this(new SoftReference(key), key.hashCode)

    var removing = false

    def get = {
      val got = ref.get
      if (!removing && got == null) {
        removing = true
        MapCache.this.removalQueue += this
      }
      got
    }

    override def hashCode = hash

    override def equals(other: Any) = other match {
      case otherref: HashableSoftReference => (this eq otherref) || (this.get == otherref.get)
      case _ => false
    }
  }
}
andrewmilkowski/factorie
src/main/scala/cc/factorie/util/MapCache.scala
Scala
epl-1.0
5,122
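A minimal sketch of the cache above; because values sit behind SoftReferences, entries may silently disappear under memory pressure:

val cache = new MapCache[String, Vector[Double]]
cache("alpha") = Vector(1.0, 2.0) // update() stores a SoftReference(Some(value))
cache.get("alpha") match {
  case Some(v) => println(v.mkString(","))
  case None    => println("entry was collected (or never stored)")
}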
import scala.reflect.{ClassTag, classTag}

object Test extends App {
  println(classTag[Int])
  println(classTag[Array[Int]])
  println(classTag[Array[Array[Int]]])
  println(classTag[Array[Array[Array[Int]]]])
  println(classTag[Array[Array[Array[Array[Int]]]]])
}
som-snytt/dotty
tests/disabled/reflect/run/classtags_multi.scala
Scala
apache-2.0
266
package one.lockstep.monolock.client

import java.util.concurrent.atomic._
import one.lockstep.lock._
import one.lockstep.lock.client._
import one.lockstep.lock.client.transport.RequestRejectedException
import one.lockstep.monolock.client.pipeline._
import one.lockstep.monolock.protocol._
import one.lockstep.util._
import one.lockstep.util.crypto._
import one.lockstep.util.storage._
import scodec.codecs

import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import scala.util._
import scala.concurrent.duration._
import one.lockstep.util.protocol._

class MonolockManager(createPipeline: () => MessagePipeline, protocolCiphersuite: Ciphersuite)
                     (implicit ec: ExecutionContext) extends LockManager with Logging {

  protected lazy val lockRepository = new Repository("locks")(Protocol.codec[EncryptedLock])
  protected lazy val timestampsRepository = new Repository("timestamps")(codecs.int64)

  private def lockHandle(lockId: LockId)(implicit tx: Transaction) =
    lockRepository.handle(lockId.asBase64)

  private def timestampHandle(lockId: LockId)(implicit tx: Transaction) =
    timestampsRepository.handle(lockId.asBase64)

  private implicit val implicitCiphersuite = protocolCiphersuite

  private object resources {
    var pipeline: MessagePipeline = _

    def warmUp(): Unit = if (pipeline == null) {
      logger.trace(s"creating new pipeline..")
      pipeline = createPipeline()
    }

    def suspend(): Unit = {
      logger.trace(s"shutting down pipeline..")
      pipeline.shutdown()
      pipeline = null
    }
  }

  private object operations {
    type ID = Int
    private var ongoingOperations = Set[ID]()
    private val lastId = new AtomicInteger()

    private def nextId(): ID = lastId.incrementAndGet()

    def addNew(): ID = operations.synchronized {
      resources.warmUp()
      nextId() tap (id => ongoingOperations += id) tap (id =>
        logger.trace(s"new online-operation added, id = $id"))
    }

    def drop(id: ID): Unit = operations.synchronized {
      logger.trace(s"online-operation dropped, id = $id")
      if (ongoingOperations.contains(id)) {
        ongoingOperations -= id
        if (ongoingOperations.isEmpty) resources.suspend()
      }
    }
  }

  private type Passcode = Bytes
  private type Timeout = Duration

  private def operation[A](f: (Passcode, Timeout) => Future[A]) = new OnlineOperation[A] {
    private val outcome = Promise[A]()
    private val executeInvoked = new AtomicBoolean(false)
    private val id = operations.addNew() // this will warm-up the pipeline..
    private val dropTimeout = 30.seconds
    val scheduledTimeout = schedule(dropTimeout) { operations.drop(id) } // just in case abandoned..

    outcome.future.onComplete { _ => operations.drop(id); scheduledTimeout.cancel() }

    override def cancel(): Boolean = outcome.tryFailure(new LockException(LockErr.Cancelled))

    override def execute(passcode: Bytes, timeout: Duration): Future[A] = {
      require(executeInvoked.compareAndSet(false, true), "the operation has already been executed")
      if (!outcome.isCompleted) {
        // here we use a separate operation-id for the execution, because execute() and cancel()
        // may be called independently. also the drop-timeout may occur independently of execute()
        // and cancel(). this way, we ensure that suspend() is NOT called while execute() is in progress
        val execId = operations.addNew()
        f(passcode, timeout).onComplete { result =>
          operations.drop(execId)
          outcome.tryComplete(result)
        }
      }
      outcome.future
    }
  }

  override def lockExists(lockId: LockId)(implicit tx: Transaction): Boolean = {
    lockHandle(lockId).value.isDefined
  }

  override def remove(lockId: LockId)(implicit tx: Transaction): Boolean = {
    val exists = lockExists(lockId)
    lockHandle(lockId).remove()
    timestampHandle(lockId).remove()
    exists
  }

  override def changePasscode(lockId: LockId, masterSecret: Secret, newPasscode: Bytes)
                             (implicit tx: Transaction): Unit = {
    val handle = lockHandle(lockId)
    require(handle.value.isDefined, "locked key does not exist")
    val lockedKey = handle.get
    val timestamp = currentTimestamp(lockId)
    val relockedKey = Lock.relock(lockedKey, masterSecret, newPasscode, timestamp, nextSealKey = None)
    handle.set(relockedKey)
  }

  private def currentTimestamp(lockId: LockId)(implicit tx: Transaction): Long =
    timestampHandle(lockId).get

  private def incrementTimestamp(lockId: LockId)(implicit tx: Transaction): Long = {
    val handle = timestampHandle(lockId)
    val newTimestamp = handle.value.getOrElse(0L) + 1
    handle.set(newTimestamp)
    handle.commit() // we always persist the timestamp before proceeding..
    newTimestamp
  }

  /**
   * Start a new lock operation. The operation can later be executed or cancelled by the user.
   *
   * @return the new lock operation.
   */
  override def prepareLock(ticket: LockTicket, lockParams: LockParams)
                          (implicit tx: Transaction): OnlineOperation[Secret] = {
    val lockId = ticket.lockId
    val handle = lockHandle(lockId)
    operation[Secret] { (passcode, timeout) =>
      repeatWhileDiscarded(maxRetries = 2, timeout) {
        val timestamp = incrementTimestamp(lockId)
        val latestSealOpt = handle.value.map(lockedKey =>
          Protocol.decode[Lock.Header](lockedKey.header).seal) // locked-key may be undefined yet..
        val newSeriesId = Lock.nonce()
        val request = EnrollmentRequest(Protocol.encoded(ticket), timestamp,
          protocolCiphersuite.hash(prependLength(newSeriesId)), latestSealOpt)
        sendReceive[EnrollmentRequest, EnrollmentResponse, Secret](request) {
          case EnrollmentResponse.Failure(err) => err
        } {
          case EnrollmentResponse.Success(sealKey) =>
            val masterSecret = generateMasterSecret()
            val lockoutThreshold = lockParams.asInstanceOf[BasicLockParams].lockoutThreshold
            val encryptedLock = Lock.lock(lockId, masterSecret, passcode, timestamp,
              newSeriesId, sealKey, lockoutThreshold)
            handle.set(encryptedLock)
            masterSecret
        }
      }
    }
  }

  override def prepareUnlock(lockId: LockId)(implicit tx: Transaction): OnlineOperation[Secret] = {
    val handle = lockHandle(lockId)
    require(handle.value.isDefined, "lock does not exist")
    operation[Secret] { (passcode, timeout) =>
      repeatWhileDiscarded(maxRetries = 2, timeout) {
        val encryptedLock = handle.get // already verified
        val timestamp = incrementTimestamp(lockId)
        val UnlockStart(request, kexSharedSecret) = UnlockHelper.start(encryptedLock, passcode, timestamp)
        sendReceive[UnlockRequest, UnlockResponse, Secret](request) {
          case UnlockResponse.Failure(err) => err
        } {
          case success: UnlockResponse.Success =>
            val UnlockResult(masterSecret, relockedKey) =
              UnlockHelper.conclude(encryptedLock, passcode, timestamp, kexSharedSecret, success)
            handle.set(relockedKey)
            masterSecret
        }
      }
    }
  }

  private def generateMasterSecret() = Secret(random.bits(protocolCiphersuite.bitStrength * 2).bytes)

  /**
   * Repeat the specified operation f while it fails with DiscardedException.
   * Note that the server may occasionally discard requests due to sporadic errors.
   * @param maxRetries the maximum number of times to retry if discarded
   * @param timeout the total timeout to wait before aborting the operation
   * @param f the operation to perform against the server (via the pipeline).
   */
  private def repeatWhileDiscarded[Result](maxRetries: Int, timeout: Duration)(f: => Future[Result]): Future[Result] = {
    val outcome: Promise[Result] = Promise()

    def repeatWhileDiscarded(maxRetries: Int): Unit = {
      f.map(outcome.trySuccess).recover {
        case discarded: DiscardedException if maxRetries > 0 =>
          if (!outcome.isCompleted) { // could be timed out, so better check..
            logger.warn("request was discarded by server, retrying..")
            repeatWhileDiscarded(maxRetries - 1)
          }
        case discarded: DiscardedException =>
          outcome.tryFailure(new LockException(LockErr.ServiceUnavailable))
        case le: LockException =>
          outcome.tryFailure(le)
        case cause =>
          logger.debug("operation failed due to unexpected error", cause)
          outcome.tryFailure(new LockException(LockErr.Unspecified, cause))
      }
    }

    val scheduledTimeout = schedule(timeout) { outcome.tryFailure(new LockException(LockErr.Timeout)) }
    outcome.future.onComplete { _ => scheduledTimeout.cancel() }
    repeatWhileDiscarded(maxRetries)
    outcome.future
  }

  private def sendReceive[Request <: MonolockRequest, Response <: MonolockResponse : ClassTag, Result]
      (request: Request)(f: PartialFunction[Response, MonolockResponse.Error])
      (g: PartialFunction[Response, Result]): Future[Result] = {
    val p = Promise[Result]()
    try {
      resources.pipeline.send(request).andThen {
        case Failure(RequestRejectedException(_, cause)) =>
          logger.debug("request was rejected", cause)
          p.tryFailure(new LockException(LockErr.RejectedByService, cause))
        case Failure(cause) =>
          logger.debug("connection error occurred", cause)
          p.tryFailure(new LockException(LockErr.ServiceUnavailable, cause))
      }.mapTo[Response].map[Unit] {
        case response if f.isDefinedAt(response) => p.tryFailure(toLockException(f(response)))
        case response => p.trySuccess(g(response))
      }.andThen {
        case Failure(cause) if !p.isCompleted =>
          logger.error("response processing error", cause)
          p.tryFailure(new LockException(LockErr.Unspecified, cause))
      }
    } catch {
      case NonFatal(cause) => p.tryFailure(cause)
    }
    p.future
  }

  private class DiscardedException extends LockException(LockErr.ServiceUnavailable)

  private def toLockException(error: MonolockResponse.Error): LockException = error match {
    case MonolockResponse.IncorrectPasscode => new LockException(LockErr.IncorrectPasscode)
    case MonolockResponse.LockedOut => new LockException(LockErr.Lockout)
    case MonolockResponse.Rejected => new LockException(LockErr.RejectedByService)
    case MonolockResponse.Discarded => new DiscardedException
  }
}
lockstep-one/vault
vault-client/src/main/scala/one/lockstep/monolock/client/MonolockManager.scala
Scala
agpl-3.0
10,675
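A hypothetical client flow over the manager above; LockId, the passcode bytes, the implicit Transaction, and the pipeline factory (makePipeline below) all come from lockstep's other modules and are assumptions here:

import scala.concurrent.duration._
import scala.util.{Failure, Success}

// Sketch only: unlock an existing lock and receive its master secret.
val manager = new MonolockManager(() => makePipeline(), ciphersuite) // makePipeline, ciphersuite assumed
val op = manager.prepareUnlock(lockId) // warms up the pipeline; requires an implicit Transaction
op.execute(passcode, timeout = 30.seconds).onComplete {
  case Success(masterSecret)     => // use the Secret, e.g. to decrypt a vault entry
  case Failure(e: LockException) => // inspect e: IncorrectPasscode, Lockout, Timeout, ...
  case Failure(other)            => // unexpected error
}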
/*
 * Copyright 2015-2017 Marconi Lanna
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
case class REPLesent(
  width: Int = 0
, height: Int = 0
, source: String = "REPLesent.txt"
, slideCounter: Boolean = false
, slideTotal: Boolean = false
, padNewline: Boolean = false
, intp: scala.tools.nsc.interpreter.IMain = null
) {
  import java.io.File

  import scala.util.matching.Regex
  import scala.util.{Try, Success, Failure}

  private case class Config(
    top: String = "*"
  , bottom: String = "*"
  , sinistral: String = "* "
  , dextral: String = " *"
  , newline: String = System.lineSeparator
  , whiteSpace: String = " "
  , private val width: Int
  , private val height: Int
  ) {
    val (screenWidth, screenHeight): (Int, Int) = {
      val defaultWidth = 80
      val defaultHeight = 25

      if (width > 0 && height > 0) (width, height) else {
        // Experimental support for screen size auto-detection.
        // Supports only Unix-like systems, including Mac OS X and Linux.
        // Does not work with Microsoft Windows.
        val Array(h, w) = Try {
          import scala.sys.process._

          val stty = Seq("sh", "-c", "stty size < /dev/tty").!!

          stty.trim.split(' ') map (_.toInt)
        } getOrElse Array(0, 0)

        val screenWidth = Seq(width, w) find (_ > 0) getOrElse defaultWidth
        val screenHeight = Seq(height, h - (if (padNewline) 1 else 0)) find (_ > 0) getOrElse defaultHeight

        (screenWidth, screenHeight)
      }
    }

    private def fill(s: String): String = if (s.isEmpty) s else {
      val t = s * (screenWidth / s.length)
      t + s.take(screenWidth - t.length)
    }

    val topRow = fill(top) + newline
    val bottomRow = fill(bottom)

    val verticalSpace = screenHeight - 3 // accounts for header, footer, and REPL prompt
    val horizontalSpace = screenWidth - sinistral.length - dextral.length

    val blankLine = {
      val padding = if (dextral.isEmpty) "" else whiteSpace * horizontalSpace + dextral
      sinistral + padding + newline
    }
  }

  private val config = Config(width = width, height = height)

  private case class Line(content: String, length: Int, private val style: Line.Style) {
    override def toString: String = content
    def isEmpty: Boolean = content.isEmpty
    def render(margin: Int): String = style(this, margin)
  }

  private object Line {
    import scala.io.AnsiColor._

    protected sealed trait Style {
      import config.whiteSpace

      protected def horizontalSpace = config.horizontalSpace

      protected def fill(line: Line, left: Int, right: Int): String = {
        whiteSpace * left + line + whiteSpace * right
      }

      def apply(line: Line, margin: Int): String
    }

    private object HorizontalRuler extends Style {
      private val ansiBegin = RESET.head
      private val ansiEnd = RESET.last
      private val defaultPattern = Line("-")

      def apply(line: Line, margin: Int): String = {
        // Provides a default pattern if none was specified
        val pattern = if (line.isEmpty) defaultPattern else line

        val width = horizontalSpace - margin
        val repeats = width / pattern.length

        val content = pattern.toString * repeats

        var remaining = width - repeats * pattern.length
        var ansi = false
        var reset = ""

        val padding = pattern.toString takeWhile { c =>
          val continue = remaining > 0

          if (continue) c match {
            case `ansiEnd` if ansi => ansi = false
            case _ if ansi => // no-op
            case `ansiBegin` => ansi = true; reset = RESET
            case c if Character.isHighSurrogate(c) => // no-op
            case _ => remaining -= 1
          }

          continue
        }

        val left = margin / 2
        val right = margin - left

        val l = Line(content + padding + reset, width, LeftAligned)

        fill(l, left, right)
      }
    }

    private object FullScreenHorizontalRuler extends Style {
      def apply(line: Line, ignored: Int): String = HorizontalRuler(line, 0)
    }

    private object LeftFlushed extends Style {
      def apply(line: Line, ignored: Int): String = {
        val left = 0
        val right = horizontalSpace - line.length

        fill(line, left, right)
      }
    }

    private object LeftAligned extends Style {
      def apply(line: Line, margin: Int): String = {
        val left = margin / 2
        val right = horizontalSpace - left - line.length

        fill(line, left, right)
      }
    }

    private object Centered extends Style {
      def apply(line: Line, ignored: Int): String = {
        val margin = horizontalSpace - line.length

        val left = margin / 2
        val right = margin - left

        fill(line, left, right)
      }
    }

    private object RightAligned extends Style {
      def apply(line: Line, margin: Int): String = {
        val right = (margin + 1) / 2
        val left = horizontalSpace - right - line.length

        fill(line, left, right)
      }
    }

    private object RightFlushed extends Style {
      def apply(line: Line, ignored: Int): String = {
        val left = horizontalSpace - line.length
        val right = 0

        fill(line, left, right)
      }
    }

    private def style(line: String): (String, Style) = line match {
      case s if s startsWith "<< " => (s.drop(3), LeftFlushed)
      case s if s startsWith "< " => (s.drop(2), LeftAligned)
      case s if s startsWith "| " => (s.drop(2), Centered)
      case s if s startsWith "> " => (s.drop(2), RightAligned)
      case s if s startsWith ">> " => (s.drop(3), RightFlushed)
      case s if s startsWith "//" => (s.drop(2), FullScreenHorizontalRuler)
      case s if s startsWith "/" => (s.drop(1), HorizontalRuler)
      case s: String => (s, LeftAligned)
    }

    private val ansiEscape = """\\.""".r

    private val ansiColor = Map(
      'b' -> BLUE
    , 'c' -> CYAN
    , 'g' -> GREEN
    , 'k' -> BLACK
    , 'm' -> MAGENTA
    , 'r' -> RED
    , 'w' -> WHITE
    , 'y' -> YELLOW
    , 'B' -> BLUE_B
    , 'C' -> CYAN_B
    , 'G' -> GREEN_B
    , 'K' -> BLACK_B
    , 'M' -> MAGENTA_B
    , 'R' -> RED_B
    , 'W' -> WHITE_B
    , 'Y' -> YELLOW_B
    , '!' -> REVERSED
    , '*' -> BOLD
    , '_' -> UNDERLINED
    )

    private def ansi(line: String): (String, Int) = {
      var drop = 0
      var reset = ""

      val content: String = ansiEscape.replaceAllIn(line, m =>
        m.matched(1) match {
          case c if ansiColor.contains(c) => drop += 2; reset = RESET; ansiColor(c)
          case 's' => drop += 2; RESET
          case '\\' => drop += 1; "\\\\"
          case c: Char => "\\\\" + c
        }
      )

      (content + reset, drop)
    }

    private val emojiEscape = """:([\w+\-]+):""".r

    private lazy val emojis: Map[String, String] = {
      Try {
        val emoji = scala.io.Source.fromFile("emoji.txt").getLines
        emoji.map { l =>
          val a = l.split(' ')
          (a(1), a(0))
        }.toMap
      } getOrElse Map.empty
    }

    private def emoji(line: String): (String, Int) = {
      var drop = 0

      val content: String = emojiEscape.replaceAllIn(line, m => {
        m.group(1) match {
          case e if emojis.contains(e) => drop += m.matched.length - 1; emojis(e)
          case _ => m.matched
        }
      })

      (content, drop)
    }

    def apply(line: String): Line = {
      val (l1, lineStyle) = style(line)
      val (l2, ansiDrop) = ansi(l1)
      val (content, emojiDrop) = emoji(l2)

      val length = l1.codePointCount(0, l1.length) - ansiDrop - emojiDrop

      Line(content = content, length = length, style = lineStyle)
    }
  }

  // `size` and `maxLength` refer to the dimensions of the slide's last build
  private case class Build(content: IndexedSeq[Line], size: Int, maxLength: Int, footer: Line)

  private case class Slide(content: IndexedSeq[Line], builds: IndexedSeq[Int], code: IndexedSeq[String]) {
    private val maxLength = content.maxBy(_.length).length

    def lastBuild: Int = builds.size - 1
    def hasBuild(n: Int): Boolean = builds.isDefinedAt(n)
    def build(n: Int, footer: Line): Build = Build(content.take(builds(n)), content.size, maxLength, footer)
  }

  private case class Deck(slides: IndexedSeq[Slide]) {
    private var slideCursor = -1
    private var buildCursor = 0

    private def currentSlideIsDefined: Boolean = slides.isDefinedAt(slideCursor)
    private def currentSlide: Slide = slides(slideCursor)

    private def footer: Line = {
      val sb = StringBuilder.newBuilder
      if (slideCounter) {
        sb ++= ">> " + (slideCursor + 1)
        if (slideTotal) sb ++= "/" + slides.size
        sb ++= " "
      }
      Line(sb.mkString)
    }

    private def select(slide: Int = slideCursor, build: Int = 0): Option[Build] = {
      // "Stops" the cursor one position after/before the last/first slide to avoid
      // multiple next/previous calls taking it indefinitely away from the deck
      slideCursor = slide.min(slides.size).max(-1)
      buildCursor = build

      if (currentSlideIsDefined && currentSlide.hasBuild(buildCursor)) {
        Some(currentSlide.build(buildCursor, footer))
      } else {
        None
      }
    }

    def jumpTo(n: Int): Option[Build] = select(slide = n)
    def jump(n: Int): Option[Build] = jumpTo(slideCursor + n)

    def nextBuild: Option[Build] = select(build = buildCursor + 1) orElse jump(1)
    def redrawBuild: Option[Build] = select(build = buildCursor)
    def previousBuild: Option[Build] = select(build = buildCursor - 1) orElse {
      jump(-1) flatMap { _ => select(build = currentSlide.lastBuild) }
    }

    def lastSlide: Option[Build] = jumpTo(slides.size - 1)
    def lastBuild: Option[Build] = jumpTo(slides.size) orElse previousBuild

    def currentSlideNumber: Int = slideCursor

    def runCode: Unit = {
      val code = currentSlide.code(buildCursor)
      if (repl.isEmpty) {
        Console.err.print(s"No reference to REPL found. Please call with parameter intp=$$intp")
      } else if (code.isEmpty) {
        Console.err.print("No code for you")
      } else {
        repl foreach (_.interpret(code))
      }
    }
  }

  private val helpMessage = """Usage:
    |  next        n   >    go to next build/slide
    |  previous    p   <    go back to previous build/slide
    |  redraw      z        redraw the current build/slide
    |  Next        N   >>   go to next slide
    |  Previous    P   <<   go back to previous slide
    |  i next      i n      advance i slides
    |  i previous  i p      go back i slides
    |  i go        i g      go to slide i
    |  first       f   |<   go to first slide
    |  last        l   >|   go to last slide
    |  Last        L   >>|  go to last build of last slide
    |  run         r   !!   execute code that appears on slide
    |  blank       b        blank screen
    |  help        h   ?    print this help message""".stripMargin

  private val repl = Option(intp)

  private var deck = Deck(parseSource(source))

  private def parseSource(path: String): IndexedSeq[Slide] = {
    Try {
      val pathFile = new File(path)
      val lines: Iterator[String] = (
        if (pathFile.isDirectory) {
          pathFile
            .list
            .sorted
            .filter(_.endsWith(".replesent"))
            .flatMap { name => scala.io.Source.fromFile(new File(pathFile, name)).getLines }
            .toIterator
        } else {
          scala.io.Source.fromFile(path).getLines
        }
      )
      parse(lines)
    } match {
      case Failure(e) =>
        e.printStackTrace
        Console.err.print(s"Sorry, could not parse $path. Quick, say something funny before anyone notices!")
        IndexedSeq.empty
      case Success(value) => value
    }
  }

  private def parse(lines: Iterator[String]): IndexedSeq[Slide] = {
    sealed trait LineHandler {
      def switch: LineHandler
      def apply(line: String): (Line, Option[String])
    }

    object LineHandler extends LineHandler {
      def switch: LineHandler = CodeHandler
      def apply(line: String): (Line, Option[String]) = (Line(line), None)
    }

    object CodeHandler extends LineHandler {
      private val patterns: Seq[(String, Regex)] = {
        val number: Regex = {
          val hex = "(?:0[xX][0-9A-Fa-f]+)"
          val decimal = "(?:[1-9][0-9]*|0)"
          val long = s"(?:${decimal}[DFLdfl])"
          val float = s"(?:${decimal}\\.${decimal}[DFdf])"
          val eNotation = s"(?:${decimal}(?:\\.0?${decimal})?[eE][+\\-]?[0-9]+)"
          s"""\\b(?:${eNotation}|${hex}|${long}|${float}|${decimal})\\b""".r
        }

        val string: Regex = "(?:s?\"(?:\\\\\"|[^\"])*\")".r

        val reserved: Regex = (
          s"""\\b(?:null|contains|exists|filter|filterNot|find|flatMap|""" +
          s"""flatten|fold|forall|foreach|getOrElse|map|orElse)\\b"""
        ).r

        val special: Regex = s"""\\b(?:true|false|this)\\b""".r

        val typeSig: Regex = {
          val token: String => String = { limit =>
            s"[$$_]${limit}[A-Z][_$$A-Z0-9]${limit}[\\w$$]${limit}"
          }
          val prefix: String = s"""(?<=(?::)\\s{0,10}|\\btype ${token("{0,10}")}\\s{0,10}=\\s{0,10})"""
          s"""\\b(?:${prefix}(?:${token("*")}|\\s*=>\\s*|\\s*with\\s*)*)\\b"""
        }.r

        val syntax: Regex = (
          s"""\\b(?:abstract|case|catch|class|def|do|else|extends|final|""" +
          s"""finally|for|forSome|if|implicit|import|lazy|match|new|""" +
          s"""object|override|package|private|protected|return|sealed|""" +
          s"""super|throw|trait|try|type|val|var|while|with|yield)\\b"""
        ).r

        Seq[(String, Regex)](
          "r" -> string
        , "c" -> reserved
        , "m" -> special
        , "g" -> typeSig
        , "r" -> number
        , "y" -> syntax
        )
      }

      def switch: LineHandler = LineHandler

      def apply(line: String): (Line, Option[String]) = {
        val (colors, regexes) = patterns.unzip
        // new Regex("(?:(a)|(b)|(c))") will produce
        // m.subgroups List(null, "b", null) when applied on "b"
        val regex = new Regex(s"(?:(${regexes.mkString(")|(")}))")

        val formatted = regex.replaceAllIn(line, { m =>
          val colorIdx = m.subgroups.indexWhere(_ != null)
          colors.drop(colorIdx).take(1).headOption
            .map({ color => s"\\\\${color}${Regex.quoteReplacement(m.toString)}\\\\s" })
            .getOrElse(line)
        })

        (Line("< " + formatted), Option(line))
      }
    }

    case class Acc(
      content: IndexedSeq[Line] = IndexedSeq.empty
    , builds: IndexedSeq[Int] = IndexedSeq.empty
    , deck: IndexedSeq[Slide] = IndexedSeq.empty
    , code: IndexedSeq[String] = IndexedSeq.empty
    , codeAcc: IndexedSeq[String] = IndexedSeq.empty
    , handler: LineHandler = LineHandler
    ) {
      import config.newline

      def switchHandler: Acc = copy(handler = handler.switch)

      def append(line: String): Acc = {
        val (l, c) = handler(line)
        copy(content = content :+ l, codeAcc = c.fold(codeAcc)(codeAcc :+ _))
      }

      def pushBuild: Acc = copy(
        builds = builds :+ content.size
      , code = code :+ codeAcc.mkString(newline)
      , codeAcc = IndexedSeq.empty
      )

      def pushSlide: Acc = {
        if (content.isEmpty) {
          append("").pushSlide
        } else {
          val finalBuild = pushBuild
          val slide = Slide(content, finalBuild.builds, finalBuild.code)

          Acc(deck = deck :+ slide)
        }
      }
    }

    val slideSeparator = "---"
    val buildSeparator = "--"
    val codeDelimiter = "```"

    val acc = lines.foldLeft(Acc()) { (acc, line) =>
      line match {
        case `slideSeparator` => acc.pushSlide
        case `buildSeparator` => acc.pushBuild
        case `codeDelimiter` => acc.switchHandler
        case _ => acc.append(line)
      }
    }.pushSlide

    acc.deck
  }

  private def render(build: Build): String = {
    import config._

    val topPadding = (verticalSpace - build.size) / 2
    val bottomPadding = verticalSpace - topPadding - build.content.size

    val margin = horizontalSpace - build.maxLength

    val sb = StringBuilder.newBuilder

    def render(line: Line): StringBuilder = {
      sb ++= sinistral
      sb ++= line.render(margin)
      sb ++= dextral
      sb ++= newline
    }

    sb ++= topRow
    sb ++= blankLine * topPadding

    build.content foreach render

    if (slideCounter && bottomPadding > 0) {
      sb ++= blankLine * (bottomPadding - 1)
      render(build.footer)
    } else {
      sb ++= blankLine * bottomPadding
    }

    sb ++= bottomRow

    sb.mkString
  }

  private def show(build: Option[Build]): Unit = {
    if (build.isEmpty) Console.err.print("No slide for you")

    build foreach { b => print(render(b)) }

    if (padNewline) print("\n\n\u001b[2A") // Create a space for if the user enters "n\n", to keep the screen from jumping
  }

  private def reloadDeck(): Unit = {
    val curSlide = deck.currentSlideNumber
    deck = Deck(parseSource(source))
    show(deck.jumpTo(curSlide))
  }

  implicit class Ops(val i: Int) {
    def next: Unit = show(deck.jump(i))
    def n: Unit = next

    def previous: Unit = show(deck.jump(-i))
    def p: Unit = previous

    def go: Unit = show(deck.jumpTo(i - 1))
    def g: Unit = go
  }

  def next: Unit = show(deck.nextBuild)
  def n: Unit = next
  def > : Unit = next

  def previous: Unit = show(deck.previousBuild)
  def p: Unit = previous
  def < : Unit = previous

  def redraw: Unit = show(deck.redrawBuild)
  def z: Unit = redraw

  def reload: Unit = reloadDeck
  def y: Unit = reload

  def Next: Unit = 1.next
  def N: Unit = Next
  def >> : Unit = Next

  def Previous: Unit = 1.previous
  def P: Unit = Previous
  def << : Unit = Previous

  def first: Unit = 1.go
  def f: Unit = first
  def |< : Unit = first

  def last: Unit = show(deck.lastSlide)
  def l: Unit = last
  def >| : Unit = last

  def Last: Unit = show(deck.lastBuild)
  def L: Unit = Last
  def >>| : Unit = Last

  def run: Unit = deck.runCode
  def r: Unit = run
  def !! : Unit = run

  def blank: Unit = print(config.newline * config.screenHeight)
  def b: Unit = blank

  def help: Unit = print(helpMessage)
  def h: Unit = help
  def ? : Unit = help
}
marconilanna/REPLesent
REPLesent.scala
Scala
apache-2.0
18,977
package xyz.hyperreal package object sprolog { val DOT = Symbol( "." ) val NIL = Symbol( "[]" ) type Predicate = WAMInterface => Boolean case class Indicator( functor: Symbol, arity: Int ) extends Ordered[Indicator] { def compare( that: Indicator ) = if (functor.name < that.functor.name) -1 else if (functor.name > that.functor.name) 1 else arity - that.arity override def toString = functor.name + "/" + arity } def indicator( t: AST ) = t match { case s: StructureAST => Indicator( s.f, s.arity ) case AtomAST( a ) => Indicator( a, 0 ) } def atom( t: AST ) = t.isInstanceOf[AtomAST] def asAtom( t: AST ) = t.asInstanceOf[AtomAST] def atomic( t: AST ) = t match { case _: NumberAST | _: AtomAST | _: StringAST | _: ConstantAST => true case _ => false } def constant( t: AST ): Any = t match { case NumberAST( n ) => n case AtomAST( a ) => a case StringAST( s ) => s case ConstantAST( c ) => c case _ => sys.error( t + " not a non-compound constant" ) } def compound( t: AST ) = t.isInstanceOf[StructureAST] def variable( t: AST ) = t.isInstanceOf[Addr] def isList( a: AST ): Boolean = a match { case AtomAST( NIL ) => true case StructureAST( DOT, Seq(head, tail) ) if isList( tail ) => true case _ => false } def toList( l: AST ): List[AST] = l match { case AtomAST( NIL ) => Nil case StructureAST( DOT, Seq(head, tail) ) => head :: toList( tail ) } def isVarList( a: AST ): Boolean = a match { case _: Addr => true case StructureAST( DOT, Seq(head, tail) ) if isVarList( tail ) => true case _ => false } def toVarList( l: AST ): List[AST] = l match { case a: Addr => List( a ) case StructureAST( DOT, Seq(head, tail) ) => head :: toVarList( tail ) } def fromList( l: List[AST] ): AST = l match { case Nil => AtomAST( NIL ) case head :: tail => StructureAST( DOT, IndexedSeq(head, fromList(tail)) ) } }
edadma/sprolog
src/main/scala/sprolog.scala
Scala
mit
1,997
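A short, hedged usage sketch of the list helpers above: they round-trip between Scala lists and the Prolog cons representation ('.'/2 structures terminated by []). The AST constructors used below (NumberAST, AtomAST, StructureAST) are assumed to be case classes shaped exactly as the pattern matches in the package object imply; they are defined elsewhere in this repository.

import xyz.hyperreal.sprolog._

object ListHelpersSketch extends App {
  // Build the Prolog list [1, 2]; NumberAST's payload type is assumed numeric.
  val l = fromList(List(NumberAST(1), NumberAST(2)))

  println(isList(l))               // true: a chain of './2' cells ending in '[]'
  println(toList(l))               // List(NumberAST(1), NumberAST(2))
  println(indicator(l))            // ./2 -- the outermost cons cell
  println(indicator(AtomAST(NIL))) // []/0
}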
package com.codacy.client.stash.service import com.codacy.client.stash.User import com.codacy.client.stash.client.{PageRequest, Request, RequestResponse, StashClient} class AdminServices(client: StashClient) { val BASE: String = "/rest/api/1.0/admin" /** * Retrieves a list of users that are members of a specified group. * * The authenticated user must have the LICENSED_USER permission to call this resource. */ def findUsersInGroup( context: String, filter: Option[String], pageRequest: Option[PageRequest] ): RequestResponse[Seq[User]] = { val baseParameters = Map("context" -> context) val parameters = filter.fold(baseParameters) { filter => baseParameters + ("filter" -> filter) } pageRequest match { case Some(pageRequest) => client.executePaginatedWithPageRequest( Request(s"$BASE/groups/more-members", classOf[Seq[User]]), pageRequest = pageRequest )(params = parameters) case None => client.executePaginated(Request(s"$BASE/groups/more-members", classOf[Seq[User]]))(params = parameters) } } }
codacy/stash-scala-client
src/main/scala/com/codacy/client/stash/service/AdminServices.scala
Scala
apache-2.0
1,137
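For context, a hedged usage sketch: the optional filter narrows matches by user name, and the optional PageRequest switches between the paginated and unpaginated execution paths shown above. The StashClient construction (base URL, credentials) is not part of this file, so it is left abstract here.

import com.codacy.client.stash.client.{PageRequest, StashClient}
import com.codacy.client.stash.service.AdminServices

object AdminServicesSketch {
  val client: StashClient = ??? // constructed elsewhere with base URL + credentials
  val admin = new AdminServices(client)

  // Unpaginated call: members of "stash-users" whose name matches "smith".
  val filtered = admin.findUsersInGroup("stash-users", filter = Some("smith"), pageRequest = None)

  // Passing Some(pageRequest) instead routes through executePaginatedWithPageRequest.
}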
/* * Licensed to Cloudera, Inc. under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Cloudera, Inc. licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cloudera.hue.livy import org.json4s.JField package object repl { type MimeTypeMap = List[JField] val APPLICATION_JSON = "application/json" val APPLICATION_LIVY_TABLE_JSON = "application/vnd.livy.table.v1+json" val IMAGE_PNG = "image/png" val TEXT_PLAIN = "text/plain" }
GitHublong/hue
apps/spark/java/livy-repl/src/main/scala/com/cloudera/hue/livy/repl/package.scala
Scala
apache-2.0
1,087
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler import java.util.Properties import scala.collection.mutable.Map import org.apache.spark._ import org.apache.spark.rdd.RDD import org.apache.spark.executor.TaskMetrics /** * Types of events that can be handled by the DAGScheduler. The DAGScheduler uses an event queue * architecture where any thread can post an event (e.g. a task finishing or a new job being * submitted) but there is a single "logic" thread that reads these events and takes decisions. * This greatly simplifies synchronization. */ private[scheduler] sealed trait DAGSchedulerEvent private[scheduler] case class JobSubmitted( jobId: Int, finalRDD: RDD[_], func: (TaskContext, Iterator[_]) => _, partitions: Array[Int], allowLocal: Boolean, callSite: String, listener: JobListener, properties: Properties = null) extends DAGSchedulerEvent private[scheduler] case class JobCancelled(jobId: Int) extends DAGSchedulerEvent private[scheduler] case class JobGroupCancelled(groupId: String) extends DAGSchedulerEvent private[scheduler] case object AllJobsCancelled extends DAGSchedulerEvent private[scheduler] case class BeginEvent(task: Task[_], taskInfo: TaskInfo) extends DAGSchedulerEvent private[scheduler] case class GettingResultEvent(task: Task[_], taskInfo: TaskInfo) extends DAGSchedulerEvent private[scheduler] case class CompletionEvent( task: Task[_], reason: TaskEndReason, result: Any, accumUpdates: Map[Long, Any], taskInfo: TaskInfo, taskMetrics: TaskMetrics) extends DAGSchedulerEvent private[scheduler] case class ExecutorGained(execId: String, host: String) extends DAGSchedulerEvent private[scheduler] case class ExecutorLost(execId: String) extends DAGSchedulerEvent private[scheduler] case class TaskSetFailed(taskSet: TaskSet, reason: String) extends DAGSchedulerEvent private[scheduler] case object StopDAGScheduler extends DAGSchedulerEvent
windeye/spark
core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerEvent.scala
Scala
apache-2.0
2,746
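Because the trait is sealed and, as the scaladoc above notes, a single "logic" thread consumes the queue, a handler can match exhaustively with no locking. A minimal sketch of such a consumer (illustrative only, not Spark's actual DAGScheduler loop):

package org.apache.spark.scheduler

// Sketch only: the real event loop lives inside DAGScheduler itself.
private[scheduler] object EventLoopSketch {
  def handle(event: DAGSchedulerEvent): Unit = event match {
    case JobSubmitted(jobId, _, _, _, _, _, _, _) => println(s"job $jobId submitted")
    case JobCancelled(jobId)                      => println(s"job $jobId cancelled")
    case JobGroupCancelled(groupId)               => println(s"group $groupId cancelled")
    case AllJobsCancelled                         => println("all jobs cancelled")
    case BeginEvent(_, info)                      => println(s"task started: $info")
    case GettingResultEvent(_, info)              => println(s"getting result: $info")
    case CompletionEvent(_, reason, _, _, _, _)   => println(s"task finished: $reason")
    case ExecutorGained(execId, host)             => println(s"executor $execId gained on $host")
    case ExecutorLost(execId)                     => println(s"executor $execId lost")
    case TaskSetFailed(_, reason)                 => println(s"task set failed: $reason")
    case StopDAGScheduler                         => println("stopping DAG scheduler")
  }
}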
package rxtxio import akka.actor.ActorSystem import akka.io._ import akka.testkit._ import org.scalatest.BeforeAndAfterAll import org.scalatest.matchers.ShouldMatchers import org.scalatest.FunSuiteLike import Serial._ class SerialManagerSpec extends TestKit(ActorSystem("SerialManagerSpec")) with FunSuiteLike with BeforeAndAfterAll with ShouldMatchers with ImplicitSender { override def afterAll = system.shutdown test("list ports") { IO(Serial) ! ListPorts val Ports(ports) = expectMsgType[Ports] println("Found serial ports: " + ports.mkString(", ")) } }
msiegenthaler/rxtx-akka-io
src/test/scala/rxtxio/SerialManagerSpec.scala
Scala
apache-2.0
586
package com.tribbloids.spookystuff.execution import com.tribbloids.spookystuff.actions._ import com.tribbloids.spookystuff.dsl.GenPartitioner import com.tribbloids.spookystuff.row.{DataRow, SquashedFetchedRDD, SquashedFetchedRow} import org.apache.spark.rdd.RDD /** * Created by peng on 27/03/16. */ case class FetchPlan( override val child: ExecutionPlan, traces: Set[Trace], keyBy: Trace => Any, genPartitioner: GenPartitioner ) extends UnaryPlan(child) with InjectBeaconRDDPlan { override def doExecute(): SquashedFetchedRDD = { val trace_DataRowRDD: RDD[(TraceView, DataRow)] = child .rdd() .flatMap { _.interpolateAndRewriteLocally(traces) } .map { case (k, v) => k.keyBy(keyBy) -> v } val grouped = gpImpl.groupByKey(trace_DataRowRDD, beaconRDDOpt) grouped .map { tuple => SquashedFetchedRow(tuple._2.toArray, tuple._1) // actual fetch can only be triggered by extract or savePages } } }
tribbloid/spookystuff
core/src/main/scala/com/tribbloids/spookystuff/execution/FetchPlan.scala
Scala
apache-2.0
1,016
package com.hamrah.akka.persistence package object rocksdb { }
mhamrah/akka-persistence-rocksdb
src/package.scala
Scala
apache-2.0
65
/* * Copyright ASCII Soup (Nils Luxton) (c) 2016. * * GNU GPL v3 - See LICENSE.txt for details */ package com.asciisoup.advent.DayTwo import scala.io.Source object Puzzle extends App { val input = Source.fromInputStream(getClass.getResourceAsStream("/day_two.txt")) val keypad = Keypad(Grid.fromDimensions(Dimensions(3, 3)), "5") for (line <- input.getLines()) { for (s <- line.toList) { keypad.move(Direction.withName(s.toString.toUpperCase())) } print(keypad.current) } input.close() }
ascii-soup/AdventOfCode2016
src/main/scala/com/asciisoup/advent/DayTwo/Puzzle.scala
Scala
gpl-3.0
525
package scala.scalanative package compiler import scala.collection.mutable import nir._ trait Pass extends (Seq[Defn] => Seq[Defn]) { type OnAssembly = PartialFunction[Seq[Defn], Seq[Defn]] type OnDefn = PartialFunction[Defn, Seq[Defn]] type OnBlock = PartialFunction[Block, Seq[Block]] type OnInst = PartialFunction[Inst, Seq[Inst]] type OnCf = PartialFunction[Cf, Cf] type OnNext = PartialFunction[Next, Next] type OnVal = PartialFunction[Val, Val] type OnType = PartialFunction[Type, Type] def preAssembly: OnAssembly = null def postAssembly: OnAssembly = null def preDefn: OnDefn = null def postDefn: OnDefn = null def preBlock: OnBlock = null def postBlock: OnBlock = null def preInst: OnInst = null def postInst: OnInst = null def preCf: OnCf = null def postCf: OnCf = null def preNext: OnNext = null def postNext: OnNext = null def preVal: OnVal = null def postVal: OnVal = null def preType: OnType = null def postType: OnType = null @inline private def hook[A, B](pf: PartialFunction[A, B], arg: A, default: B): B = if (pf == null) default else pf.applyOrElse(arg, (_: A) => default) private def txAssembly(assembly: Seq[Defn]): Seq[Defn] = { val pre = hook(preAssembly, assembly, assembly) val post = pre.flatMap { defn => txDefn(defn) } hook(postAssembly, post, post) } private def txDefn(defn: Defn): Seq[Defn] = { val pres = hook(preDefn, defn, Seq(defn)) pres.flatMap { pre => val post = pre match { case defn @ Defn.Var(_, _, ty, value) => defn.copy(ty = txType(ty), value = txVal(value)) case defn @ Defn.Const(_, _, ty, value) => defn.copy(ty = txType(ty), value = txVal(value)) case defn @ Defn.Declare(_, _, ty) => defn.copy(ty = txType(ty)) case defn @ Defn.Define(_, _, ty, blocks) => defn.copy(ty = txType(ty), blocks = blocks.flatMap(txBlock)) case defn @ Defn.Struct(_, _, tys) => defn.copy(tys = tys.map(txType)) case defn @ Defn.Trait(_, _, _) => defn case defn @ Defn.Class(_, _, _, _) => defn case defn @ Defn.Module(_, _, _, _) => defn } hook(postDefn, post, Seq(post)) } } private def txBlock(block: Block): Seq[Block] = { val pres = hook(preBlock, block, Seq(block)) pres.flatMap { pre => val newparams = pre.params.map { param => Val.Local(param.name, txType(param.ty)) } val newinsts = pre.insts.flatMap(txInst) val newcf = txCf(pre.cf) val post = Block(pre.name, newparams, newinsts, newcf) hook(postBlock, post, Seq(post)) } } private def txInst(inst: Inst): Seq[Inst] = { val pres = hook(preInst, inst, Seq(inst)) pres.flatMap { pre => val newop = pre.op match { case Op.Call(ty, ptrv, argvs) => Op.Call(txType(ty), txVal(ptrv), argvs.map(txVal)) case Op.Load(ty, ptrv) => Op.Load(txType(ty), txVal(ptrv)) case Op.Store(ty, ptrv, v) => Op.Store(txType(ty), txVal(ptrv), txVal(v)) case Op.Elem(ty, ptrv, indexvs) => Op.Elem(txType(ty), txVal(ptrv), indexvs.map(txVal)) case Op.Extract(aggrv, indexvs) => Op.Extract(txVal(aggrv), indexvs) case Op.Insert(aggrv, v, indexvs) => Op.Insert(txVal(aggrv), txVal(v), indexvs) case Op.Stackalloc(ty, v) => Op.Stackalloc(txType(ty), txVal(v)) case Op.Bin(bin, ty, lv, rv) => Op.Bin(bin, txType(ty), txVal(lv), txVal(rv)) case Op.Comp(comp, ty, lv, rv) => Op.Comp(comp, txType(ty), txVal(lv), txVal(rv)) case Op.Conv(conv, ty, v) => Op.Conv(conv, txType(ty), txVal(v)) case Op.Select(v1, v2, v3) => Op.Select(txVal(v1), txVal(v2), txVal(v3)) case Op.Classalloc(n) => Op.Classalloc(n) case Op.Field(ty, v, n) => Op.Field(txType(ty), txVal(v), n) case Op.Method(ty, v, n) => Op.Method(txType(ty), txVal(v), n) case Op.Module(n) => Op.Module(n) case Op.As(ty, v) => Op.As(txType(ty), 
txVal(v)) case Op.Is(ty, v) => Op.Is(txType(ty), txVal(v)) case Op.Copy(v) => Op.Copy(txVal(v)) case Op.Sizeof(ty) => Op.Sizeof(txType(ty)) case Op.Closure(ty, fun, captures) => Op.Closure(txType(ty), txVal(fun), captures.map(txVal)) } val post = Inst(pre.name, newop) hook(postInst, post, Seq(post)) } } private def txCf(cf: Cf): Cf = { val pre = hook(preCf, cf, cf) val post = pre match { case Cf.Unreachable => Cf.Unreachable case Cf.Ret(v) => Cf.Ret(txVal(v)) case Cf.Jump(next) => Cf.Jump(txNext(next)) case Cf.If(v, thenp, elsep) => Cf.If(txVal(v), txNext(thenp), txNext(elsep)) case Cf.Switch(v, default, cases) => Cf.Switch(txVal(v), txNext(default), cases.map(txNext)) case Cf.Invoke(ty, ptrv, argvs, succ, fail) => Cf.Invoke(txType(ty), txVal(ptrv), argvs.map(txVal), txNext(succ), txNext(fail)) case Cf.Throw(v) => Cf.Throw(txVal(v)) case Cf.Try(norm, exc) => Cf.Try(txNext(norm), txNext(exc)) } hook(postCf, post, post) } private def txVal(value: Val): Val = { val pre = hook(preVal, value, value) val post = pre match { case Val.Zero(ty) => Val.Zero(txType(ty)) case Val.Undef(ty) => Val.Undef(txType(ty)) case Val.Struct(n, values) => Val.Struct(n, values.map(txVal)) case Val.Array(ty, values) => Val.Array(txType(ty), values.map(txVal)) case Val.Local(n, ty) => Val.Local(n, txType(ty)) case Val.Global(n, ty) => Val.Global(n, txType(ty)) case Val.Const(v) => Val.Const(txVal(v)) case _ => pre } hook(postVal, post, post) } private def txType(ty: Type): Type = { val pre = hook(preType, ty, ty) val post = pre match { case Type.Array(ty, n) => Type.Array(txType(ty), n) case Type.Function(tys, ty) => Type.Function(tys.map(txType), txType(ty)) case Type.Struct(n, tys) => Type.Struct(n, tys.map(txType)) case _ => pre } hook(postType, post, post) } private def txNext(next: Next): Next = { val pre = hook(preNext, next, next) val post = pre match { case succ: Next.Succ => succ case fail: Next.Fail => fail case Next.Label(n, args) => Next.Label(n, args.map(txVal)) case Next.Case(v, n) => Next.Case(txVal(v), n) } hook(postNext, post, post) } final def apply(assembly: Seq[Defn]): Seq[Defn] = txAssembly(assembly) final def apply(defn: Defn): Seq[Defn] = txDefn(defn) final def apply(block: Block): Seq[Block] = txBlock(block) final def apply(inst: Inst): Seq[Inst] = txInst(inst) final def apply(cf: Cf): Cf = txCf(cf) final def apply(next: Next): Next = txNext(next) final def apply(value: Val): Val = txVal(value) final def apply(ty: Type): Type = txType(ty) } trait PassCompanion { def apply(ctx: Ctx): Pass def depends: Seq[Global] = Seq() def injects: Seq[Defn] = Seq() }
phdoerfler/scala-native
tools/src/main/scala/scala/scalanative/compiler/Pass.scala
Scala
bsd-3-clause
7,695
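As a sketch of how the hook API above is meant to be used: a pass overrides only the hooks it cares about, returns the rewritten node, and the tx* traversal recurses everywhere else. The pass below (name invented for illustration) rewrites every Val.Undef into Val.Zero of the same type; whether that transformation is useful is beside the point, it only demonstrates the mechanics.

package scala.scalanative
package compiler

import nir._

// Illustrative only: replace undefined values with zero-initialised ones.
class UndefToZeroSketch extends Pass {
  override def preVal: OnVal = {
    case Val.Undef(ty) => Val.Zero(ty)
  }
}

object UndefToZeroSketch extends PassCompanion {
  def apply(ctx: Ctx): Pass = new UndefToZeroSketch
}

Since Pass extends Seq[Defn] => Seq[Defn], applying it is plain function application: val rewritten = (new UndefToZeroSketch)(assembly).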
package org.tejo.iza.rules.test

import org.tejo.iza.rules.ClaraQuery

/** Base class for instances that hold the data needed to test one rule:
  * a query is made and its result is either expected or expected to be absent.
  */
abstract class RuleTestData {

  def facts: List[Any]

  def queryResultMap: Map[ClaraQuery[_], Any]
}
tomaszym/izabela
rules/src/test/scala/org/tejo/iza/rules/test/RuleTestData.scala
Scala
gpl-2.0
306
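A hypothetical concrete instance, to show what the two members are for: the facts are inserted into the rule session, then each query in the map is run and its result compared against the expected value. The fact type and query name below are invented for illustration.

import org.tejo.iza.rules.ClaraQuery
import org.tejo.iza.rules.test.RuleTestData

object SingleRuleTestData extends RuleTestData {
  case class Applicant(name: String)

  val approvalQuery: ClaraQuery[String] = ??? // a real query defined in the rules module

  def facts: List[Any] = List(Applicant("Zamenhof"))

  def queryResultMap: Map[ClaraQuery[_], Any] = Map(approvalQuery -> "approved")
}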
package glint.matrix import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream} import breeze.linalg.DenseVector import glint.SystemTest import glint.models.client.BigMatrix import org.scalatest.{FlatSpec, Matchers} /** * BigMatrix test specification */ class BigMatrixSpec extends FlatSpec with SystemTest with Matchers { "A BigMatrix" should "store Double values" in withMaster { _ => withServer { _ => withClient { client => val model = client.matrix[Double](49, 6) val result = whenReady(model.push(Array(0L), Array(1), Array(0.54))) { identity } assert(result) val future = model.pull(Array(0L), Array(1)) val value = whenReady(future) { identity } assert(value(0) == 0.54) } } } it should "store Float values" in withMaster { _ => withServer { _ => withClient { client => val model = client.matrix[Float](49, 6, 8) val result = whenReady(model.push(Array(10L, 0L, 48L), Array(0, 1, 5), Array(0.0f, 0.54f, 0.33333f))) { identity } assert(result) val future = model.pull(Array(10L, 0L, 48L), Array(0, 1, 5)) val value = whenReady(future) { identity } value should equal(Array(0.0f, 0.54f, 0.33333f)) } } } it should "store Int values" in withMaster { _ => withServer { _ => withClient { client => val model = client.matrix[Int](23, 10) val result = whenReady(model.push(Array(1L, 5L, 20L), Array(0, 1, 8), Array(0, -1000, 23451234))) { identity } assert(result) val future = model.pull(Array(1L, 5L, 20L), Array(0, 1, 8)) val value = whenReady(future) { identity } value should equal(Array(0, -1000, 23451234)) } } } it should "store Long values" in withMaster { _ => withServers(3) { _ => withClient { client => val model = client.matrix[Long](23, 10) val result = whenReady(model.push(Array(1L, 5L, 20L), Array(0, 8, 1), Array(0L, -789300200100L, 987100200300L))) { identity } assert(result) val future = model.pull(Array(1L, 5L, 20L), Array(0, 8, 1)) val value = whenReady(future) { identity } value should equal(Array(0L, -789300200100L, 987100200300L)) } } } it should "return rows as vectors" in withMaster { _ => withServers(2) { _ => withClient { client => val model = client.matrix[Int](100, 100, 3) val result1 = whenReady(model.push(Array(0L, 20L, 50L, 81L), Array(0, 10, 99, 80), Array(100, 100, 20, 30))) { identity } val value1 = whenReady(model.pull(Array(0L, 20L, 50L, 81L))) { identity } val result2 = whenReady(model.push(Array(0L, 20L, 50L, 81L), Array(0, 10, 99, 80), Array(1, -1, 2, 3))) { identity } val value = whenReady(model.pull(Array(0L, 20L, 50L, 81L))) { identity } val value0 = DenseVector.zeros[Int](100) value0(0) = 101 val value20 = DenseVector.zeros[Int](100) value20(10) = 99 val value50 = DenseVector.zeros[Int](100) value50(99) = 22 val value81 = DenseVector.zeros[Int](100) value81(80) = 33 assert(value(0) == value0) assert(value(1) == value20) assert(value(2) == value50) assert(value(3) == value81) } } } it should "aggregate values through addition" in withMaster { _ => withServers(2) { _ => withClient { client => val model = client.matrix[Int](9, 100) val result1 = whenReady(model.push(Array(0L, 2L, 5L, 8L), Array(0, 10, 99, 80), Array(100, 100, 20, 30))) { identity } val result2 = whenReady(model.push(Array(0L, 2L, 5L, 8L), Array(0, 10, 99, 80), Array(1, -1, 2, 3))) { identity } assert(result1) assert(result2) val future = model.pull(Array(0L, 2L, 5L, 8L), Array(0, 10, 99, 80)) val value = whenReady(future) { identity } value should equal(Array(101, 99, 22, 33)) } } } it should "deserialize without an ActorSystem in scope" in { var ab: Array[Byte] = 
Array.empty[Byte] withMaster { _ => withServers(2) { _ => withClient { client => val model = client.matrix[Int](9, 10) val bos = new ByteArrayOutputStream val out = new ObjectOutputStream(bos) out.writeObject(model) out.close() ab = bos.toByteArray whenReady(model.push(Array(0L, 7L), Array(1, 2), Array(12, 42))) { identity } val bis = new ByteArrayInputStream(ab) val in = new ObjectInputStream(bis) val matrix = in.readObject().asInstanceOf[BigMatrix[Int]] val result = whenReady(matrix.pull(Array(0L, 7L), Array(1, 2))) { identity } result should equal(Array(12, 42)) } } } } }
rjagerman/glint
src/test/scala/glint/matrix/BigMatrixSpec.scala
Scala
mit
5,164
package org.scaladebugger.api.profiles.java.info import org.scaladebugger.api.lowlevel.events.misc.NoResume import org.scaladebugger.api.profiles.java.JavaDebugProfile import org.scaladebugger.api.profiles.traits.info.ThreadInfo import org.scaladebugger.api.utils.JDITools import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine import org.scaladebugger.test.helpers.ParallelMockFunSpec import test.{ApiTestUtilities, VirtualMachineFixtures} class JavaMethodInfoIntegrationSpec extends ParallelMockFunSpec with VirtualMachineFixtures with ApiTestUtilities { describe("JavaMethodInfo") { it("should be able to get the name of the method") { val testClass = "org.scaladebugger.test.info.Methods" val testFile = JDITools.scalaClassStringToFileString(testClass) @volatile var t: Option[ThreadInfo] = None val s = DummyScalaVirtualMachine.newInstance() // NOTE: Do not resume so we can check the variables at the stack frame s.withProfile(JavaDebugProfile.Name) .getOrCreateBreakpointRequest(testFile, 22, NoResume) .foreach(e => t = Some(e.thread)) withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) => logTimeTaken(eventually { val method = t.get.topFrame.thisObject.method("publicMethod") method.name should be ("publicMethod") }) } } it("should be able to get the parameter types of the method") { val testClass = "org.scaladebugger.test.info.Methods" val testFile = JDITools.scalaClassStringToFileString(testClass) @volatile var t: Option[ThreadInfo] = None val s = DummyScalaVirtualMachine.newInstance() // NOTE: Do not resume so we can check the variables at the stack frame s.withProfile(JavaDebugProfile.Name) .getOrCreateBreakpointRequest(testFile, 22, NoResume) .foreach(e => t = Some(e.thread)) withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) => logTimeTaken(eventually { val method = t.get.topFrame.thisObject.method("publicMethod") method.parameterTypeNames should be (Seq("int", "java.lang.String")) }) } } it("should be able to get the return value of the method") { val testClass = "org.scaladebugger.test.info.Methods" val testFile = JDITools.scalaClassStringToFileString(testClass) @volatile var t: Option[ThreadInfo] = None val s = DummyScalaVirtualMachine.newInstance() // NOTE: Do not resume so we can check the variables at the stack frame s.withProfile(JavaDebugProfile.Name) .getOrCreateBreakpointRequest(testFile, 22, NoResume) .foreach(e => t = Some(e.thread)) withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) => logTimeTaken(eventually { val method = t.get.topFrame.thisObject.method("publicMethod") method.returnTypeName should be ("java.lang.String") }) } } } }
ensime/scala-debugger
scala-debugger-api/src/it/scala/org/scaladebugger/api/profiles/java/info/JavaMethodInfoIntegrationSpec.scala
Scala
apache-2.0
3,023
package views.html import play.templates._ import play.templates.TemplateMagic._ import play.api.templates._ import play.api.templates.PlayMagic._ import models._ import controllers._ import play.api.i18n._ import play.api.mvc._ import play.api.data._ import views.html._ /* addDevice Template File */ object addDevice extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template2[Form[AddDeviceFormData],play.api.mvc.Flash,play.api.templates.HtmlFormat.Appendable] { /* addDevice Template File */ def apply/*2.2*/(addDeviceForm: Form[AddDeviceFormData])(implicit flash: play.api.mvc.Flash):play.api.templates.HtmlFormat.Appendable = { _display_ {import helper._ import helper.twitterBootstrap._ import play.api.i18n.Messages Seq[Any](format.raw/*2.78*/(""" """),format.raw/*7.1*/(""" """),_display_(/*8.2*/main("Add Device")/*8.20*/{_display_(Seq[Any](format.raw/*8.21*/(""" <fieldset> <legend>Add Device. <a href="/removeDevice">Remove Device</a></legend> """),_display_(/*11.4*/flash/*11.9*/.get("addSuccess").map/*11.31*/{ _ =>_display_(Seq[Any](format.raw/*11.37*/(""" <div class="alert alert-success">Device Added Successfully. Add Another.</div> """)))}),format.raw/*13.4*/(""" """),_display_(/*14.4*/form(action = routes.Application.addDevicePost())/*14.53*/{_display_(Seq[Any](format.raw/*14.54*/(""" """),_display_(/*15.5*/inputText( addDeviceForm("simId"), '_label -> "SIM ID", '_showConstraints -> false )),format.raw/*19.5*/(""" """),_display_(/*20.5*/inputText( addDeviceForm("phoneNumber"), '_label -> "PHONE NUMBER", '_showConstraints -> false )),format.raw/*24.5*/(""" <button class="btn btn-primary">Add Device</button> """)))}),format.raw/*26.4*/(""" </fieldset> """),_display_(/*28.3*/addDeviceForm/*28.16*/.globalErrors.map/*28.33*/{ error =>_display_(Seq[Any](format.raw/*28.43*/(""" <div class="alert alert-error">"""),_display_(/*29.35*/Messages(error.message)),format.raw/*29.58*/("""</div> """)))}),format.raw/*30.3*/(""" """)))}),format.raw/*31.2*/(""" """))} } def render(addDeviceForm:Form[AddDeviceFormData],flash:play.api.mvc.Flash): play.api.templates.HtmlFormat.Appendable = apply(addDeviceForm)(flash) def f:((Form[AddDeviceFormData]) => (play.api.mvc.Flash) => play.api.templates.HtmlFormat.Appendable) = (addDeviceForm) => (flash) => apply(addDeviceForm)(flash) def ref: this.type = this } /* -- GENERATED -- DATE: Mon Jun 30 14:48:21 IST 2014 SOURCE: /home/nagarjuna/FooService/app/views/addDevice.scala.html HASH: bd45be268103cf55fbff4715ae44f8e9f8070d92 MATRIX: 646->31|897->107|925->191|952->193|978->211|1016->212|1131->301|1144->306|1175->328|1219->334|1335->420|1365->424|1423->473|1462->474|1493->479|1612->578|1643->583|1774->694|1863->753|1905->769|1927->782|1953->799|2001->809|2063->844|2107->867|2146->876|2178->878 LINES: 19->2|27->2|29->7|30->8|30->8|30->8|33->11|33->11|33->11|33->11|35->13|36->14|36->14|36->14|37->15|41->19|42->20|46->24|48->26|50->28|50->28|50->28|50->28|51->29|51->29|52->30|53->31 -- GENERATED -- */
pamu/FooService
FooService1/target/scala-2.10/src_managed/main/views/html/addDevice.template.scala
Scala
apache-2.0
3,393
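The object above is Twirl-generated; the MATRIX/LINES footer maps generated offsets back to positions in the .scala.html source. Reading the format.raw fragments back in order, the original template was approximately the following (a reconstruction, not the exact file):

@(addDeviceForm: Form[AddDeviceFormData])(implicit flash: play.api.mvc.Flash)

@import helper._
@import helper.twitterBootstrap._
@import play.api.i18n.Messages

@main("Add Device"){
  <fieldset>
    <legend>Add Device. <a href="/removeDevice">Remove Device</a></legend>
    @flash.get("addSuccess").map{ _ =>
      <div class="alert alert-success">Device Added Successfully. Add Another.</div>
    }
    @form(action = routes.Application.addDevicePost()){
      @inputText(
        addDeviceForm("simId"),
        '_label -> "SIM ID",
        '_showConstraints -> false
      )
      @inputText(
        addDeviceForm("phoneNumber"),
        '_label -> "PHONE NUMBER",
        '_showConstraints -> false
      )
      <button class="btn btn-primary">Add Device</button>
    }
  </fieldset>
  @addDeviceForm.globalErrors.map{ error =>
    <div class="alert alert-error">@Messages(error.message)</div>
  }
}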
/** * Copyright (C) 2011 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.cache import org.scalatest.junit.AssertionsForJUnit import org.junit.Test import java.util.concurrent.locks.{ReentrantLock, Lock} import collection.JavaConversions._ import concurrent.{Await, Future} import concurrent.duration._ import concurrent.ExecutionContext.Implicits.global class MemoryCacheTest extends AssertionsForJUnit { class MyCacheable(val getEvictionLock: Lock) extends Cacheable { var wasEvicted = false var wasRemoved = false def evicted() { wasEvicted = true } def removed() { wasRemoved = true } def added() {} } case class Key(key: String) extends InternalCacheKey("test", key) val VALIDITY = 0L @Test def testFindKeepsInCache() { val cache = new MemoryCacheImpl("test", 1) val o1 = new MyCacheable(null) // Add object val key1 = Key("o1") cache.add(key1, VALIDITY, o1) // Find object val result1 = cache.findValid(key1, VALIDITY) assert(result1 eq o1) assert(!o1.wasEvicted) assert(!o1.wasRemoved) assert(cache.getCurrentSize === 1) } @Test def testTakeRemovesFromCache() { val cache = new MemoryCacheImpl("test", 1) val o1 = new MyCacheable(null) // Add object val key1 = Key("o1") cache.add(key1, VALIDITY, o1) // Find object val result1 = cache.takeValid(key1, VALIDITY) assert(result1 eq o1) assert(!o1.wasEvicted) assert(o1.wasRemoved) assert(cache.getCurrentSize === 0) } @Test def testRemoveNotifies() { val cache = new MemoryCacheImpl("test", 1) val o1 = new MyCacheable(null) // Add object val key1 = Key("o1") cache.add(key1, VALIDITY, o1) // Remove object cache.remove(key1) assert(!o1.wasEvicted) assert(o1.wasRemoved) assert(cache.getCurrentSize === 0) } @Test def testRemoveAllNotifies() { val cache = new MemoryCacheImpl("test", 1) val o1 = new MyCacheable(null) // Add object val key1 = Key("o1") cache.add(key1, VALIDITY, o1) // Remove all cache.removeAll() assert(!o1.wasEvicted) assert(o1.wasRemoved) assert(cache.getCurrentSize === 0) } @Test def testReduceSizeEvicts() { val cache = new MemoryCacheImpl("test", 1) val o1 = new MyCacheable(null) // Add object val key1 = Key("o1") cache.add(key1, VALIDITY, o1) // Remove all cache.setMaxSize(0) assert(o1.wasEvicted) assert(!o1.wasRemoved) assert(cache.getCurrentSize === 0) } @Test def testReduceSizeWithLock() { val cache = new MemoryCacheImpl("test", 1) val lock = new ReentrantLock val o1 = new MyCacheable(lock) // Add object val key1 = Key("o1") cache.add(key1, VALIDITY, o1) // Reduce size in other thread lock.lock() Await.ready(Future(cache.setMaxSize(0)), Duration.Inf) lock.unlock() assert(!o1.wasEvicted) assert(!o1.wasRemoved) assert(cache.getCurrentSize === 1) } @Test def testEvictedIfLockAvailable() { val cache = new MemoryCacheImpl("test", 1) val o1 = new MyCacheable(new ReentrantLock) // Add first object cache.add(Key("o1"), VALIDITY, o1) // Push first object out with second object cache.add(Key("o2"), VALIDITY, new AnyRef) assert(o1.wasEvicted) assert(!o1.wasRemoved) 
assert(cache.getCurrentSize === 1) } @Test def testNotEvictedIfLockUnavailable() { val cache = new MemoryCacheImpl("test", 1) val lock = new ReentrantLock val o1 = new MyCacheable(lock) // Add first object cache.add(Key("o1"), VALIDITY, o1) // Run in separate thread and wait lock.lock() Await.ready(Future(cache.add(Key("o2"), VALIDITY, new AnyRef)), Duration.Inf) lock.unlock() assert(!o1.wasEvicted) assert(!o1.wasRemoved) assert(cache.getCurrentSize === 2) } @Test def testNextToLastEvicted() { val cache = new MemoryCacheImpl("test", 2) val lock = new ReentrantLock // First object will be last and has a lock. It must not be evicted. val o1 = new MyCacheable(lock) // Second object will be next-to-last and doesn't have a lock. It must be evicted. val o2 = new MyCacheable(null) // Add objects cache.add(Key("o1"), VALIDITY, o1) cache.add(Key("o2"), VALIDITY, o2) // Run in separate thread and wait lock.lock() Await.ready(Future(cache.add(Key("o3"), VALIDITY, new AnyRef)), Duration.Inf) lock.unlock() assert(!o1.wasEvicted) assert(!o1.wasRemoved) assert(o2.wasEvicted) assert(!o2.wasRemoved) assert(cache.getCurrentSize === 2) } @Test def testIterators() { val size = 100 val cache = new MemoryCacheImpl("test", size) val range = 1 to size for (i ← range.reverse) cache.add(Key("o" + i), VALIDITY, i) val keysAsInts = cache.iterateCacheKeys map (_.asInstanceOf[Key].key.tail.toInt) toSeq val values = cache.iterateCacheObjects map (_.asInstanceOf[Int]) toSeq assert(range === keysAsInts) assert(range === values) } }
martinluther/orbeon-forms
src/test/scala/org/orbeon/oxf/cache/MemoryCacheTest.scala
Scala
lgpl-2.1
6,188
package com.lucidchart.open.nark.models

import java.sql.Blob
import java.sql.Clob
import java.util.UUID
import org.apache.commons.io.IOUtils
import com.lucidchart.open.nark.utils.UUIDHelper
import anorm._

object AnormImplicits {
	/**
	 * Attempt to convert a SQL value into a byte array
	 *
	 * @param value value to convert
	 * @return byte array
	 */
	private def valueToByteArrayOption(value: Any): Option[Array[Byte]] = {
		try {
			value match {
				case bytes: Array[Byte] => Some(bytes)
				case clob: Clob => Some(IOUtils.toByteArray(clob.getAsciiStream()))
				case blob: Blob => Some(blob.getBytes(1, blob.length.asInstanceOf[Int]))
				case _ => None
			}
		}
		catch {
			case e: Exception => None
		}
	}

	/**
	 * Attempt to convert a SQL value into a UUID
	 *
	 * @param value value to convert
	 * @return UUID
	 */
	private def valueToUUIDOption(value: Any): Option[UUID] = {
		try {
			valueToByteArrayOption(value) match {
				case Some(bytes) => Some(UUIDHelper.fromByteArray(bytes))
				case _ => None
			}
		}
		catch {
			case e: Exception => None
		}
	}

	/**
	 * Implicit conversion from anorm row to byte array
	 */
	implicit def rowToByteArray: Column[Array[Byte]] = {
		Column.nonNull[Array[Byte]] { (value, meta) =>
			val MetaDataItem(qualified, nullable, clazz) = meta
			valueToByteArrayOption(value) match {
				case Some(bytes) => Right(bytes)
				case _ => Left(TypeDoesNotMatch("Cannot convert " + value + ":" + value.asInstanceOf[AnyRef].getClass + " to Byte Array for column " + qualified))
			}
		}
	}

	/**
	 * Implicit conversion from anorm row to uuid
	 */
	implicit def rowToUUID: Column[UUID] = {
		Column.nonNull[UUID] { (value, meta) =>
			val MetaDataItem(qualified, nullable, clazz) = meta
			valueToUUIDOption(value) match {
				case Some(uuid) => Right(uuid)
				case _ => Left(TypeDoesNotMatch("Cannot convert " + value + ":" + value.asInstanceOf[AnyRef].getClass + " to UUID for column " + qualified))
			}
		}
	}

	/**
	 * Implicit conversion from UUID to anorm statement value
	 */
	implicit def uuidToStatement = new ToStatement[UUID] {
		def set(s: java.sql.PreparedStatement, index: Int, aValue: UUID): Unit = s.setObject(index, UUIDHelper.toByteArray(aValue))
	}

	/**
	 * Implicit conversion from scala BigDecimal to anorm statement value
	 */
	implicit def scalaBigDecimalToStatement = new ToStatement[scala.math.BigDecimal] {
		def set(s: java.sql.PreparedStatement, index: Int, aValue: scala.math.BigDecimal): Unit = s.setBigDecimal(index, aValue.underlying)
	}

	class RichSQL(val query: String, val parameterValues: (Any, ParameterValue[Any])*) {
		/**
		 * Convert this object into an anorm.SqlQuery
		 */
		def toSQL = SQL(query).on(parameterValues: _*)

		/**
		 * Similar to anorm.SimpleSql.on, but takes lists instead of single values.
		 * Each list is converted into a set of values, and then passed to anorm's
		 * on function when toSQL is called.
		 */
		def onList[A](args: (String, Iterable[A])*)(implicit toParameterValue: (A) => ParameterValue[A]) = {
			val condensed = args.map { case (name, values) =>
				val search = "{" + name + "}"
				val valueNames = values.zipWithIndex.map { case (value, index) => name + "_" + index }
				val placeholders = valueNames.map { name => "{" + name + "}" }
				val replace = placeholders.mkString(",")
				val converted = values.map { value => toParameterValue(value).asInstanceOf[ParameterValue[Any]] }
				val parameters = valueNames.zip(converted)

				(search, replace, parameters)
			}

			val newQuery = condensed.foldLeft(query) { case (newQuery, (search, replace, _)) => newQuery.replace(search, replace) }
			val newValues = parameterValues ++ condensed.map { case (_, _, parameters) => parameters }.flatten

			new RichSQL(newQuery, newValues: _*)
		}

		/**
		 * Helper for inserting multiple elements at the same time.
		 *
		 * Example:
		 *
		 * RichSQL("""
		 *   insert into mytable (a,b,c) values ({fields})
		 * """).multiInsert(2, Seq("a", "b", "c"), "fields")(
		 *   "a" -> List(5, 6),
		 *   "b" -> List(new Date(), new Date()),
		 *   "c" -> records.map(_.toString)
		 * )
		 *
		 * @param count # of records being inserted
		 * @param fields Field names, in order, to insert
		 * @param searchName name of the replacement variable
		 * @param args
		 */
		def multiInsert(count: Int, fields: Seq[String], searchName: String = "fields")(args: (String, Seq[ParameterValue[_]])*) = {
			require(count > 0)
			require(!fields.isEmpty)
			require(searchName.length() > 0)

			val search = "{" + searchName + "}"
			val expandedFields = (for (i <- 0 until count) yield {
				"{" + fields.map(_ + i).mkString("}, {") + "}"
			}).mkString("), (")

			val newQuery = query.replace(search, expandedFields)
			val newValues = parameterValues ++ args.map { case (argName, argValues) =>
				require(argValues.size == count)
				(for (i <- 0 until count) yield {
					(argName + i, argValues(i).asInstanceOf[ParameterValue[Any]])
				})
			}.flatten

			new RichSQL(newQuery, newValues: _*)
		}
	}

	object RichSQL {
		def apply[A](query: String) = new RichSQL(query)
	}
}
lucidsoftware/nark
app/com/lucidchart/open/nark/models/AnormImplicits.scala
Scala
apache-2.0
5,150
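The scaladoc above already illustrates multiInsert; onList deserves a matching sketch, since expanding a single {ids} placeholder into indexed, individually bound placeholders is what plain anorm's on cannot do for IN clauses. The table and column names below are invented, and the snippet assumes anorm's stock implicit ParameterValue conversions are in scope.

import com.lucidchart.open.nark.models.AnormImplicits._

val userIds = Seq(1L, 2L, 3L)

// Becomes: select * from users where id in ({ids_0},{ids_1},{ids_2}),
// with each ids_i bound as a separate parameter.
val query = RichSQL("""
	select * from users where id in ({ids})
""").onList("ids" -> userIds).toSQL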
package im.actor.server.commons.serialization import akka.serialization._ import com.google.protobuf.{ GeneratedMessage ⇒ GGeneratedMessage, ByteString } import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.{ Builder ⇒ MapBuilder } import com.trueaccord.scalapb.GeneratedMessage import scala.util.{ Failure, Success } object ActorSerializer { private val ARRAY_OF_BYTE_ARRAY = Array[Class[_]](classOf[Array[Byte]]) // FIXME: dynamically increase capacity private val map = new MapBuilder[Int, Class[_]].maximumWeightedCapacity(1024).build() private val reverseMap = new MapBuilder[Class[_], Int].maximumWeightedCapacity(1024).build() def clean(): Unit = { map.clear() reverseMap.clear() } def register(id: Int, clazz: Class[_]): Unit = { get(id) match { case None ⇒ get(clazz) match { case Some(regId) ⇒ throw new IllegalArgumentException(s"There is already a mapping for class: ${clazz}, id: ${regId}") case None ⇒ map.put(id, Class.forName(clazz.getName + '$')) reverseMap.put(clazz, id) } case Some(registered) ⇒ if (!get(clazz).exists(_ == id)) throw new IllegalArgumentException(s"There is already a mapping with id ${id}: ${map.get(id)}") } } def register(items: (Int, Class[_])*): Unit = items foreach { case (id, clazz) ⇒ register(id, clazz) } def get(id: Int): Option[Class[_]] = Option(map.get(id)) def get(clazz: Class[_]) = Option(reverseMap.get(clazz)) def fromBinary(bytes: Array[Byte]): AnyRef = { val SerializedMessage(id, bodyBytes) = SerializedMessage.parseFrom(bytes) ActorSerializer.get(id) match { case Some(clazz) ⇒ val field = clazz.getField("MODULE$").get(null) clazz .getDeclaredMethod("validate", ARRAY_OF_BYTE_ARRAY: _*) .invoke(field, bodyBytes.toByteArray) match { case Success(msg) ⇒ msg.asInstanceOf[GeneratedMessage] case Failure(e) ⇒ throw e } case None ⇒ throw new IllegalArgumentException(s"Can't find mapping for id ${id}") } } def toBinary(o: AnyRef): Array[Byte] = { ActorSerializer.get(o.getClass) match { case Some(id) ⇒ o match { case m: GeneratedMessage ⇒ SerializedMessage(id, ByteString.copyFrom(m.toByteArray)).toByteArray case m: GGeneratedMessage ⇒ SerializedMessage(id, ByteString.copyFrom(m.toByteArray)).toByteArray case _ ⇒ throw new IllegalArgumentException(s"Can't serialize non-scalapb message [${o}]") } case None ⇒ throw new IllegalArgumentException(s"Can't find mapping for message [${o}]") } } } class ActorSerializer extends Serializer { override def identifier: Int = 3456 override def includeManifest: Boolean = false override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = ActorSerializer.fromBinary(bytes) override def toBinary(o: AnyRef): Array[Byte] = ActorSerializer.toBinary(o) }
winiceo/actor-platform
actor-server/actor-commons-base/src/main/scala/im/actor/server/commons/serialization/ActorSerializer.scala
Scala
mit
3,078
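A hedged sketch of the intended flow, using only the API defined above: each scalapb message class is mapped to a stable id once at startup, after which the Akka serializer can round-trip instances. The id 20001 is arbitrary; a real generated message class goes in messageClass.

import im.actor.server.commons.serialization.ActorSerializer

object SerializationSetup {
  // Register each message class under a stable id; ids must not change
  // across releases or persisted messages become unreadable.
  def init(messageClass: Class[_]): Unit =
    ActorSerializer.register(20001 -> messageClass)

  // toBinary wraps the body in SerializedMessage(id, bytes);
  // fromBinary looks the id up and calls the companion's validate.
  def roundTrip(msg: AnyRef): AnyRef =
    ActorSerializer.fromBinary(ActorSerializer.toBinary(msg))
}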
/* * Copyright (c) 2014-2021 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.tail import cats.laws._ import cats.laws.discipline._ import monix.eval.Task import scala.collection.mutable.ListBuffer object IterantFromSeqSuite extends BaseTestSuite { test("Iterant[Task].fromSeq(vector)") { implicit s => check1 { (list: List[Int]) => val result = Iterant[Task].fromSeq(list.toVector).toListL result <-> Task.now(list) } } test("Iterant[Task].fromSeq(list)") { implicit s => check1 { (list: List[Int]) => val result = Iterant[Task].fromSeq(list).toListL result <-> Task.now(list) } } test("Iterant[Task].fromSeq(iterable)") { implicit s => check1 { (list: List[Int]) => val result = Iterant[Task].fromSeq(ListBuffer(list: _*).toSeq).toListL result <-> Task.now(list) } } }
monifu/monifu
monix-tail/shared/src/test/scala/monix/tail/IterantFromSeqSuite.scala
Scala
apache-2.0
1,458
package rpgboss.editor.uibase import rpgboss.lib._ import rpgboss.model._ import scala.collection.mutable.Buffer import scala.swing._ import scala.swing.event._ import javax.swing.ImageIcon import rpgboss.editor.Internationalized object SwingUtils { def lbl(s: String) = new Label(s) def leftLabel(s: String) = new Label(s) { xAlignment = Alignment.Left } /** * General form that can be used for unusual index positioning. */ def customIdxRenderer[A, B](f: (A, Int) => B) (implicit renderer: ListView.Renderer[B]): ListView.Renderer[A] = new ListView.Renderer[A] { def componentFor( list: ListView[_ <: A], isSelected: Boolean, focused: Boolean, a: A, indexArgument: Int): Component = { // Normalize for case of selected combobox. (Otherwise -1 shown). val index = if (indexArgument < 0) list.selection.indices.head else indexArgument renderer.componentFor( list.asInstanceOf[ListView[_ <: B]], isSelected, focused, f(a, index), index) } } def standardIdxRenderer[A](labelF: A => String) (implicit renderer: ListView.Renderer[String]) = customIdxRenderer((a: A, idx: Int) => StringUtils.standardIdxFormat(idx, labelF(a))) def boolField(text: String, initial: Boolean, onUpdate: Boolean => Unit, additionalAction: Option[() => Unit] = None) = new CheckBox(text) { selected = initial listenTo(this) reactions += { case ButtonClicked(_) => onUpdate(selected) additionalAction.foreach(_.apply()) } } def colorField(initial: ColorSpec, onUpdate: ColorSpec => Unit) = { val initialColor = new Color(initial.r, initial.g, initial.b, initial.a) new ColorChooser(initialColor) { for(p <- peer.getChooserPanels()) { p.getDisplayName() match { case "Swatches" => peer.removeChooserPanel(p) case "HSL" => peer.removeChooserPanel(p) case _ => } } val previewPane = new ImagePanel( Utils.readClasspathImage("inGamePreview.png")) peer.setPreviewPanel(previewPane.peer) previewPane.tintColor = initialColor listenTo(this) reactions += { case ColorChanged(_, newColor) => val components = newColor.getRGBComponents(null) onUpdate(ColorSpec( components(0), components(1), components(2), components(3))) previewPane.tintColor = newColor } } } def textField(initial: String, onUpdate: String => Unit, additionalAction: Option[() => Unit] = None, preferredWidth: Int = 150, skipSizing: Boolean = false) = new TextField { if (!skipSizing) { minimumSize = new Dimension(100, 1) preferredSize = new Dimension(preferredWidth, preferredSize.height) } text = initial listenTo(this) reactions += { case ValueChanged(_) => onUpdate(text) additionalAction.foreach(_.apply()) } } def textAreaField(initial: Array[String], onUpdate: Array[String] => Unit) = { val textEdit = new TextArea(initial.mkString("\n")) { listenTo(this) lineWrap = true wordWrap = true reactions += { case e: ValueChanged => onUpdate(text.split("\n")) } } new ScrollPane { contents = textEdit preferredSize = new Dimension(300, 150) } } def percentField(min: Float, max: Float, initial: Float, onUpdate: Float => Unit) = { val spinner = new NumberSpinner( (min * 100).toInt, (max * 100).toInt, (initial * 100).round, v => onUpdate(v.toFloat / 100)) new BoxPanel(Orientation.Horizontal) { contents += spinner contents += new Label("%") { preferredSize = new Dimension(20, 15) } def value = spinner.getValue.toFloat / 100f def setValue(v: Float) = spinner.setValue((v * 100).round) override def enabled_=(b: Boolean) { super.enabled_=(b) spinner.enabled_=(b) } } } def percentIntField(min: Int, max: Int, initial: Int, onUpdate: Int => Unit, additionalAction: Option[() => Unit] = None) = { val spinner = new 
NumberSpinner(min, max, initial, onUpdate, additionalAction) new BoxPanel(Orientation.Horizontal) { contents += spinner contents += new Label("%") { preferredSize = new Dimension(20, 15) } def value = spinner.getValue def setValue(v: Int) = spinner.setValue(v) override def enabled_=(b: Boolean) { super.enabled_=(b) spinner.enabled_=(b) } } } def pxField(min: Int, max: Int, initial: Int, onUpdate: Int => Unit) = { val spinner = new NumberSpinner(min, max, initial, onUpdate) new BoxPanel(Orientation.Horizontal) { contents += spinner contents += new Label("px") { preferredSize = new Dimension(20, 15) } def value = spinner.getValue def setValue(v: Float) = spinner.setValue(v.round) override def enabled_=(b: Boolean) { super.enabled_=(b) spinner.enabled_=(b) } } } /** * Accepts any types <: that are 'viewable' i.e. implicitly convertible to * HasName. */ def indexedCombo[T <% HasName]( choices: Seq[T], initial: Int, onUpdate: Int => Unit, additionalAction: Option[() => Unit] = None) = { new ComboBox(choices) { selection.index = initial renderer = standardIdxRenderer(_.name) listenTo(selection) reactions += { case SelectionChanged(_) => onUpdate(selection.index) additionalAction.foreach(_.apply()) } } } def enumIdCombo[T <: Enumeration](enum: T)( initialId: Int, onUpdate: Int => Any, additionalAction: Option[() => Unit] = None, overrideChoiceSet: Option[Seq[enum.Value]] = None, customRenderer: Option[enum.Value => Any] = None) = { val choices = overrideChoiceSet.getOrElse(enum.values.toSeq) new ComboBox(choices) { selection.item = enum(initialId) listenTo(selection) reactions += { case SelectionChanged(_) => onUpdate(selection.item.id) additionalAction.foreach(_.apply()) } if (customRenderer.isDefined) { renderer = ListView.Renderer(customRenderer.get) } else { renderer = ListView.Renderer(v => Internationalized.getMessage(v.toString())) } } } def openEnumSelectDialog[T <: Enumeration](enum: T)( owner: Window, windowTitle: String, onSelect: enum.Value => Any) = { val d = new StdDialog(owner, windowTitle) { // Noop, as there is no okay button def okFunc() = {} contents = new BoxPanel(Orientation.Vertical) { enum.values.foreach { value => contents += new Button(Action( Internationalized.getMessage(value.toString)) { onSelect(value) close() }) } contents += new DesignGridPanel { addCancel(cancelBtn) } } } d.open() } def makeButtonGroup(btns: Seq[AbstractButton]) = { val firstSelected = btns.find(_.selected) val group = new ButtonGroup(btns: _*) firstSelected.map { btn => group.select(btn) } group } def addBtnsAsGrp(contents: Buffer[Component], btns: Seq[AbstractButton]) = { val group = makeButtonGroup(btns) contents ++= btns group } def enumButtons[T <: Enumeration](enum: T)( initial: enum.Value, selectF: enum.Value => Any, iconPaths: List[String]) = { val enumValues = enum.values.toList assert(iconPaths.isEmpty || iconPaths.length == enumValues.length) enumValues.zipWithIndex.map { case (eVal, i) => new ToggleButton() { val buttonString = if (iconPaths.isEmpty) Internationalized.getMessage(eVal.toString) else "" action = Action(buttonString) { selectF(eVal) } if (!iconPaths.isEmpty) { icon = new ImageIcon(Utils.readClasspathImage(iconPaths(i))) } selected = eVal == initial } } } def enumIdRadios[T <: Enumeration](enum: T)( initialId: Int, onUpdate: Int => Any, choices: Seq[T#Value] = Seq(), disabledSet: Set[T#Value] = Set[T#Value]()) = { val actualChoices = if (choices.isEmpty) enum.values.toSeq else choices actualChoices.map { eVal => new RadioButton() { val value = eVal action = 
Action(Internationalized.getMessage(eVal.toString)) { onUpdate(eVal.id) } enabled = !disabledSet.contains(eVal) selected = enabled && eVal.id == initialId } } } def boolEnumHorizBox( enum: BooleanRpgEnum, initial: Boolean, onUpdate: Boolean => Any) = { val radios = enumIdRadios(enum)( enum.fromBoolean(initial).id, id => onUpdate(enum.toBoolean(id))) new BoxPanel(Orientation.Horizontal) { val group = addBtnsAsGrp(contents, radios) } } def enumVerticalBox( enum: RpgEnum, initial: Int, onUpdate: Int => Any) = { val radios = enumIdRadios(enum)(initial, onUpdate) new BoxPanel(Orientation.Vertical) { val group = addBtnsAsGrp(contents, radios) def updateId(newId: Int) = { radios.find(_.value.id == newId).map(group.select) } } } def showErrorDialog(parent: Component, message: String) = { Dialog.showMessage(parent, message, "Error", Dialog.Message.Error) } }
rpgboss/rpgboss
desktop/src/main/scala/rpgboss/editor/uibase/SwingUtils.scala
Scala
agpl-3.0
9,751
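A hypothetical form wiring a few of the helpers above into a panel, to show the common pattern: each helper takes the initial value plus an onUpdate callback, so the UI stays a thin layer over plain vars. The settings being edited are invented for illustration.

import scala.swing._
import rpgboss.editor.uibase.SwingUtils._

class SettingsPanelSketch extends BoxPanel(Orientation.Vertical) {
  var title = "Untitled"
  var fullscreen = false
  var volume = 80

  contents += textField(title, v => title = v)
  contents += boolField("Fullscreen", fullscreen, v => fullscreen = v)
  contents += percentIntField(0, 100, volume, v => volume = v)
}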
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // scalastyle:off println package org.apache.spark.examples.graphx import scala.collection.mutable import org.apache.spark._ import org.apache.spark.graphx._ import org.apache.spark.graphx.PartitionStrategy._ import org.apache.spark.graphx.lib._ import org.apache.spark.internal.Logging import org.apache.spark.storage.StorageLevel /** * Driver program for running graph algorithms. */ object Analytics extends Logging { def main(args: Array[String]): Unit = { if (args.length < 2) { val usage = """Usage: Analytics <taskType> <file> --numEPart=<num_edge_partitions> |[other options] Supported 'taskType' as follows: |pagerank Compute PageRank |cc Compute the connected components of vertices |triangles Count the number of triangles""".stripMargin System.err.println(usage) System.exit(1) } val taskType = args(0) val fname = args(1) val optionsList = args.drop(2).map { arg => arg.dropWhile(_ == '-').split('=') match { case Array(opt, v) => (opt -> v) case _ => throw new IllegalArgumentException(s"Invalid argument: $arg") } } val options = mutable.Map(optionsList: _*) val conf = new SparkConf() GraphXUtils.registerKryoClasses(conf) val numEPart = options.remove("numEPart").map(_.toInt).getOrElse { println("Set the number of edge partitions using --numEPart.") sys.exit(1) } val partitionStrategy: Option[PartitionStrategy] = options.remove("partStrategy") .map(PartitionStrategy.fromString(_)) val edgeStorageLevel = options.remove("edgeStorageLevel") .map(StorageLevel.fromString(_)).getOrElse(StorageLevel.MEMORY_ONLY) val vertexStorageLevel = options.remove("vertexStorageLevel") .map(StorageLevel.fromString(_)).getOrElse(StorageLevel.MEMORY_ONLY) taskType match { case "pagerank" => val tol = options.remove("tol").map(_.toFloat).getOrElse(0.001F) val outFname = options.remove("output").getOrElse("") val numIterOpt = options.remove("numIter").map(_.toInt) options.foreach { case (opt, _) => throw new IllegalArgumentException(s"Invalid option: $opt") } println("======================================") println("| PageRank |") println("======================================") val sc = new SparkContext(conf.setAppName(s"PageRank($fname)")) val unpartitionedGraph = GraphLoader.edgeListFile(sc, fname, numEdgePartitions = numEPart, edgeStorageLevel = edgeStorageLevel, vertexStorageLevel = vertexStorageLevel).cache() val graph = partitionStrategy.foldLeft(unpartitionedGraph)(_.partitionBy(_)) println(s"GRAPHX: Number of vertices ${graph.vertices.count}") println(s"GRAPHX: Number of edges ${graph.edges.count}") val pr = (numIterOpt match { case Some(numIter) => PageRank.run(graph, numIter) case None => PageRank.runUntilConvergence(graph, tol) }).vertices.cache() println(s"GRAPHX: Total rank: ${pr.map(_._2).reduce(_ + _)}") if (!outFname.isEmpty) { 
logWarning(s"Saving pageranks of pages to $outFname") pr.map { case (id, r) => id + "\t" + r }.saveAsTextFile(outFname) } sc.stop() case "cc" => options.foreach { case (opt, _) => throw new IllegalArgumentException(s"Invalid option: $opt") } println("======================================") println("| Connected Components |") println("======================================") val sc = new SparkContext(conf.setAppName(s"ConnectedComponents($fname)")) val unpartitionedGraph = GraphLoader.edgeListFile(sc, fname, numEdgePartitions = numEPart, edgeStorageLevel = edgeStorageLevel, vertexStorageLevel = vertexStorageLevel).cache() val graph = partitionStrategy.foldLeft(unpartitionedGraph)(_.partitionBy(_)) val cc = ConnectedComponents.run(graph) println(s"Components: ${cc.vertices.map { case (vid, data) => data }.distinct()}") sc.stop() case "triangles" => options.foreach { case (opt, _) => throw new IllegalArgumentException(s"Invalid option: $opt") } println("======================================") println("| Triangle Count |") println("======================================") val sc = new SparkContext(conf.setAppName(s"TriangleCount($fname)")) val graph = GraphLoader.edgeListFile(sc, fname, canonicalOrientation = true, numEdgePartitions = numEPart, edgeStorageLevel = edgeStorageLevel, vertexStorageLevel = vertexStorageLevel) // TriangleCount requires the graph to be partitioned .partitionBy(partitionStrategy.getOrElse(RandomVertexCut)).cache() val triangles = TriangleCount.run(graph) println("Triangles: " + triangles.vertices.map { case (vid, data) => data.toLong }.reduce(_ + _) / 3) sc.stop() case _ => println("Invalid task type.") } } } // scalastyle:on println
saltstar/spark
examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
Scala
apache-2.0
6,090
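To make the option grammar concrete, here is the same --opt=value parsing as in main() applied to a sample argument vector (the file path is invented), showing what reaches the options map before task dispatch:

val args = Array("pagerank", "hdfs:///data/edges.txt", "--numEPart=8", "--tol=0.0001")
val optionsList = args.drop(2).map { arg =>
  arg.dropWhile(_ == '-').split('=') match {
    case Array(opt, v) => opt -> v
    case _ => throw new IllegalArgumentException(s"Invalid argument: $arg")
  }
}
// optionsList: Array(("numEPart", "8"), ("tol", "0.0001"))
// "pagerank" selects the task and "hdfs:///data/edges.txt" is the edge list.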
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2002-2010, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */

package scala.xml
package include.sax

import scala.xml.include._
import collection.mutable.Stack
import org.xml.sax.{ ContentHandler, XMLReader, Locator, Attributes }
import org.xml.sax.ext.LexicalHandler
import java.io.{ File, OutputStream, OutputStreamWriter, Writer, IOException }

/** XIncluder is a SAX <code>ContentHandler</code>
 * that writes its XML document onto an output stream after resolving
 * all <code>xinclude:include</code> elements.
 *
 * <p>
 *   based on Eliotte Rusty Harold's SAXXIncluder
 * </p>
 */
class XIncluder(outs: OutputStream, encoding: String) extends ContentHandler with LexicalHandler {

  var out = new OutputStreamWriter(outs, encoding)

  def setDocumentLocator(locator: Locator) {}

  def startDocument() {
    try {
      out.write("<?xml version='1.0' encoding='" + encoding + "'?>\r\n");
    }
    catch {
      case e:IOException =>
        throw new SAXException("Write failed", e)
    }
  }

  def endDocument() {
    try {
      out.flush()
    }
    catch {
      case e:IOException =>
        throw new SAXException("Flush failed", e)
    }
  }

  def startPrefixMapping(prefix: String , uri: String) {}

  def endPrefixMapping(prefix: String) {}

  def startElement(namespaceURI: String, localName: String, qualifiedName: String, atts: Attributes) = {
    try {
      out.write("<" + qualifiedName);
      var i = 0; while (i < atts.getLength()) {
        out.write(" ");
        out.write(atts.getQName(i));
        out.write("='");
        val value = atts.getValue(i);
        // @todo Need to use character references if the encoding
        // can't support the character
        out.write(xml.Utility.escape(value))
        out.write("'");
        i += 1
      }
      out.write(">")
    }
    catch {
      case e:IOException =>
        throw new SAXException("Write failed", e)
    }
  }

  def endElement(namespaceURI: String, localName:String, qualifiedName: String) {
    try {
      out.write("</" + qualifiedName + ">")
    }
    catch {
      case e: IOException =>
        throw new SAXException("Write failed", e)
    }
  }

  // need to escape characters that are not in the given
  // encoding using character references????
  def characters(ch: Array[Char], start: Int, length: Int) {
    try {
      var i = 0; while (i < length) {
        val c = ch(start+i);
        if (c == '&') out.write("&amp;");
        else if (c == '<') out.write("&lt;");
        // This next fix is normally not necessary.
        // However, it is required if text contains ]]>
        // (The end CDATA section delimiter)
        else if (c == '>') out.write("&gt;");
        else out.write(c);
        i = i+1;
      }
    }
    catch {
      case e: IOException =>
        throw new SAXException("Write failed", e);
    }
  }

  def ignorableWhitespace(ch: Array[Char], start: Int , length: Int) {
    this.characters(ch, start, length)
  }

  // do I need to escape text in PI????
  def processingInstruction(target: String, data: String) {
    try {
      out.write("<?" + target + " " + data + "?>")
    }
    catch {
      case e:IOException =>
        throw new SAXException("Write failed", e)
    }
  }

  def skippedEntity(name: String) {
    try {
      out.write("&" + name + ";")
    }
    catch {
      case e:IOException =>
        throw new SAXException("Write failed", e)
    }
  }

  // LexicalHandler methods
  private var inDTD: Boolean = false
  private val entities = new Stack[String]()

  def startDTD(name: String, publicID: String, systemID: String) {
    inDTD = true
    // if this is the source document, output a DOCTYPE declaration
    if (entities.isEmpty) {
      var id = ""
      if (publicID != null) id = " PUBLIC \"" + publicID + "\" \"" + systemID + '"';
      else if (systemID != null) id = " SYSTEM \"" + systemID + '"';
      try {
        out.write("<!DOCTYPE " + name + id + ">\r\n")
      }
      catch {
        case e:IOException =>
          throw new SAXException("Error while writing DOCTYPE", e)
      }
    }
  }

  def endDTD() {}

  def startEntity(name: String) {
    entities push name
  }

  def endEntity(name: String) {
    entities.pop()
  }

  def startCDATA() {}
  def endCDATA() {}

  // Just need this reference so we can ask if a comment is
  // inside an include element or not
  private var filter: XIncludeFilter = null

  def setFilter(filter: XIncludeFilter) {
    this.filter = filter
  }

  def comment(ch: Array[Char], start: Int, length: Int) {
    if (!inDTD && !filter.insideIncludeElement()) {
      try {
        out.write("<!--")
        out.write(ch, start, length)
        out.write("-->")
      }
      catch {
        case e: IOException =>
          throw new SAXException("Write failed", e)
      }
    }
  }
}
cran/rkafkajars
java/scala/xml/include/sax/XIncluder.scala
Scala
apache-2.0
5356
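A minimal usage sketch for the XIncluder above (not part of the original file): it wires the handler into a plain SAX2 parse. The demo object and file names are assumptions, and a real XInclude run would normally chain an XIncludeFilter in front of this handler.

import java.io.FileOutputStream
import org.xml.sax.helpers.XMLReaderFactory
import scala.xml.include.sax.XIncluder

object XIncluderDemo {
  def main(args: Array[String]): Unit = {
    val reader = XMLReaderFactory.createXMLReader() // any SAX2-compliant parser
    val out = new FileOutputStream("resolved.xml")  // hypothetical output file
    try {
      reader.setContentHandler(new XIncluder(out, "UTF-8"))
      reader.parse("document.xml")                  // hypothetical input document
    } finally out.close()
  }
}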
package com.sksamuel.scrimage

import org.scalatest.{Matchers, WordSpec}

class Image47Test extends WordSpec with Matchers {

  "loading iphone image" should {
    "detect rotation flag" in {
      val src = Image.fromResource("/issue47.JPG")
      src.height shouldBe 3264
      src.width shouldBe 2448
    }
  }
}
carlosFattor/scrimage
scrimage-core/src/test/scala/com/sksamuel/scrimage/Image47Test.scala
Scala
apache-2.0
316
package cromwell.filesystems.gcs

import akka.actor.ActorSystem
import com.google.api.gax.retrying.RetrySettings
import com.google.auth.Credentials
import com.google.cloud.storage.contrib.nio.CloudStorageConfiguration
import cromwell.cloudsupport.gcp.auth.GoogleAuthMode
import cromwell.cloudsupport.gcp.gcs.GcsStorage
import cromwell.core.WorkflowOptions
import cromwell.core.path.PathBuilderFactory

import scala.concurrent.ExecutionContext

final case class GcsPathBuilderFactory(authMode: GoogleAuthMode,
                                       applicationName: String,
                                       retrySettings: Option[RetrySettings] = None,
                                       cloudStorageConfiguration: CloudStorageConfiguration =
                                         GcsStorage.DefaultCloudStorageConfiguration)
  extends PathBuilderFactory {

  def withOptions(options: WorkflowOptions)(implicit as: ActorSystem, ec: ExecutionContext) = {
    GcsPathBuilder.fromAuthMode(authMode, applicationName, retrySettings, cloudStorageConfiguration, options)
  }

  /**
    * Ignores the authMode and creates a GcsPathBuilder using the passed credentials directly.
    * Can be used when the Credentials are already available.
    */
  def fromCredentials(options: WorkflowOptions, credentials: Credentials) = {
    GcsPathBuilder.fromCredentials(credentials, applicationName, retrySettings, cloudStorageConfiguration, options)
  }
}
ohsu-comp-bio/cromwell
filesystems/gcs/src/main/scala/cromwell/filesystems/gcs/GcsPathBuilderFactory.scala
Scala
bsd-3-clause
1409
package controllers

import play.api.mvc._
import dao.{QueryBuilder => Q, DAO}
import models.{Row, Table, Error => Err}
import play.api.libs.json.{JsValue, Json}
import scala.concurrent.Future
import play.api.libs.json._
import play.api.Logger
import scala.concurrent.ExecutionContext.Implicits.global

object Application extends Controller {

  def index = Action {
    Ok("")
  }

  def getTables = Action.async {
    import models.Table.tableFormat
    DAO.getTables map {
      case a: List[Table] => Ok(Json.toJson(a))
      case _ => NoContent
    }
  }

  def getTableContent(name: String) = Action.async { implicit request =>
    parser(name) { model =>
      execute(Q.build(model))
    }
  }

  def filterByColumn(name: String, column: String, filter: String) = Action.async { implicit request =>
    parser(name, Some(column), Some(filter)) { model =>
      execute(Q.build(model))
    }
  }

  def query = Action.async(parse.json) { request =>
    // single-field JSON lookup; the flattened dump showed a doubled backslash here
    (request.body \ "query").asOpt[String].map { q =>
      execute(q)
    }.getOrElse {
      Future.successful(BadRequest("Missing parameter [query]"))
    }
  }

  def parser[A, U](name: String, column: Option[String] = None, filter: Option[String] = None)
                  (callback: models.Request => Future[SimpleResult])
                  (implicit request: play.api.mvc.Request[A]): Future[SimpleResult] = {
    val function: Option[String] = request.getQueryString("f")
    val limit: Option[String] = request.getQueryString("limit")
    val offset: Option[String] = request.getQueryString("offset")
    val by: Option[String] = request.getQueryString("by")
    val order: Option[String] = request.getQueryString("order")
    val language: Option[String] = request.getQueryString("language")
    val field: Option[String] = request.getQueryString("field")

    val req = models.Request(name, func = function, limit = limit, offset = offset, by = by,
      order = order, column = column, filter = filter, language = language, field = field)

    if (Q.validate(req)) callback(req)
    else Future.successful(BadRequest("This function does not exist"))
  }

  def execute(query: String) = {
    DAO.execute(query) map {
      case a: List[Row] =>
        val newList = a.flatMap(b => List(b.toJson()))
        if (newList.isEmpty) NoContent
        else Ok(Json.toJson(newList))
      case e: Err => BadRequest(Json.toJson(e))
      case _ => BadRequest("Bad request: wrong table name or connection error")
    }
  }
}
enginyoyen/postgresql-rest-api
app/controllers/Application.scala
Scala
mit
2529
package ee.cone.c4actor

import net.jpountz.lz4.{LZ4BlockInputStream, LZ4BlockOutputStream}
import okio.{Buffer, ByteString}

import scala.annotation.tailrec

case object LZ4Compressor extends DeCompressor with Compressor with RawCompressor {
  @tailrec
  private def readAgain(in: LZ4BlockInputStream, sink: Buffer): Unit = {
    val size = in.available()
    val byteArray = new Array[Byte](size)
    if (in.read(byteArray) >= 0) {
      sink.write(byteArray)
      readAgain(in, sink)
    }
  }

  def deCompress(data: ByteString): ByteString =
    FinallyClose(new Buffer) { buffer ⇒
      FinallyClose(new LZ4BlockInputStream(new Buffer().write(data).inputStream())) { lz41 ⇒
        readAgain(lz41, buffer)
      }
      buffer.readByteString()
    }

  def compress(data: ByteString): ByteString =
    FinallyClose(new Buffer) { buffer ⇒
      FinallyClose(new LZ4BlockOutputStream(buffer.outputStream(), 32000000)) { lz41 ⇒
        lz41.write(data.toByteArray)
      }
      buffer.readByteString()
    }

  def compress(data: Array[Byte]): Array[Byte] =
    FinallyClose(new Buffer) { buffer ⇒
      FinallyClose(new LZ4BlockOutputStream(buffer.outputStream(), 32000000)) { lz41 ⇒
        lz41.write(data)
      }
      buffer.readByteArray()
    }

  def name: String = "lz4"
}
wregs/c4proto
c4actor-kafka/src/main/scala/ee/cone/c4actor/LZ4Compressor.scala
Scala
apache-2.0
1301
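A hedged round-trip sketch for the LZ4Compressor above; the demo object and input string are illustrative, not part of the repo. Compressing and then decompressing should reproduce the original bytes.

import ee.cone.c4actor.LZ4Compressor
import okio.ByteString

object LZ4RoundTrip {
  def main(args: Array[String]): Unit = {
    val original = ByteString.encodeUtf8("hello, lz4 block format")
    val packed = LZ4Compressor.compress(original)   // LZ4 block stream
    val restored = LZ4Compressor.deCompress(packed) // inverse operation
    assert(restored == original)                    // okio.ByteString compares by content
  }
}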
package ammonite.sshd.util

import acyclic.file
import java.io.{InputStream, OutputStream, PrintStream}

/**
 * Container for staging environment important for Ammonite repl to run correctly.
 * @param thread a thread where execution takes place. Important for restoring contextClassLoader
 * @param contextClassLoader thread's context class loader. Ammonite repl uses that to load classes
 * @param systemIn
 * @param systemOut
 * @param systemErr
 */
case class Environment(
  thread: Thread,
  contextClassLoader: ClassLoader,
  systemIn: InputStream,
  systemOut: PrintStream,
  systemErr: PrintStream
)

object Environment {
  def apply(classLoader: ClassLoader, in: InputStream, out: PrintStream): Environment =
    apply(Thread.currentThread(), classLoader, in, out, out)

  def apply(classLoader: ClassLoader, in: InputStream, out: OutputStream): Environment =
    apply(classLoader, in, new PrintStream(out))

  /**
   * Collects information about the current environment.
   */
  def collect() = Environment(
    Thread.currentThread(),
    Thread.currentThread().getContextClassLoader,
    System.in,
    System.out,
    System.err
  )

  /**
   * Runs your code with the supplied environment installed.
   * After the code block finishes, the original environment is restored.
   */
  def withEnvironment(env: Environment)(code: ⇒ Any): Any = {
    val oldEnv = collect()
    try {
      install(env)
      code
    } finally {
      install(oldEnv)
    }
  }

  /**
   * Resets the execution environment to the values saved in the passed Environment container.
   * @param env environment to reset to
   */
  def install(env: Environment): Unit = {
    env.thread.setContextClassLoader(env.contextClassLoader)
    System.setIn(env.systemIn)
    System.setOut(env.systemOut)
    System.setErr(env.systemErr)
  }
}
coderabhishek/Ammonite
sshd/src/main/scala/ammonite/sshd/util/Environment.scala
Scala
mit
1899
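A short sketch of Environment.withEnvironment above, assuming we want to capture a block's standard output; the demo object and stream wiring are illustrative.

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, PrintStream}
import ammonite.sshd.util.Environment

object EnvironmentDemo {
  def main(args: Array[String]): Unit = {
    val captured = new ByteArrayOutputStream()
    val env = Environment(
      getClass.getClassLoader,
      new ByteArrayInputStream(Array.emptyByteArray),
      new PrintStream(captured)
    )
    // System.in/out/err and the context class loader are swapped in for the
    // duration of the block, then the previous values are restored.
    Environment.withEnvironment(env) {
      println("goes to the captured stream, not the console")
    }
    assert(captured.toString contains "captured stream")
  }
}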
package tifmo

import dcstree.Executor
import dcstree.Relation
import inference.Dimension
import inference.IEngineCore
import inference.TermIndex
import inference.Term
import inference.IEFunction
import inference.RuleDo
import inference.RuleArg
import inference.IEPredRL
import inference.IEPredSubsume
import inference.Debug_SimpleRuleTrace
import inference.RAConversion._

package document {

  /**
   * Partial order relation.
   */
  case class RelPartialOrder(name: String) extends Relation {
    def execute[T](ex: Executor, a: T, b: T) {
      (ex, a, b) match {
        case (ie: IEngineCore, xa: TermIndex, xb: TermIndex) => {
          assert(xa.dim == xb.dim)
          ie.claimFunc(FuncPartialOrder, Seq(xa), (name, xa.dim),
            Debug_SimpleRuleTrace("RelPartialOrder", ie.getNewPredID()))
          ie.claimFunc(FuncPartialOrder, Seq(xb), (name, xb.dim),
            Debug_SimpleRuleTrace("RelPartialOrder", ie.getNewPredID()))
        }
        case _ => {}
      }
    }
  }

  private[document] object FuncPartialOrder extends IEFunction {
    def headDim(tms: Seq[Term], param: Any) = param.asInstanceOf[(String, Dimension)]._2

    def applyFunc(ie: IEngineCore, tms: Seq[TermIndex], param: Any) {
      val name = param.asInstanceOf[(String, Dimension)]._1
      ie.foreachARLX(tms.head, Seq(name), rFuncPO3)
      ie.foreachXRLB(tms.head, Seq(name), rFuncPO2)
      ie.foreachSubset(tms.head, Seq(name), rFuncPO1)
    }
  }

  private[document] object rFuncPO3 extends RuleDo[IEPredRL] {
    def apply(ie: IEngineCore, pred: IEPredRL, args: Seq[RuleArg]) {
      args match {
        case Seq(RuleArg(name: String)) => pred.rl match {
          case RelPartialOrder(nm) => if (nm == name) {
            var task = Nil: List[() => Unit]
            for ((x, rl @ RelPartialOrder(xnm)) <- pred.a.allXRLB; if xnm == name) {
              task = (() => ie.claimRL(x, rl, pred.b,
                Debug_SimpleRuleTrace("Partial Order transitivity", ie.getNewPredID()))) :: task
            }
            task.foreach(_())
          }
          case _ => {}
        }
        case _ => throw new Exception("rFuncPO3 error!")
      }
    }
  }

  private[document] object rFuncPO2 extends RuleDo[IEPredRL] {
    def apply(ie: IEngineCore, pred: IEPredRL, args: Seq[RuleArg]) {
      args match {
        case Seq(RuleArg(name: String)) => pred.rl match {
          case RelPartialOrder(nm) => if (nm == name) {
            var task = Nil: List[() => Unit]
            for ((rl @ RelPartialOrder(xnm), x) <- pred.b.allARLX; if xnm == name) {
              task = (() => ie.claimRL(pred.a, rl, x,
                Debug_SimpleRuleTrace("Partial Order transitivity", ie.getNewPredID()))) :: task
            }
            task.foreach(_())
          }
          case _ => {}
        }
        case _ => throw new Exception("rFuncPO2 error!")
      }
    }
  }

  private[document] object rFuncPO1 extends RuleDo[IEPredSubsume] {
    def apply(ie: IEngineCore, pred: IEPredSubsume, args: Seq[RuleArg]) {
      args match {
        case Seq(RuleArg(name: String)) => {
          var task = Nil: List[() => Unit]
          for ((rl @ RelPartialOrder(xnm), x) <- pred.superset.allARLX; if xnm == name) {
            task = (() => ie.claimRL(pred.subset, rl, x,
              Debug_SimpleRuleTrace("Partial Order sub", ie.getNewPredID()))) :: task
          }
          for ((x, rl @ RelPartialOrder(xnm)) <- pred.superset.allXRLB; if xnm == name) {
            task = (() => ie.claimRL(x, rl, pred.subset,
              Debug_SimpleRuleTrace("Partial Order sub", ie.getNewPredID()))) :: task
          }
          task.foreach(_())
        }
        case _ => throw new Exception("rFuncPO1 error!")
      }
    }
  }
}
tianran/tifmo
src/tifmo/document/RelPartialOrder.scala
Scala
bsd-2-clause
3387
/*
 * Scala (https://www.scala-lang.org)
 *
 * Copyright EPFL and Lightbend, Inc.
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package scala.tools.partest.async

import scala.annotation.compileTimeOnly
import scala.collection.immutable.HashMap
import scala.collection.mutable
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
import scala.tools.testkit.async.AsyncStateMachine

object OutputAwait {
  def writing[T](body: T): Output[T] = macro impl

  @compileTimeOnly("[async] `value` must be enclosed in `writing`")
  def value[T](output: Output[T]): T = ???

  def impl(c: blackbox.Context)(body: c.Tree): c.Tree = {
    import c.universe._
    val awaitSym = typeOf[OutputAwait.type].decl(TermName("value"))
    def mark(t: DefDef): Tree =
      c.internal.markForAsyncTransform(c.internal.enclosingOwner, t, awaitSym, Map.empty)
    val name = TypeName("stateMachine$async")
    q"""
      final class $name extends _root_.scala.tools.partest.async.OutputStateMachine {
        ${mark(q"""override def apply(tr$$async: _root_.scala.Option[_root_.scala.AnyRef]) = ${body}""")}
      }
      new $name().start().asInstanceOf[${c.macroApplication.tpe}]
    """
  }
}

case class Output[T](value: Option[T], written: HashMap[String, Vector[Any]])

object Output {
  def apply[T](value: T, written: (String, Any)*): Output[T] =
    new Output(Some(value), toMultiMap[String, Any](written))

  def mergeMultiMap[K, V](m1: HashMap[K, Vector[V]], m2: HashMap[K, Vector[V]]): HashMap[K, Vector[V]] =
    m1.merged(m2) { case ((k1, v1), (k2, v2)) => (k1, v1 ++ v2) }

  private def toMultiMap[K, V](written: Seq[(K, V)]): HashMap[K, Vector[V]] = {
    val mutableMap = collection.mutable.HashMap[K, mutable.Builder[V, Vector[V]]]()
    for ((k, v) <- written) mutableMap.getOrElseUpdate(k, Vector.newBuilder[V]) += v
    val immutableMapBuilder = collection.immutable.HashMap.newBuilder[K, Vector[V]]
    immutableMapBuilder ++= mutableMap.view.mapValues(_.result())
    immutableMapBuilder.result()
  }
}

abstract class OutputStateMachine extends AsyncStateMachine[Output[AnyRef], Option[AnyRef]] {
  private var written = collection.immutable.HashMap[String, Vector[Any]]()
  var result$async: Output[AnyRef] = _

  // FSM translated method
  def apply(tr$async: Option[AnyRef]): Unit

  // Required methods
  private[this] var state$async: Int = 0
  protected def state: Int = state$async
  protected def state_=(s: Int): Unit = state$async = s
  protected def completeFailure(t: Throwable): Unit = throw t
  protected def completeSuccess(value: AnyRef): Unit = result$async = Output(Some(value), written)
  protected def onComplete(f: Output[AnyRef]): Unit = ???
  protected def getCompleted(f: Output[AnyRef]): Option[AnyRef] = {
    written = Output.mergeMultiMap(written, f.written)
    f.value
  }
  protected def tryGet(tr: Option[AnyRef]): AnyRef = tr match {
    case Some(value) =>
      value.asInstanceOf[AnyRef]
    case None =>
      result$async = Output(None, written)
      this // sentinel value to indicate the dispatch loop should exit.
  }

  def start(): Output[AnyRef] = {
    apply(None)
    result$async
  }
}
lrytz/scala
src/partest/scala/tools/partest/async/OutputAwait.scala
Scala
apache-2.0
3338
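An illustrative sketch of how the writing/value pair above composes (assumptions: the async compiler phase used by partest is enabled, and the "log" key is made up). Each value call both awaits a result and merges that Output's written multimap into the running state machine.

import scala.tools.partest.async.{Output, OutputAwait}

object OutputAwaitDemo {
  def demo(): Output[Int] = OutputAwait.writing {
    val a = OutputAwait.value(Output(1, "log" -> "first"))
    val b = OutputAwait.value(Output(2, "log" -> "second"))
    a + b // value == Some(3); written("log") == Vector("first", "second")
  }
}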
package views.html
package appeal

import controllers.routes
import play.api.data.Form

import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import lila.appeal.Appeal
import lila.common.String.html.richText
import lila.mod.IpRender.RenderIp
import lila.mod.{ ModPreset, ModPresets }
import lila.report.Report.Inquiry
import lila.report.Suspect
import lila.user.{ Holder, User }

object discussion {

  case class ModData(
      mod: Holder,
      suspect: Suspect,
      presets: ModPresets,
      logins: lila.security.UserLogins.TableData,
      appeals: List[lila.appeal.Appeal],
      renderIp: RenderIp,
      inquiry: Option[Inquiry]
  )

  def apply(appeal: Appeal, textForm: Form[String])(implicit ctx: Context) =
    bits.layout("Appeal") {
      main(cls := "page-small box box-pad appeal")(
        renderAppeal(appeal, textForm, modData = none)
      )
    }

  def show(
      appeal: Appeal,
      textForm: Form[String],
      modData: ModData
  )(implicit ctx: Context) =
    bits.layout(s"Appeal by ${modData.suspect.user.username}") {
      main(cls := "box box-pad appeal")(
        renderAppeal(appeal, textForm, modData.some),
        div(cls := "appeal__actions", id := "appeal-actions")(
          modData.inquiry match {
            case None =>
              postForm(action := routes.Mod.spontaneousInquiry(appeal.id))(
                submitButton(cls := "button")("Handle this appeal")
              )
            case Some(Inquiry(mod, _)) if ctx.userId has mod =>
              postForm(action := routes.Appeal.mute(modData.suspect.user.username))(
                if (appeal.isMuted)
                  submitButton("Un-mute")(
                    title := "Be notified about user replies again",
                    cls := "button button-green button-thin"
                  )
                else
                  submitButton("Mute")(
                    title := "Don't be notified about user replies",
                    cls := "button button-red button-thin"
                  )
              )
            case Some(Inquiry(mod, _)) => frag(userIdLink(mod.some), nbsp, "is handling this.")
          },
          postForm(
            action := routes.Appeal.sendToZulip(modData.suspect.user.id),
            cls := "appeal__actions__slack"
          )(
            submitButton(cls := "button button-thin")("Send to Zulip")
          )
        )
      )
    }

  private def renderAppeal(
      appeal: Appeal,
      textForm: Form[String],
      modData: Option[ModData]
  )(implicit ctx: Context) =
    frag(
      h1(
        div(cls := "title")(
          "Appeal",
          modData.isDefined option frag(" by ", userIdLink(appeal.id.some))
        ),
        modData.isDefined option div(cls := "actions")(
          a(
            cls := "button button-empty mod-zone-toggle",
            href := routes.User.mod(appeal.id),
            titleOrText("Mod zone (Hotkey: m)"),
            dataIcon := ""
          )
        )
      ),
      modData map { m =>
        frag(
          div(cls := "mod-zone mod-zone-full none"),
          views.html.user.mod.otherUsers(m.mod, m.suspect.user, m.logins, m.appeals)(ctx, m.renderIp)(
            cls := "mod-zone communication__logins"
          )
        )
      },
      standardFlash(),
      div(cls := "body")(
        appeal.msgs.map { msg =>
          div(cls := s"appeal__msg appeal__msg--${if (appeal isByMod msg) "mod" else "suspect"}")(
            div(cls := "appeal__msg__header")(
              renderUser(appeal, msg.by, modData.isDefined),
              momentFromNowOnce(msg.at)
            ),
            div(cls := "appeal__msg__text")(richText(msg.text))
          )
        },
        if (modData.isEmpty && !appeal.canAddMsg) p("Please wait for a moderator to reply.")
        else
          modData.fold(true)(_.inquiry.isDefined) option renderForm(
            textForm,
            action = if (modData.isDefined) routes.Appeal.reply(appeal.id).url else routes.Appeal.post.url,
            isNew = false,
            presets = modData.map(_.presets)
          )
      )
    )

  private def renderUser(appeal: Appeal, userId: User.ID, asMod: Boolean)(implicit ctx: Context) =
    if (appeal isAbout userId) userIdLink(userId.some, params = asMod ?? "?mod")
    else
      span(
        userIdLink(User.lichessId.some),
        isGranted(_.Appeals) option frag(
          " (",
          userIdLink(userId.some),
          ")"
        )
      )

  def renderForm(form: Form[String], action: String, isNew: Boolean, presets: Option[ModPresets])(implicit
      ctx: Context
  ) =
    postForm(st.action := action)(
      form3.globalError(form),
      form3.group(
        form("text"),
        if (isNew) "Create an appeal" else "Add something to the appeal",
        help = !isGranted(_.Appeals) option frag("Please be concise. Maximum 1000 chars.")
      )(
        form3.textarea(_)(rows := 6)
      ),
      presets.map { ps =>
        form3.actions(
          div(
            select(cls := "appeal-presets")(
              option(st.value := "")("Presets"),
              ps.value.map { case ModPreset(name, text, _) =>
                option(
                  st.value := text,
                  st.title := text
                )(name)
              }
            ),
            isGranted(_.Presets) option a(href := routes.Mod.presets("appeal"))("Edit presets")
          ),
          form3.submit(trans.send())
        )
      } getOrElse form3.submit(trans.send())
    )
}
luanlv/lila
app/views/appeal/discussion.scala
Scala
mit
5580
/*
 * Copyright (c) 2018. Fengguo Wei and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License v2.0
 * which accompanies this distribution, and is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Detailed contributors are listed in the CONTRIBUTOR.md
 */

package org.argus.jawa.core.compiler.util

/**
 * @author <a href="mailto:[email protected]">Fengguo Wei</a>
 */
object ReadClassFile {

  class CustomClassLoader extends ClassLoader {
    def loadClass(name: String, bytecodes: Array[Byte]): Class[_ <: Any] = {
      defineClass(name, bytecodes, 0, bytecodes.length)
    }
  }
}
arguslab/Argus-SAF
jawa/src/main/scala/org/argus/jawa/core/compiler/util/ReadClassFile.scala
Scala
apache-2.0
687
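A hedged sketch of the CustomClassLoader above: define a class directly from bytecode held in memory. The demo object, the .class path, and the class name are assumptions.

import java.nio.file.{Files, Paths}
import org.argus.jawa.core.compiler.util.ReadClassFile

object DefineFromBytes {
  def main(args: Array[String]): Unit = {
    val bytes = Files.readAllBytes(Paths.get("Foo.class")) // hypothetical compiled class file
    val loader = new ReadClassFile.CustomClassLoader()
    val clazz = loader.loadClass("Foo", bytes)             // defines the class from raw bytes
    println(clazz.getName)
  }
}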
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.json import java.io.{File, FileOutputStream, StringWriter} import java.nio.charset.{StandardCharsets, UnsupportedCharsetException} import java.nio.file.{Files, Paths, StandardOpenOption} import java.sql.{Date, Timestamp} import java.util.Locale import com.fasterxml.jackson.core.JsonFactory import org.apache.hadoop.fs.{Path, PathFilter} import org.apache.hadoop.io.SequenceFile.CompressionType import org.apache.hadoop.io.compress.GzipCodec import org.apache.spark.{SparkException, TestUtils} import org.apache.spark.rdd.RDD import org.apache.spark.sql.{functions => F, _} import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions} import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.ExternalRDD import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.json.JsonInferSchema.compatibleType import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types._ import org.apache.spark.util.Utils class TestFileFilter extends PathFilter { override def accept(path: Path): Boolean = path.getParent.getName != "p=2" } class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData { import testImplicits._ def testFile(fileName: String): String = { Thread.currentThread().getContextClassLoader.getResource(fileName).toString } test("Type promotion") { def checkTypePromotion(expected: Any, actual: Any) { assert(expected.getClass == actual.getClass, s"Failed to promote ${actual.getClass} to ${expected.getClass}.") assert(expected == actual, s"Promoted value ${actual}(${actual.getClass}) does not equal the expected value " + s"${expected}(${expected.getClass}).") } val factory = new JsonFactory() def enforceCorrectType(value: Any, dataType: DataType): Any = { val writer = new StringWriter() Utils.tryWithResource(factory.createGenerator(writer)) { generator => generator.writeObject(value) generator.flush() } val dummyOption = new JSONOptions(Map.empty[String, String], "GMT") val dummySchema = StructType(Seq.empty) val parser = new JacksonParser(dummySchema, dummyOption) Utils.tryWithResource(factory.createParser(writer.toString)) { jsonParser => jsonParser.nextToken() val converter = parser.makeConverter(dataType) converter.apply(jsonParser) } } val intNumber: Int = 2147483647 checkTypePromotion(intNumber, enforceCorrectType(intNumber, IntegerType)) checkTypePromotion(intNumber.toLong, enforceCorrectType(intNumber, LongType)) checkTypePromotion(intNumber.toDouble, enforceCorrectType(intNumber, DoubleType)) checkTypePromotion( Decimal(intNumber), enforceCorrectType(intNumber, DecimalType.SYSTEM_DEFAULT)) 
val longNumber: Long = 9223372036854775807L checkTypePromotion(longNumber, enforceCorrectType(longNumber, LongType)) checkTypePromotion(longNumber.toDouble, enforceCorrectType(longNumber, DoubleType)) checkTypePromotion( Decimal(longNumber), enforceCorrectType(longNumber, DecimalType.SYSTEM_DEFAULT)) val doubleNumber: Double = 1.7976931348623157E308d checkTypePromotion(doubleNumber.toDouble, enforceCorrectType(doubleNumber, DoubleType)) checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber * 1000L)), enforceCorrectType(intNumber, TimestampType)) checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber.toLong * 1000L)), enforceCorrectType(intNumber.toLong, TimestampType)) val strTime = "2014-09-30 12:34:56" checkTypePromotion(DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(strTime)), enforceCorrectType(strTime, TimestampType)) val strDate = "2014-10-15" checkTypePromotion( DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType)) val ISO8601Time1 = "1970-01-01T01:00:01.0Z" val ISO8601Time2 = "1970-01-01T02:00:01-01:00" checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)), enforceCorrectType(ISO8601Time1, TimestampType)) checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)), enforceCorrectType(ISO8601Time2, TimestampType)) val ISO8601Date = "1970-01-01" checkTypePromotion(DateTimeUtils.millisToDays(32400000), enforceCorrectType(ISO8601Date, DateType)) } test("Get compatible type") { def checkDataType(t1: DataType, t2: DataType, expected: DataType) { var actual = compatibleType(t1, t2) assert(actual == expected, s"Expected $expected as the most general data type for $t1 and $t2, found $actual") actual = compatibleType(t2, t1) assert(actual == expected, s"Expected $expected as the most general data type for $t1 and $t2, found $actual") } // NullType checkDataType(NullType, BooleanType, BooleanType) checkDataType(NullType, IntegerType, IntegerType) checkDataType(NullType, LongType, LongType) checkDataType(NullType, DoubleType, DoubleType) checkDataType(NullType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT) checkDataType(NullType, StringType, StringType) checkDataType(NullType, ArrayType(IntegerType), ArrayType(IntegerType)) checkDataType(NullType, StructType(Nil), StructType(Nil)) checkDataType(NullType, NullType, NullType) // BooleanType checkDataType(BooleanType, BooleanType, BooleanType) checkDataType(BooleanType, IntegerType, StringType) checkDataType(BooleanType, LongType, StringType) checkDataType(BooleanType, DoubleType, StringType) checkDataType(BooleanType, DecimalType.SYSTEM_DEFAULT, StringType) checkDataType(BooleanType, StringType, StringType) checkDataType(BooleanType, ArrayType(IntegerType), StringType) checkDataType(BooleanType, StructType(Nil), StringType) // IntegerType checkDataType(IntegerType, IntegerType, IntegerType) checkDataType(IntegerType, LongType, LongType) checkDataType(IntegerType, DoubleType, DoubleType) checkDataType(IntegerType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT) checkDataType(IntegerType, StringType, StringType) checkDataType(IntegerType, ArrayType(IntegerType), StringType) checkDataType(IntegerType, StructType(Nil), StringType) // LongType checkDataType(LongType, LongType, LongType) checkDataType(LongType, DoubleType, DoubleType) checkDataType(LongType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT) checkDataType(LongType, StringType, StringType) checkDataType(LongType, ArrayType(IntegerType), StringType) 
checkDataType(LongType, StructType(Nil), StringType) // DoubleType checkDataType(DoubleType, DoubleType, DoubleType) checkDataType(DoubleType, DecimalType.SYSTEM_DEFAULT, DoubleType) checkDataType(DoubleType, StringType, StringType) checkDataType(DoubleType, ArrayType(IntegerType), StringType) checkDataType(DoubleType, StructType(Nil), StringType) // DecimalType checkDataType(DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT) checkDataType(DecimalType.SYSTEM_DEFAULT, StringType, StringType) checkDataType(DecimalType.SYSTEM_DEFAULT, ArrayType(IntegerType), StringType) checkDataType(DecimalType.SYSTEM_DEFAULT, StructType(Nil), StringType) // StringType checkDataType(StringType, StringType, StringType) checkDataType(StringType, ArrayType(IntegerType), StringType) checkDataType(StringType, StructType(Nil), StringType) // ArrayType checkDataType(ArrayType(IntegerType), ArrayType(IntegerType), ArrayType(IntegerType)) checkDataType(ArrayType(IntegerType), ArrayType(LongType), ArrayType(LongType)) checkDataType(ArrayType(IntegerType), ArrayType(StringType), ArrayType(StringType)) checkDataType(ArrayType(IntegerType), StructType(Nil), StringType) checkDataType( ArrayType(IntegerType, true), ArrayType(IntegerType), ArrayType(IntegerType, true)) checkDataType( ArrayType(IntegerType, true), ArrayType(IntegerType, false), ArrayType(IntegerType, true)) checkDataType( ArrayType(IntegerType, true), ArrayType(IntegerType, true), ArrayType(IntegerType, true)) checkDataType( ArrayType(IntegerType, false), ArrayType(IntegerType), ArrayType(IntegerType, true)) checkDataType( ArrayType(IntegerType, false), ArrayType(IntegerType, false), ArrayType(IntegerType, false)) checkDataType( ArrayType(IntegerType, false), ArrayType(IntegerType, true), ArrayType(IntegerType, true)) // StructType checkDataType(StructType(Nil), StructType(Nil), StructType(Nil)) checkDataType( StructType(StructField("f1", IntegerType, true) :: Nil), StructType(StructField("f1", IntegerType, true) :: Nil), StructType(StructField("f1", IntegerType, true) :: Nil)) checkDataType( StructType(StructField("f1", IntegerType, true) :: Nil), StructType(Nil), StructType(StructField("f1", IntegerType, true) :: Nil)) checkDataType( StructType( StructField("f1", IntegerType, true) :: StructField("f2", IntegerType, true) :: Nil), StructType(StructField("f1", LongType, true) :: Nil), StructType( StructField("f1", LongType, true) :: StructField("f2", IntegerType, true) :: Nil)) checkDataType( StructType( StructField("f1", IntegerType, true) :: Nil), StructType( StructField("f2", IntegerType, true) :: Nil), StructType( StructField("f1", IntegerType, true) :: StructField("f2", IntegerType, true) :: Nil)) checkDataType( StructType( StructField("f1", IntegerType, true) :: Nil), DecimalType.SYSTEM_DEFAULT, StringType) } test("Complex field and type inferring with null in sampling") { val jsonDF = spark.read.json(jsonNullStruct) val expectedSchema = StructType( StructField("headers", StructType( StructField("Charset", StringType, true) :: StructField("Host", StringType, true) :: Nil) , true) :: StructField("ip", StringType, true) :: StructField("nullstr", StringType, true):: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select nullstr, headers.Host from jsonTable"), Seq(Row("", "1.abc.com"), Row("", null), Row("", null), Row(null, null)) ) } test("Primitive field and type inferring") { val jsonDF = spark.read.json(primitiveFieldAndType) val expectedSchema = StructType( 
StructField("bigInteger", DecimalType(20, 0), true) :: StructField("boolean", BooleanType, true) :: StructField("double", DoubleType, true) :: StructField("integer", LongType, true) :: StructField("long", LongType, true) :: StructField("null", StringType, true) :: StructField("string", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row(new java.math.BigDecimal("92233720368547758070"), true, 1.7976931348623157E308, 10, 21474836470L, null, "this is a simple string.") ) } test("Complex field and type inferring") { val jsonDF = spark.read.json(complexFieldAndType1) val expectedSchema = StructType( StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) :: StructField("arrayOfArray2", ArrayType(ArrayType(DoubleType, true), true), true) :: StructField("arrayOfBigInteger", ArrayType(DecimalType(21, 0), true), true) :: StructField("arrayOfBoolean", ArrayType(BooleanType, true), true) :: StructField("arrayOfDouble", ArrayType(DoubleType, true), true) :: StructField("arrayOfInteger", ArrayType(LongType, true), true) :: StructField("arrayOfLong", ArrayType(LongType, true), true) :: StructField("arrayOfNull", ArrayType(StringType, true), true) :: StructField("arrayOfString", ArrayType(StringType, true), true) :: StructField("arrayOfStruct", ArrayType( StructType( StructField("field1", BooleanType, true) :: StructField("field2", StringType, true) :: StructField("field3", StringType, true) :: Nil), true), true) :: StructField("struct", StructType( StructField("field1", BooleanType, true) :: StructField("field2", DecimalType(20, 0), true) :: Nil), true) :: StructField("structWithArrayFields", StructType( StructField("field1", ArrayType(LongType, true), true) :: StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") // Access elements of a primitive array. checkAnswer( sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"), Row("str1", "str2", null) ) // Access an array of null values. checkAnswer( sql("select arrayOfNull from jsonTable"), Row(Seq(null, null, null, null)) ) // Access elements of a BigInteger array (we use DecimalType internally). checkAnswer( sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"), Row(new java.math.BigDecimal("922337203685477580700"), new java.math.BigDecimal("-922337203685477580800"), null) ) // Access elements of an array of arrays. checkAnswer( sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"), Row(Seq("1", "2", "3"), Seq("str1", "str2")) ) // Access elements of an array of arrays. checkAnswer( sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"), Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1)) ) // Access elements of an array inside a filed with the type of ArrayType(ArrayType). checkAnswer( sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"), Row("str2", 2.1) ) // Access elements of an array of structs. checkAnswer( sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " + "from jsonTable"), Row( Row(true, "str1", null), Row(false, null, null), Row(null, null, null), null) ) // Access a struct and fields inside of it. 
checkAnswer( sql("select struct, struct.field1, struct.field2 from jsonTable"), Row( Row(true, new java.math.BigDecimal("92233720368547758070")), true, new java.math.BigDecimal("92233720368547758070")) :: Nil ) // Access an array field of a struct. checkAnswer( sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"), Row(Seq(4, 5, 6), Seq("str1", "str2")) ) // Access elements of an array field of a struct. checkAnswer( sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"), Row(5, null) ) } test("GetField operation on complex data type") { val jsonDF = spark.read.json(complexFieldAndType1) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"), Row(true, "str1") ) // Getting all values of a specific field from an array of structs. checkAnswer( sql("select arrayOfStruct.field1, arrayOfStruct.field2 from jsonTable"), Row(Seq(true, false, null), Seq("str1", null, null)) ) } test("Type conflict in primitive field values") { val jsonDF = spark.read.json(primitiveFieldValueTypeConflict) val expectedSchema = StructType( StructField("num_bool", StringType, true) :: StructField("num_num_1", LongType, true) :: StructField("num_num_2", DoubleType, true) :: StructField("num_num_3", DoubleType, true) :: StructField("num_str", StringType, true) :: StructField("str_bool", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row("true", 11L, null, 1.1, "13.1", "str1") :: Row("12", null, 21474836470.9, null, null, "true") :: Row("false", 21474836470L, 92233720368547758070d, 100, "str1", "false") :: Row(null, 21474836570L, 1.1, 21474836470L, "92233720368547758070", null) :: Nil ) // Number and Boolean conflict: resolve the type as number in this query. checkAnswer( sql("select num_bool - 10 from jsonTable where num_bool > 11"), Row(2) ) // Widening to LongType checkAnswer( sql("select num_num_1 - 100 from jsonTable where num_num_1 > 11"), Row(21474836370L) :: Row(21474836470L) :: Nil ) checkAnswer( sql("select num_num_1 - 100 from jsonTable where num_num_1 > 10"), Row(-89) :: Row(21474836370L) :: Row(21474836470L) :: Nil ) // Widening to DecimalType checkAnswer( sql("select num_num_2 + 1.3 from jsonTable where num_num_2 > 1.1"), Row(21474836472.2) :: Row(92233720368547758071.3) :: Nil ) // Widening to Double checkAnswer( sql("select num_num_3 + 1.2 from jsonTable where num_num_3 > 1.1"), Row(101.2) :: Row(21474836471.2) :: Nil ) // Number and String conflict: resolve the type as number in this query. checkAnswer( sql("select num_str + 1.2 from jsonTable where num_str > 14d"), Row(92233720368547758071.2) ) // Number and String conflict: resolve the type as number in this query. checkAnswer( sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"), Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue) ) // String and Boolean conflict: resolve the type as string. checkAnswer( sql("select * from jsonTable where str_bool = 'str1'"), Row("true", 11L, null, 1.1, "13.1", "str1") ) } ignore("Type conflict in primitive field values (Ignored)") { val jsonDF = spark.read.json(primitiveFieldValueTypeConflict) jsonDF.createOrReplaceTempView("jsonTable") // Right now, the analyzer does not promote strings in a boolean expression. // Number and Boolean conflict: resolve the type as boolean in this query. 
checkAnswer( sql("select num_bool from jsonTable where NOT num_bool"), Row(false) ) checkAnswer( sql("select str_bool from jsonTable where NOT str_bool"), Row(false) ) // Right now, the analyzer does not know that num_bool should be treated as a boolean. // Number and Boolean conflict: resolve the type as boolean in this query. checkAnswer( sql("select num_bool from jsonTable where num_bool"), Row(true) ) checkAnswer( sql("select str_bool from jsonTable where str_bool"), Row(false) ) // The plan of the following DSL is // Project [(CAST(num_str#65:4, DoubleType) + 1.2) AS num#78] // Filter (CAST(CAST(num_str#65:4, DoubleType), DecimalType) > 92233720368547758060) // ExistingRdd [num_bool#61,num_num_1#62L,num_num_2#63,num_num_3#64,num_str#65,str_bool#66] // We should directly cast num_str to DecimalType and also need to do the right type promotion // in the Project. checkAnswer( jsonDF. where('num_str >= BigDecimal("92233720368547758060")). select(('num_str + 1.2).as("num")), Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue()) ) // The following test will fail. The type of num_str is StringType. // So, to evaluate num_str + 1.2, we first need to use Cast to convert the type. // In our test data, one value of num_str is 13.1. // The result of (CAST(num_str#65:4, DoubleType) + 1.2) for this value is 14.299999999999999, // which is not 14.3. // Number and String conflict: resolve the type as number in this query. checkAnswer( sql("select num_str + 1.2 from jsonTable where num_str > 13"), Row(BigDecimal("14.3")) :: Row(BigDecimal("92233720368547758071.2")) :: Nil ) } test("Type conflict in complex field values") { val jsonDF = spark.read.json(complexFieldValueTypeConflict) val expectedSchema = StructType( StructField("array", ArrayType(LongType, true), true) :: StructField("num_struct", StringType, true) :: StructField("str_array", StringType, true) :: StructField("struct", StructType( StructField("field", StringType, true) :: Nil), true) :: StructField("struct_array", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row(Seq(), "11", "[1,2,3]", Row(null), "[]") :: Row(null, """{"field":false}""", null, null, "{}") :: Row(Seq(4, 5, 6), null, "str", Row(null), "[7,8,9]") :: Row(Seq(7), "{}", """["str1","str2",33]""", Row("str"), """{"field":true}""") :: Nil ) } test("Type conflict in array elements") { val jsonDF = spark.read.json(arrayElementTypeConflict) val expectedSchema = StructType( StructField("array1", ArrayType(StringType, true), true) :: StructField("array2", ArrayType(StructType( StructField("field", LongType, true) :: Nil), true), true) :: StructField("array3", ArrayType(StringType, true), true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row(Seq("1", "1.1", "true", null, "[]", "{}", "[2,3,4]", """{"field":"str"}"""), Seq(Row(214748364700L), Row(1)), null) :: Row(null, null, Seq("""{"field":"str"}""", """{"field":1}""")) :: Row(null, null, Seq("1", "2", "3")) :: Nil ) // Treat an element as a number. 
checkAnswer( sql("select array1[0] + 1 from jsonTable where array1 is not null"), Row(2) ) } test("Handling missing fields") { val jsonDF = spark.read.json(missingFields) val expectedSchema = StructType( StructField("a", BooleanType, true) :: StructField("b", LongType, true) :: StructField("c", ArrayType(LongType, true), true) :: StructField("d", StructType( StructField("field", BooleanType, true) :: Nil), true) :: StructField("e", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") } test("Loading a JSON dataset from a text file") { val dir = Utils.createTempDir() dir.delete() val path = dir.getCanonicalPath primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path) val jsonDF = spark.read.json(path) val expectedSchema = StructType( StructField("bigInteger", DecimalType(20, 0), true) :: StructField("boolean", BooleanType, true) :: StructField("double", DoubleType, true) :: StructField("integer", LongType, true) :: StructField("long", LongType, true) :: StructField("null", StringType, true) :: StructField("string", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row(new java.math.BigDecimal("92233720368547758070"), true, 1.7976931348623157E308, 10, 21474836470L, null, "this is a simple string.") ) } test("Loading a JSON dataset primitivesAsString returns schema with primitive types as strings") { val dir = Utils.createTempDir() dir.delete() val path = dir.getCanonicalPath primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path) val jsonDF = spark.read.option("primitivesAsString", "true").json(path) val expectedSchema = StructType( StructField("bigInteger", StringType, true) :: StructField("boolean", StringType, true) :: StructField("double", StringType, true) :: StructField("integer", StringType, true) :: StructField("long", StringType, true) :: StructField("null", StringType, true) :: StructField("string", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row("92233720368547758070", "true", "1.7976931348623157E308", "10", "21474836470", null, "this is a simple string.") ) } test("Loading a JSON dataset primitivesAsString returns complex fields as strings") { val jsonDF = spark.read.option("primitivesAsString", "true").json(complexFieldAndType1) val expectedSchema = StructType( StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) :: StructField("arrayOfArray2", ArrayType(ArrayType(StringType, true), true), true) :: StructField("arrayOfBigInteger", ArrayType(StringType, true), true) :: StructField("arrayOfBoolean", ArrayType(StringType, true), true) :: StructField("arrayOfDouble", ArrayType(StringType, true), true) :: StructField("arrayOfInteger", ArrayType(StringType, true), true) :: StructField("arrayOfLong", ArrayType(StringType, true), true) :: StructField("arrayOfNull", ArrayType(StringType, true), true) :: StructField("arrayOfString", ArrayType(StringType, true), true) :: StructField("arrayOfStruct", ArrayType( StructType( StructField("field1", StringType, true) :: StructField("field2", StringType, true) :: StructField("field3", StringType, true) :: Nil), true), true) :: StructField("struct", StructType( StructField("field1", StringType, true) :: StructField("field2", StringType, true) :: Nil), true) :: 
StructField("structWithArrayFields", StructType( StructField("field1", ArrayType(StringType, true), true) :: StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") // Access elements of a primitive array. checkAnswer( sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"), Row("str1", "str2", null) ) // Access an array of null values. checkAnswer( sql("select arrayOfNull from jsonTable"), Row(Seq(null, null, null, null)) ) // Access elements of a BigInteger array (we use DecimalType internally). checkAnswer( sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"), Row("922337203685477580700", "-922337203685477580800", null) ) // Access elements of an array of arrays. checkAnswer( sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"), Row(Seq("1", "2", "3"), Seq("str1", "str2")) ) // Access elements of an array of arrays. checkAnswer( sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"), Row(Seq("1", "2", "3"), Seq("1.1", "2.1", "3.1")) ) // Access elements of an array inside a filed with the type of ArrayType(ArrayType). checkAnswer( sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"), Row("str2", "2.1") ) // Access elements of an array of structs. checkAnswer( sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " + "from jsonTable"), Row( Row("true", "str1", null), Row("false", null, null), Row(null, null, null), null) ) // Access a struct and fields inside of it. checkAnswer( sql("select struct, struct.field1, struct.field2 from jsonTable"), Row( Row("true", "92233720368547758070"), "true", "92233720368547758070") :: Nil ) // Access an array field of a struct. checkAnswer( sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"), Row(Seq("4", "5", "6"), Seq("str1", "str2")) ) // Access elements of an array field of a struct. checkAnswer( sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"), Row("5", null) ) } test("Loading a JSON dataset prefersDecimal returns schema with float types as BigDecimal") { val jsonDF = spark.read.option("prefersDecimal", "true").json(primitiveFieldAndType) val expectedSchema = StructType( StructField("bigInteger", DecimalType(20, 0), true) :: StructField("boolean", BooleanType, true) :: StructField("double", DecimalType(17, -292), true) :: StructField("integer", LongType, true) :: StructField("long", LongType, true) :: StructField("null", StringType, true) :: StructField("string", StringType, true) :: Nil) assert(expectedSchema === jsonDF.schema) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select * from jsonTable"), Row(BigDecimal("92233720368547758070"), true, BigDecimal("1.7976931348623157E308"), 10, 21474836470L, null, "this is a simple string.") ) } test("Find compatible types even if inferred DecimalType is not capable of other IntegralType") { val mixedIntegerAndDoubleRecords = Seq( """{"a": 3, "b": 1.1}""", s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS() val jsonDF = spark.read .option("prefersDecimal", "true") .json(mixedIntegerAndDoubleRecords) // The values in `a` field will be decimals as they fit in decimal. For `b` field, // they will be doubles as `1.0E-39D` does not fit. 
val expectedSchema = StructType( StructField("a", DecimalType(21, 1), true) :: StructField("b", DoubleType, true) :: Nil) assert(expectedSchema === jsonDF.schema) checkAnswer( jsonDF, Row(BigDecimal("3"), 1.1D) :: Row(BigDecimal("3.1"), 1.0E-39D) :: Nil ) } test("Infer big integers correctly even when it does not fit in decimal") { val jsonDF = spark.read .json(bigIntegerRecords) // The value in `a` field will be a double as it does not fit in decimal. For `b` field, // it will be a decimal as `92233720368547758070`. val expectedSchema = StructType( StructField("a", DoubleType, true) :: StructField("b", DecimalType(20, 0), true) :: Nil) assert(expectedSchema === jsonDF.schema) checkAnswer(jsonDF, Row(1.0E38D, BigDecimal("92233720368547758070"))) } test("Infer floating-point values correctly even when it does not fit in decimal") { val jsonDF = spark.read .option("prefersDecimal", "true") .json(floatingValueRecords) // The value in `a` field will be a double as it does not fit in decimal. For `b` field, // it will be a decimal as `0.01` by having a precision equal to the scale. val expectedSchema = StructType( StructField("a", DoubleType, true) :: StructField("b", DecimalType(2, 2), true):: Nil) assert(expectedSchema === jsonDF.schema) checkAnswer(jsonDF, Row(1.0E-39D, BigDecimal("0.01"))) val mergedJsonDF = spark.read .option("prefersDecimal", "true") .json(floatingValueRecords.union(bigIntegerRecords)) val expectedMergedSchema = StructType( StructField("a", DoubleType, true) :: StructField("b", DecimalType(22, 2), true):: Nil) assert(expectedMergedSchema === mergedJsonDF.schema) checkAnswer( mergedJsonDF, Row(1.0E-39D, BigDecimal("0.01")) :: Row(1.0E38D, BigDecimal("92233720368547758070")) :: Nil ) } test("Loading a JSON dataset from a text file with SQL") { val dir = Utils.createTempDir() dir.delete() val path = dir.toURI.toString primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path) sql( s""" |CREATE TEMPORARY VIEW jsonTableSQL |USING org.apache.spark.sql.json |OPTIONS ( | path '$path' |) """.stripMargin) checkAnswer( sql("select * from jsonTableSQL"), Row(new java.math.BigDecimal("92233720368547758070"), true, 1.7976931348623157E308, 10, 21474836470L, null, "this is a simple string.") ) } test("Applying schemas") { val dir = Utils.createTempDir() dir.delete() val path = dir.getCanonicalPath primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path) val schema = StructType( StructField("bigInteger", DecimalType.SYSTEM_DEFAULT, true) :: StructField("boolean", BooleanType, true) :: StructField("double", DoubleType, true) :: StructField("integer", IntegerType, true) :: StructField("long", LongType, true) :: StructField("null", StringType, true) :: StructField("string", StringType, true) :: Nil) val jsonDF1 = spark.read.schema(schema).json(path) assert(schema === jsonDF1.schema) jsonDF1.createOrReplaceTempView("jsonTable1") checkAnswer( sql("select * from jsonTable1"), Row(new java.math.BigDecimal("92233720368547758070"), true, 1.7976931348623157E308, 10, 21474836470L, null, "this is a simple string.") ) val jsonDF2 = spark.read.schema(schema).json(primitiveFieldAndType) assert(schema === jsonDF2.schema) jsonDF2.createOrReplaceTempView("jsonTable2") checkAnswer( sql("select * from jsonTable2"), Row(new java.math.BigDecimal("92233720368547758070"), true, 1.7976931348623157E308, 10, 21474836470L, null, "this is a simple string.") ) } test("Applying schemas with MapType") { val schemaWithSimpleMap = StructType( StructField("map", 
MapType(StringType, IntegerType, true), false) :: Nil) val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1) jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap") checkAnswer( sql("select `map` from jsonWithSimpleMap"), Row(Map("a" -> 1)) :: Row(Map("b" -> 2)) :: Row(Map("c" -> 3)) :: Row(Map("c" -> 1, "d" -> 4)) :: Row(Map("e" -> null)) :: Nil ) withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") { checkAnswer( sql("select `map`['c'] from jsonWithSimpleMap"), Row(null) :: Row(null) :: Row(3) :: Row(1) :: Row(null) :: Nil ) } val innerStruct = StructType( StructField("field1", ArrayType(IntegerType, true), true) :: StructField("field2", IntegerType, true) :: Nil) val schemaWithComplexMap = StructType( StructField("map", MapType(StringType, innerStruct, true), false) :: Nil) val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2) jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap") checkAnswer( sql("select `map` from jsonWithComplexMap"), Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) :: Row(Map("b" -> Row(null, 2))) :: Row(Map("c" -> Row(Seq(), 4))) :: Row(Map("c" -> Row(null, 3), "d" -> Row(Seq(null), null))) :: Row(Map("e" -> null)) :: Row(Map("f" -> Row(null, null))) :: Nil ) withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") { checkAnswer( sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"), Row(Seq(1, 2, 3, null), null) :: Row(null, null) :: Row(null, 4) :: Row(null, 3) :: Row(null, null) :: Row(null, null) :: Nil ) } } test("SPARK-2096 Correctly parse dot notations") { val jsonDF = spark.read.json(complexFieldAndType2) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"), Row(true, "str1") ) checkAnswer( sql( """ |select complexArrayOfStruct[0].field1[1].inner2[0], complexArrayOfStruct[1].field2[0][1] |from jsonTable """.stripMargin), Row("str2", 6) ) } test("SPARK-3390 Complex arrays") { val jsonDF = spark.read.json(complexFieldAndType2) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql( """ |select arrayOfArray1[0][0][0], arrayOfArray1[1][0][1], arrayOfArray1[1][1][0] |from jsonTable """.stripMargin), Row(5, 7, 8) ) checkAnswer( sql( """ |select arrayOfArray2[0][0][0].inner1, arrayOfArray2[1][0], |arrayOfArray2[1][1][1].inner2[0], arrayOfArray2[2][0][0].inner3[0][0].inner4 |from jsonTable """.stripMargin), Row("str1", Nil, "str4", 2) ) } test("SPARK-3308 Read top level JSON arrays") { val jsonDF = spark.read.json(jsonArray) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql( """ |select a, b, c |from jsonTable """.stripMargin), Row("str_a_1", null, null) :: Row("str_a_2", null, null) :: Row(null, "str_b_3", null) :: Row("str_a_4", "str_b_4", "str_c_4") :: Nil ) } test("Corrupt records: FAILFAST mode") { // `FAILFAST` mode should throw an exception for corrupt records. val exceptionOne = intercept[SparkException] { spark.read .option("mode", "FAILFAST") .json(corruptRecords) }.getMessage assert(exceptionOne.contains( "Malformed records are detected in schema inference. Parse Mode: FAILFAST.")) val exceptionTwo = intercept[SparkException] { spark.read .option("mode", "FAILFAST") .schema("a string") .json(corruptRecords) .collect() }.getMessage assert(exceptionTwo.contains( "Malformed records are detected in record parsing. 
Parse Mode: FAILFAST.")) } test("Corrupt records: DROPMALFORMED mode") { val schemaOne = StructType( StructField("a", StringType, true) :: StructField("b", StringType, true) :: StructField("c", StringType, true) :: Nil) val schemaTwo = StructType( StructField("a", StringType, true) :: Nil) // `DROPMALFORMED` mode should skip corrupt records val jsonDFOne = spark.read .option("mode", "DROPMALFORMED") .json(corruptRecords) checkAnswer( jsonDFOne, Row("str_a_4", "str_b_4", "str_c_4") :: Nil ) assert(jsonDFOne.schema === schemaOne) val jsonDFTwo = spark.read .option("mode", "DROPMALFORMED") .schema(schemaTwo) .json(corruptRecords) checkAnswer( jsonDFTwo, Row("str_a_4") :: Nil) assert(jsonDFTwo.schema === schemaTwo) } test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") { val schema = new StructType().add("dummy", StringType) // `DROPMALFORMED` mode should skip corrupt records val jsonDF = spark.read .option("mode", "DROPMALFORMED") .json(additionalCorruptRecords) checkAnswer( jsonDF, Row("test")) assert(jsonDF.schema === schema) } test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") { val schema = StructType( StructField("a", StringType, true) :: StructField("b", StringType, true) :: StructField("c", StringType, true) :: Nil) val jsonDF = spark.read.schema(schema).json(corruptRecords) checkAnswer( jsonDF.select($"a", $"b", $"c"), Seq( // Corrupted records are replaced with null Row(null, null, null), Row(null, null, null), Row(null, null, null), Row("str_a_4", "str_b_4", "str_c_4"), Row(null, null, null)) ) } test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") { // Test if we can query corrupt records. withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") { val jsonDF = spark.read.json(corruptRecords) val schema = StructType( StructField("_unparsed", StringType, true) :: StructField("a", StringType, true) :: StructField("b", StringType, true) :: StructField("c", StringType, true) :: Nil) assert(schema === jsonDF.schema) // In HiveContext, backticks should be used to access columns starting with a underscore. 
checkAnswer( jsonDF.select($"a", $"b", $"c", $"_unparsed"), Row(null, null, null, "{") :: Row(null, null, null, """{"a":1, b:2}""") :: Row(null, null, null, """{"a":{, b:3}""") :: Row("str_a_4", "str_b_4", "str_c_4", null) :: Row(null, null, null, "]") :: Nil ) checkAnswer( jsonDF.filter($"_unparsed".isNull).select($"a", $"b", $"c"), Row("str_a_4", "str_b_4", "str_c_4") ) checkAnswer( jsonDF.filter($"_unparsed".isNotNull).select($"_unparsed"), Row("{") :: Row("""{"a":1, b:2}""") :: Row("""{"a":{, b:3}""") :: Row("]") :: Nil ) } } test("SPARK-13953 Rename the corrupt record field via option") { val jsonDF = spark.read .option("columnNameOfCorruptRecord", "_malformed") .json(corruptRecords) val schema = StructType( StructField("_malformed", StringType, true) :: StructField("a", StringType, true) :: StructField("b", StringType, true) :: StructField("c", StringType, true) :: Nil) assert(schema === jsonDF.schema) checkAnswer( jsonDF.selectExpr("a", "b", "c", "_malformed"), Row(null, null, null, "{") :: Row(null, null, null, """{"a":1, b:2}""") :: Row(null, null, null, """{"a":{, b:3}""") :: Row("str_a_4", "str_b_4", "str_c_4", null) :: Row(null, null, null, "]") :: Nil ) } test("SPARK-4068: nulls in arrays") { val jsonDF = spark.read.json(nullsInArrays) jsonDF.createOrReplaceTempView("jsonTable") val schema = StructType( StructField("field1", ArrayType(ArrayType(ArrayType(ArrayType(StringType, true), true), true), true), true) :: StructField("field2", ArrayType(ArrayType( StructType(StructField("Test", LongType, true) :: Nil), true), true), true) :: StructField("field3", ArrayType(ArrayType( StructType(StructField("Test", StringType, true) :: Nil), true), true), true) :: StructField("field4", ArrayType(ArrayType(ArrayType(LongType, true), true), true), true) :: Nil) assert(schema === jsonDF.schema) checkAnswer( sql( """ |SELECT field1, field2, field3, field4 |FROM jsonTable """.stripMargin), Row(Seq(Seq(null), Seq(Seq(Seq("Test")))), null, null, null) :: Row(null, Seq(null, Seq(Row(1))), null, null) :: Row(null, null, Seq(Seq(null), Seq(Row("2"))), null) :: Row(null, null, null, Seq(Seq(null, Seq(1, 2, 3)))) :: Nil ) } test("SPARK-4228 DataFrame to JSON") { val schema1 = StructType( StructField("f1", IntegerType, false) :: StructField("f2", StringType, false) :: StructField("f3", BooleanType, false) :: StructField("f4", ArrayType(StringType), nullable = true) :: StructField("f5", IntegerType, true) :: Nil) val rowRDD1 = unparsedStrings.map { r => val values = r.split(",").map(_.trim) val v5 = try values(3).toInt catch { case _: NumberFormatException => null } Row(values(0).toInt, values(1), values(2).toBoolean, r.split(",").toList, v5) } val df1 = spark.createDataFrame(rowRDD1, schema1) df1.createOrReplaceTempView("applySchema1") val df2 = df1.toDF val result = df2.toJSON.collect() // scalastyle:off assert(result(0) === "{\\"f1\\":1,\\"f2\\":\\"A1\\",\\"f3\\":true,\\"f4\\":[\\"1\\",\\" A1\\",\\" true\\",\\" null\\"]}") assert(result(3) === "{\\"f1\\":4,\\"f2\\":\\"D4\\",\\"f3\\":true,\\"f4\\":[\\"4\\",\\" D4\\",\\" true\\",\\" 2147483644\\"],\\"f5\\":2147483644}") // scalastyle:on val schema2 = StructType( StructField("f1", StructType( StructField("f11", IntegerType, false) :: StructField("f12", BooleanType, false) :: Nil), false) :: StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil) val rowRDD2 = unparsedStrings.map { r => val values = r.split(",").map(_.trim) val v4 = try values(3).toInt catch { case _: NumberFormatException => null } Row(Row(values(0).toInt, 
        values(2).toBoolean), Map(values(1) -> v4))
    }

    val df3 = spark.createDataFrame(rowRDD2, schema2)
    df3.createOrReplaceTempView("applySchema2")
    val df4 = df3.toDF
    val result2 = df4.toJSON.collect()

    assert(result2(1) === "{\"f1\":{\"f11\":2,\"f12\":false},\"f2\":{\"B2\":null}}")
    assert(result2(3) === "{\"f1\":{\"f11\":4,\"f12\":true},\"f2\":{\"D4\":2147483644}}")

    val jsonDF = spark.read.json(primitiveFieldAndType)
    val primTable = spark.read.json(jsonDF.toJSON)
    primTable.createOrReplaceTempView("primitiveTable")
    checkAnswer(
      sql("select * from primitiveTable"),
      Row(new java.math.BigDecimal("92233720368547758070"),
        true,
        1.7976931348623157E308,
        10,
        21474836470L,
        "this is a simple string.")
    )

    val complexJsonDF = spark.read.json(complexFieldAndType1)
    val compTable = spark.read.json(complexJsonDF.toJSON)
    compTable.createOrReplaceTempView("complexTable")
    // Access elements of a primitive array.
    checkAnswer(
      sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
      Row("str1", "str2", null)
    )

    // Access an array of null values.
    checkAnswer(
      sql("select arrayOfNull from complexTable"),
      Row(Seq(null, null, null, null))
    )

    // Access elements of a BigInteger array (we use DecimalType internally).
    checkAnswer(
      sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] " +
        " from complexTable"),
      Row(new java.math.BigDecimal("922337203685477580700"),
        new java.math.BigDecimal("-922337203685477580800"), null)
    )

    // Access elements of an array of arrays.
    checkAnswer(
      sql("select arrayOfArray1[0], arrayOfArray1[1] from complexTable"),
      Row(Seq("1", "2", "3"), Seq("str1", "str2"))
    )

    // Access elements of an array of arrays.
    checkAnswer(
      sql("select arrayOfArray2[0], arrayOfArray2[1] from complexTable"),
      Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
    )

    // Access elements of an array inside a field with the type of ArrayType(ArrayType).
    checkAnswer(
      sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from complexTable"),
      Row("str2", 2.1)
    )

    // Access a struct and fields inside of it.
    checkAnswer(
      sql("select struct, struct.field1, struct.field2 from complexTable"),
      Row(
        Row(true, new java.math.BigDecimal("92233720368547758070")),
        true,
        new java.math.BigDecimal("92233720368547758070")) :: Nil
    )

    // Access an array field of a struct.
    checkAnswer(
      sql("select structWithArrayFields.field1, structWithArrayFields.field2 from complexTable"),
      Row(Seq(4, 5, 6), Seq("str1", "str2"))
    )

    // Access elements of an array field of a struct.
checkAnswer( sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] " + "from complexTable"), Row(5, null) ) } test("Dataset toJSON doesn't construct rdd") { val containsRDD = spark.emptyDataFrame.toJSON.queryExecution.logical.find { case ExternalRDD(_, _) => true case _ => false } assert(containsRDD.isEmpty, "Expected logical plan of toJSON to not contain an RDD") } test("JSONRelation equality test") { withTempPath(dir => { val path = dir.getCanonicalFile.toURI.toString sparkContext.parallelize(1 to 100) .map(i => s"""{"a": 1, "b": "str$i"}""").saveAsTextFile(path) val d1 = DataSource( spark, userSpecifiedSchema = None, partitionColumns = Array.empty[String], bucketSpec = None, className = classOf[JsonFileFormat].getCanonicalName, options = Map("path" -> path)).resolveRelation() val d2 = DataSource( spark, userSpecifiedSchema = None, partitionColumns = Array.empty[String], bucketSpec = None, className = classOf[JsonFileFormat].getCanonicalName, options = Map("path" -> path)).resolveRelation() assert(d1 === d2) }) } test("SPARK-6245 JsonInferSchema.infer on empty RDD") { // This is really a test that it doesn't throw an exception val emptySchema = JsonInferSchema.infer( empty.rdd, new JSONOptions(Map.empty[String, String], "GMT"), CreateJacksonParser.string) assert(StructType(Seq()) === emptySchema) } test("SPARK-7565 MapType in JsonRDD") { withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") { withTempDir { dir => val schemaWithSimpleMap = StructType( StructField("map", MapType(StringType, IntegerType, true), false) :: Nil) val df = spark.read.schema(schemaWithSimpleMap).json(mapType1) val path = dir.getAbsolutePath df.write.mode("overwrite").parquet(path) // order of MapType is not defined assert(spark.read.parquet(path).count() == 5) val df2 = spark.read.json(corruptRecords) df2.write.mode("overwrite").parquet(path) checkAnswer(spark.read.parquet(path), df2.collect()) } } } test("SPARK-8093 Erase empty structs") { val emptySchema = JsonInferSchema.infer( emptyRecords.rdd, new JSONOptions(Map.empty[String, String], "GMT"), CreateJacksonParser.string) assert(StructType(Seq()) === emptySchema) } test("JSON with Partition") { def makePartition(rdd: RDD[String], parent: File, partName: String, partValue: Any): File = { val p = new File(parent, s"$partName=${partValue.toString}") rdd.saveAsTextFile(p.getCanonicalPath) p } withTempPath(root => { val d1 = new File(root, "d1=1") // root/dt=1/col1=abc val p1_col1 = makePartition( sparkContext.parallelize(2 to 5).map(i => s"""{"a": 1, "b": "str$i"}"""), d1, "col1", "abc") // root/dt=1/col1=abd val p2 = makePartition( sparkContext.parallelize(6 to 10).map(i => s"""{"a": 1, "b": "str$i"}"""), d1, "col1", "abd") spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part") checkAnswer(sql( "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4)) checkAnswer(sql( "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abd'"), Row(5)) checkAnswer(sql( "SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9)) }) } test("backward compatibility") { // This test we make sure our JSON support can read JSON data generated by previous version // of Spark generated through toJSON method and JSON data source. // The data is generated by the following program. // Here are a few notes: // - Spark 1.5.0 cannot save timestamp data. So, we manually added timestamp field (col13) // in the JSON object. // - For Spark before 1.5.1, we do not generate UDTs. 
    //      So, we manually added the UDT value to JSON objects generated by those Spark
    //      versions (col17).
    //  - If the type is NullType, we do not write data out.

    // Create the schema.
    val struct = StructType(
      StructField("f1", FloatType, true) ::
        StructField("f2", ArrayType(BooleanType), true) :: Nil)

    val dataTypes =
      Seq(
        StringType, BinaryType, NullType, BooleanType,
        ByteType, ShortType, IntegerType, LongType,
        FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
        DateType, TimestampType,
        ArrayType(IntegerType), MapType(StringType, LongType), struct,
        new UDT.MyDenseVectorUDT())
    val fields = dataTypes.zipWithIndex.map { case (dataType, index) =>
      StructField(s"col$index", dataType, nullable = true)
    }
    val schema = StructType(fields)

    val constantValues =
      Seq(
        "a string in binary".getBytes(StandardCharsets.UTF_8),
        null,
        true,
        1.toByte,
        2.toShort,
        3,
        Long.MaxValue,
        0.25.toFloat,
        0.75,
        new java.math.BigDecimal(s"1234.23456"),
        new java.math.BigDecimal(s"1.23456"),
        java.sql.Date.valueOf("2015-01-01"),
        java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"),
        Seq(2, 3, 4),
        Map("a string" -> 2000L),
        Row(4.75.toFloat, Seq(false, true)),
        new UDT.MyDenseVector(Array(0.25, 2.25, 4.25)))
    val data =
      Row.fromSeq(Seq("Spark " + spark.sparkContext.version) ++ constantValues) :: Nil

    // Data generated by previous versions.
    // scalastyle:off
    val existingJSONData =
      """{"col0":"Spark 1.2.2","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
      """{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
      """{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
      """{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
      """{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
      """{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
      """{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"16436","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" :: Nil
    // scalastyle:on

    // Generate data for the current version.
    val df = spark.createDataFrame(spark.sparkContext.parallelize(data, 1), schema)
    withTempPath { path =>
      df.write.format("json").mode("overwrite").save(path.getCanonicalPath)

      // df.toJSON will convert internal rows to external rows first and then generate
      // JSON objects. While, df.write.format("json") will write internal rows directly.
      val allJSON =
        existingJSONData ++
          df.toJSON.collect() ++
          sparkContext.textFile(path.getCanonicalPath).collect()

      Utils.deleteRecursively(path)
      sparkContext.parallelize(allJSON, 1).saveAsTextFile(path.getCanonicalPath)

      // Read data back with the schema specified.
      val col0Values =
        Seq(
          "Spark 1.2.2",
          "Spark 1.3.1",
          "Spark 1.3.1",
          "Spark 1.4.1",
          "Spark 1.4.1",
          "Spark 1.5.0",
          "Spark 1.5.0",
          "Spark " + spark.sparkContext.version,
          "Spark " + spark.sparkContext.version)
      val expectedResult = col0Values.map { v =>
        Row.fromSeq(Seq(v) ++ constantValues)
      }
      checkAnswer(
        spark.read.format("json").schema(schema).load(path.getCanonicalPath),
        expectedResult
      )
    }
  }

  test("SPARK-11544 test pathfilter") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath

      val df = spark.range(2)
      df.write.json(path + "/p=1")
      df.write.json(path + "/p=2")
      assert(spark.read.json(path).count() === 4)

      val extraOptions = Map(
        "mapred.input.pathFilter.class" -> classOf[TestFileFilter].getName,
        "mapreduce.input.pathFilter.class" -> classOf[TestFileFilter].getName
      )
      assert(spark.read.options(extraOptions).json(path).count() === 2)
    }
  }

  test("SPARK-12057 additional corrupt records do not throw exceptions") {
    // Test if we can query corrupt records.
    withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
      withTempView("jsonTable") {
        val schema = StructType(
          StructField("_unparsed", StringType, true) ::
            StructField("dummy", StringType, true) :: Nil)

        {
          // We need to make sure we can infer the schema.
          val jsonDF = spark.read.json(additionalCorruptRecords)
          assert(jsonDF.schema === schema)
        }

        {
          val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
          jsonDF.createOrReplaceTempView("jsonTable")

          // In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer( sql( """ |SELECT dummy, _unparsed |FROM jsonTable """.stripMargin), Row("test", null) :: Row(null, """[1,2,3]""") :: Row(null, """":"test", "a":1}""") :: Row(null, """42""") :: Row(null, """ ","ian":"test"}""") :: Nil ) } } } } test("Parse JSON rows having an array type and a struct type in the same field.") { withTempDir { dir => val dir = Utils.createTempDir() dir.delete() val path = dir.getCanonicalPath arrayAndStructRecords.map(record => record.replaceAll("\\n", " ")).write.text(path) val schema = StructType( StructField("a", StructType( StructField("b", StringType) :: Nil )) :: Nil) val jsonDF = spark.read.schema(schema).json(path) assert(jsonDF.count() == 2) } } test("SPARK-12872 Support to specify the option for compression codec") { withTempDir { dir => val dir = Utils.createTempDir() dir.delete() val path = dir.getCanonicalPath primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path) val jsonDF = spark.read.json(path) val jsonDir = new File(dir, "json").getCanonicalPath jsonDF.coalesce(1).write .format("json") .option("compression", "gZiP") .save(jsonDir) val compressedFiles = new File(jsonDir).listFiles() assert(compressedFiles.exists(_.getName.endsWith(".json.gz"))) val jsonCopy = spark.read .format("json") .load(jsonDir) assert(jsonCopy.count == jsonDF.count) val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean") val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean") checkAnswer(jsonCopySome, jsonDFSome) } } test("SPARK-13543 Write the output as uncompressed via option()") { val extraOptions = Map[String, String]( "mapreduce.output.fileoutputformat.compress" -> "true", "mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString, "mapreduce.output.fileoutputformat.compress.codec" -> classOf[GzipCodec].getName, "mapreduce.map.output.compress" -> "true", "mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName ) withTempDir { dir => val dir = Utils.createTempDir() dir.delete() val path = dir.getCanonicalPath primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path) val jsonDF = spark.read.json(path) val jsonDir = new File(dir, "json").getCanonicalPath jsonDF.coalesce(1).write .format("json") .option("compression", "none") .options(extraOptions) .save(jsonDir) val compressedFiles = new File(jsonDir).listFiles() assert(compressedFiles.exists(!_.getName.endsWith(".json.gz"))) val jsonCopy = spark.read .format("json") .options(extraOptions) .load(jsonDir) assert(jsonCopy.count == jsonDF.count) val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean") val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean") checkAnswer(jsonCopySome, jsonDFSome) } } test("Casting long as timestamp") { withTempView("jsonTable") { val schema = (new StructType).add("ts", TimestampType) val jsonDF = spark.read.schema(schema).json(timestampAsLong) jsonDF.createOrReplaceTempView("jsonTable") checkAnswer( sql("select ts from jsonTable"), Row(java.sql.Timestamp.valueOf("2016-01-02 03:04:05")) ) } } test("wide nested json table") { val nested = (1 to 100).map { i => s""" |"c$i": $i """.stripMargin }.mkString(", ") val json = s""" |{"a": [{$nested}], "b": [{$nested}]} """.stripMargin val df = spark.read.json(Seq(json).toDS()) assert(df.schema.size === 2) df.collect() } test("Write dates correctly with dateFormat option") { val customSchema = new StructType(Array(StructField("date", DateType, true))) withTempDir { dir => // With dateFormat option. 
      val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json"
      val datesWithFormat = spark.read
        .schema(customSchema)
        .option("dateFormat", "dd/MM/yyyy HH:mm")
        .json(datesRecords)

      datesWithFormat.write
        .format("json")
        .option("dateFormat", "yyyy/MM/dd")
        .save(datesWithFormatPath)

      // This will load back the dates as string.
      val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
      val stringDatesWithFormat = spark.read
        .schema(stringSchema)
        .json(datesWithFormatPath)
      val expectedStringDatesWithFormat = Seq(
        Row("2015/08/26"),
        Row("2014/10/27"),
        Row("2016/01/28"))

      checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
    }
  }

  test("Write timestamps correctly with timestampFormat option") {
    val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
    withTempDir { dir =>
      // With timestampFormat option.
      val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
      val timestampsWithFormat = spark.read
        .schema(customSchema)
        .option("timestampFormat", "dd/MM/yyyy HH:mm")
        .json(datesRecords)
      timestampsWithFormat.write
        .format("json")
        .option("timestampFormat", "yyyy/MM/dd HH:mm")
        .save(timestampsWithFormatPath)

      // This will load back the timestamps as string.
      val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
      val stringTimestampsWithFormat = spark.read
        .schema(stringSchema)
        .json(timestampsWithFormatPath)
      val expectedStringDatesWithFormat = Seq(
        Row("2015/08/26 18:00"),
        Row("2014/10/27 18:30"),
        Row("2016/01/28 20:00"))

      checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
    }
  }

  test("Write timestamps correctly with timestampFormat option and timeZone option") {
    val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
    withTempDir { dir =>
      // With timestampFormat option and timeZone option.
      val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
      val timestampsWithFormat = spark.read
        .schema(customSchema)
        .option("timestampFormat", "dd/MM/yyyy HH:mm")
        .json(datesRecords)

      timestampsWithFormat.write
        .format("json")
        .option("timestampFormat", "yyyy/MM/dd HH:mm")
        .option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
        .save(timestampsWithFormatPath)

      // This will load back the timestamps as string.
      val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
      val stringTimestampsWithFormat = spark.read
        .schema(stringSchema)
        .json(timestampsWithFormatPath)
      val expectedStringDatesWithFormat = Seq(
        Row("2015/08/27 01:00"),
        Row("2014/10/28 01:30"),
        Row("2016/01/29 04:00"))

      checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)

      val readBack = spark.read
        .schema(customSchema)
        .option("timestampFormat", "yyyy/MM/dd HH:mm")
        .option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
        .json(timestampsWithFormatPath)

      checkAnswer(readBack, timestampsWithFormat)
    }
  }

  test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
    val records = Seq("""{"a": 3, "b": 1.1}""", """{"a": 3.1, "b": 0.000001}""").toDS()

    val schema = StructType(
      StructField("a", DecimalType(21, 1), true) ::
        StructField("b", DecimalType(7, 6), true) :: Nil)

    val df1 = spark.read.option("prefersDecimal", "true").json(records)
    assert(df1.schema == schema)
    val df2 = spark.read.option("PREfersdecimaL", "true").json(records)
    assert(df2.schema == schema)
  }

  test("SPARK-18352: Parse normal multi-line JSON files (compressed)") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      primitiveFieldAndType
        .toDF("value")
        .write
        .option("compression", "GzIp")
        .text(path)

      assert(new File(path).listFiles().exists(_.getName.endsWith(".gz")))

      val jsonDF = spark.read.option("multiLine", true).json(path)
      val jsonDir = new File(dir, "json").getCanonicalPath
      jsonDF.coalesce(1).write
        .option("compression", "gZiP")
        .json(jsonDir)

      assert(new File(jsonDir).listFiles().exists(_.getName.endsWith(".json.gz")))

      val originalData = spark.read.json(primitiveFieldAndType)
      checkAnswer(jsonDF, originalData)
      checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
    }
  }

  test("SPARK-18352: Parse normal multi-line JSON files (uncompressed)") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      primitiveFieldAndType
        .toDF("value")
        .write
        .text(path)

      val jsonDF = spark.read.option("multiLine", true).json(path)
      val jsonDir = new File(dir, "json").getCanonicalPath
      jsonDF.coalesce(1).write.json(jsonDir)

      val compressedFiles = new File(jsonDir).listFiles()
      assert(compressedFiles.exists(_.getName.endsWith(".json")))

      val originalData = spark.read.json(primitiveFieldAndType)
      checkAnswer(jsonDF, originalData)
      checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
    }
  }

  test("SPARK-18352: Expect one JSON document per file") {
    // the json parser terminates as soon as it sees a matching END_OBJECT or END_ARRAY token.
    // this might not be the optimal behavior but this test verifies that only the first value
    // is parsed and the rest are discarded.
    // alternatively the parser could continue parsing following objects, which may further reduce
    // allocations by skipping the line reader entirely

    withTempPath { dir =>
      val path = dir.getCanonicalPath
      spark
        .createDataFrame(Seq(Tuple1("{}{invalid}")))
        .coalesce(1)
        .write
        .text(path)

      val jsonDF = spark.read.option("multiLine", true).json(path)
      // no corrupt record column should be created
      assert(jsonDF.schema === StructType(Seq()))
      // only the first object should be read
      assert(jsonDF.count() === 1)
    }
  }

  test("SPARK-18352: Handle multi-line corrupt documents (PERMISSIVE)") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val corruptRecordCount = additionalCorruptRecords.count().toInt
      assert(corruptRecordCount === 5)

      additionalCorruptRecords
        .toDF("value")
        // this is the minimum partition count that avoids hash collisions
        .repartition(corruptRecordCount * 4, F.hash($"value"))
        .write
        .text(path)

      val jsonDF = spark.read.option("multiLine", true).option("mode", "PERMISSIVE").json(path)
      assert(jsonDF.count() === corruptRecordCount)
      assert(jsonDF.schema === new StructType()
        .add("_corrupt_record", StringType)
        .add("dummy", StringType))
      val counts = jsonDF
        .join(
          additionalCorruptRecords.toDF("value"),
          F.regexp_replace($"_corrupt_record", "(^\\s+|\\s+$)", "") === F.trim($"value"),
          "outer")
        .agg(
          F.count($"dummy").as("valid"),
          F.count($"_corrupt_record").as("corrupt"),
          F.count("*").as("count"))
      checkAnswer(counts, Row(1, 4, 6))
    }
  }

  test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val corruptRecordCount = additionalCorruptRecords.count().toInt
      assert(corruptRecordCount === 5)

      additionalCorruptRecords
        .toDF("value")
        // this is the minimum partition count that avoids hash collisions
        .repartition(corruptRecordCount * 4, F.hash($"value"))
        .write
        .text(path)

      val jsonDF = spark.read.option("multiLine", true).option("mode", "DROPMALFORMED").json(path)
      checkAnswer(jsonDF, Seq(Row("test")))
    }
  }

  test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val corruptRecordCount = additionalCorruptRecords.count().toInt
      assert(corruptRecordCount === 5)

      additionalCorruptRecords
        .toDF("value")
        // this is the minimum partition count that avoids hash collisions
        .repartition(corruptRecordCount * 4, F.hash($"value"))
        .write
        .text(path)

      val schema = new StructType().add("dummy", StringType)

      // `FAILFAST` mode should throw an exception for corrupt records.
      val exceptionOne = intercept[SparkException] {
        spark.read
          .option("multiLine", true)
          .option("mode", "FAILFAST")
          .json(path)
      }
      assert(exceptionOne.getMessage.contains("Malformed records are detected in schema " +
        "inference. Parse Mode: FAILFAST."))

      val exceptionTwo = intercept[SparkException] {
        spark.read
          .option("multiLine", true)
          .option("mode", "FAILFAST")
          .schema(schema)
          .json(path)
          .collect()
      }
      assert(exceptionTwo.getMessage.contains("Malformed records are detected in record " +
        "parsing. Parse Mode: FAILFAST."))
    }
  }

  test("Throw an exception if a `columnNameOfCorruptRecord` field violates requirements") {
    val columnNameOfCorruptRecord = "_unparsed"
    val schema = StructType(
      StructField(columnNameOfCorruptRecord, IntegerType, true) ::
        StructField("a", StringType, true) ::
        StructField("b", StringType, true) ::
        StructField("c", StringType, true) :: Nil)
    val errMsg = intercept[AnalysisException] {
      spark.read
        .option("mode", "Permissive")
        .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
        .schema(schema)
        .json(corruptRecords)
    }.getMessage
    assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))

    // We use `PERMISSIVE` mode by default if invalid string is given.
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      corruptRecords.toDF("value").write.text(path)
      val errMsg = intercept[AnalysisException] {
        spark.read
          .option("mode", "permm")
          .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
          .schema(schema)
          .json(path)
          .collect
      }.getMessage
      assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
    }
  }

  test("SPARK-18772: Parse special floats correctly") {
    val jsons = Seq(
      """{"a": "NaN"}""",
      """{"a": "Infinity"}""",
      """{"a": "-Infinity"}""")

    // positive cases
    val checks: Seq[Double => Boolean] = Seq(
      _.isNaN,
      _.isPosInfinity,
      _.isNegInfinity)

    Seq(FloatType, DoubleType).foreach { dt =>
      jsons.zip(checks).foreach { case (json, check) =>
        val ds = spark.read
          .schema(StructType(Seq(StructField("a", dt))))
          .json(Seq(json).toDS())
          .select($"a".cast(DoubleType)).as[Double]
        assert(check(ds.first()))
      }
    }

    // negative cases
    Seq(FloatType, DoubleType).foreach { dt =>
      val lowerCasedJsons = jsons.map(_.toLowerCase(Locale.ROOT))
      // The special floats are case-sensitive so these cases below throw exceptions.
      lowerCasedJsons.foreach { lowerCasedJson =>
        val e = intercept[SparkException] {
          spark.read
            .option("mode", "FAILFAST")
            .schema(StructType(Seq(StructField("a", dt))))
            .json(Seq(lowerCasedJson).toDS())
            .collect()
        }
        assert(e.getMessage.contains("Cannot parse"))
      }
    }
  }

  test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
    "from a file") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val data =
        """{"field": 1}
          |{"field": 2}
          |{"field": "3"}""".stripMargin
      Seq(data).toDF().repartition(1).write.text(path)
      val schema = new StructType().add("field", ByteType).add("_corrupt_record", StringType)
      // negative cases
      val msg = intercept[AnalysisException] {
        spark.read.schema(schema).json(path).select("_corrupt_record").collect()
      }.getMessage
      assert(msg.contains("only include the internal corrupt record column"))
      intercept[catalyst.errors.TreeNodeException[_]] {
        spark.read.schema(schema).json(path).filter($"_corrupt_record".isNotNull).count()
      }
      // workaround
      val df = spark.read.schema(schema).json(path).cache()
      assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
      assert(df.filter($"_corrupt_record".isNull).count() == 2)
      checkAnswer(
        df.select("_corrupt_record"),
        Row(null) :: Row(null) :: Row("{\"field\": \"3\"}") :: Nil
      )
    }
  }

  def testLineSeparator(lineSep: String): Unit = {
    test(s"SPARK-21289: Support line separator - lineSep: '$lineSep'") {
      // Read
      val data =
        s"""
           | {"f":
           |"a", "f0": 1}$lineSep{"f":
           |
           |"c", "f0": 2}$lineSep{"f": "d", "f0": 3}
         """.stripMargin
      val dataWithTrailingLineSep = s"$data$lineSep"

      Seq(data, dataWithTrailingLineSep).foreach { lines =>
        withTempPath { path =>
          Files.write(path.toPath, lines.getBytes(StandardCharsets.UTF_8))
          val df = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
          val expectedSchema =
            StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
          checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
          assert(df.schema === expectedSchema)
        }
      }

      // Write
      withTempPath { path =>
        Seq("a", "b", "c").toDF("value").coalesce(1)
          .write.option("lineSep", lineSep).json(path.getAbsolutePath)
        val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
        val readBack = new String(Files.readAllBytes(partFile.toPath), StandardCharsets.UTF_8)
        assert(
          readBack === s"""{"value":"a"}$lineSep{"value":"b"}$lineSep{"value":"c"}$lineSep""")
      }

      // Roundtrip
      withTempPath { path =>
        val df = Seq("a", "b", "c").toDF()
        df.write.option("lineSep", lineSep).json(path.getAbsolutePath)
        val readBack = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
        checkAnswer(df, readBack)
      }
    }
  }

  // scalastyle:off nonascii
  Seq("|", "^", "::", "!!!@3", 0x1E.toChar.toString, "아").foreach { lineSep =>
    testLineSeparator(lineSep)
  }
  // scalastyle:on nonascii

  test("""SPARK-21289: Support line separator - default value \r, \r\n and \n""") {
    val data =
      "{\"f\": \"a\", \"f0\": 1}\r{\"f\": \"c\", \"f0\": 2}\r\n{\"f\": \"d\", \"f0\": 3}\n"

    withTempPath { path =>
      Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
      val df = spark.read.json(path.getAbsolutePath)
      val expectedSchema =
        StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
      checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
      assert(df.schema === expectedSchema)
    }
  }

  test("SPARK-23849: schema inferring touches less data if samplingRatio < 1.0") {
    // Set default values for the DataSource parameters to make sure
    // that whole test file is mapped to only one partition.
    // This will guarantee reliable sampling of the input file.
    withSQLConf(
      "spark.sql.files.maxPartitionBytes" -> (128 * 1024 * 1024).toString,
      "spark.sql.files.openCostInBytes" -> (4 * 1024 * 1024).toString
    )(withTempPath { path =>
      val ds = sampledTestData.coalesce(1)
      ds.write.text(path.getAbsolutePath)
      val readback = spark.read.option("samplingRatio", 0.1).json(path.getCanonicalPath)

      assert(readback.schema == new StructType().add("f1", LongType))
    })
  }

  test("SPARK-23849: usage of samplingRatio while parsing a dataset of strings") {
    val ds = sampledTestData.coalesce(1)
    val readback = spark.read.option("samplingRatio", 0.1).json(ds)

    assert(readback.schema == new StructType().add("f1", LongType))
  }

  test("SPARK-23849: samplingRatio is out of the range (0, 1.0]") {
    val ds = spark.range(0, 100, 1, 1).map(_.toString)

    val errorMsg0 = intercept[IllegalArgumentException] {
      spark.read.option("samplingRatio", -1).json(ds)
    }.getMessage
    assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))

    val errorMsg1 = intercept[IllegalArgumentException] {
      spark.read.option("samplingRatio", 0).json(ds)
    }.getMessage
    assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))

    val sampled = spark.read.option("samplingRatio", 1.0).json(ds)
    assert(sampled.count() == ds.count())
  }

  test("SPARK-23723: json in UTF-16 with BOM") {
    val fileName = "test-data/utf16WithBOM.json"
    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
    val jsonDF = spark.read.schema(schema)
      .option("multiline", "true")
      .option("encoding", "UTF-16")
      .json(testFile(fileName))

    checkAnswer(jsonDF, Seq(Row("Chris", "Baird"), Row("Doug", "Rood")))
  }

  test("SPARK-23723: multi-line json in UTF-32BE with BOM") {
    val fileName = "test-data/utf32BEWithBOM.json"
    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
    val jsonDF = spark.read.schema(schema)
      .option("multiline", "true")
      .json(testFile(fileName))

    checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
  }

  test("SPARK-23723: Use user's encoding in reading of multi-line json in UTF-16LE") {
    val fileName = "test-data/utf16LE.json"
    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
    val jsonDF = spark.read.schema(schema)
      .option("multiline", "true")
      .options(Map("encoding" -> "UTF-16LE"))
      .json(testFile(fileName))

    checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
  }

  test("SPARK-23723: Unsupported encoding name") {
    val invalidCharset = "UTF-128"
    val exception = intercept[UnsupportedCharsetException] {
      spark.read
        .options(Map("encoding" -> invalidCharset, "lineSep" -> "\n"))
        .json(testFile("test-data/utf16LE.json"))
        .count()
    }

    assert(exception.getMessage.contains(invalidCharset))
  }

  test("SPARK-23723: checking that the encoding option is case agnostic") {
    val fileName = "test-data/utf16LE.json"
    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
    val jsonDF = spark.read.schema(schema)
      .option("multiline", "true")
      .options(Map("encoding" -> "uTf-16lE"))
      .json(testFile(fileName))

    checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
  }

  test("SPARK-23723: specified encoding is not matched to actual encoding") {
    val fileName = "test-data/utf16LE.json"
    val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
    val exception = intercept[SparkException] {
      spark.read.schema(schema)
        .option("mode", "FAILFAST")
        .option("multiline", "true")
        .options(Map("encoding" -> "UTF-16BE"))
        .json(testFile(fileName))
        .count()
    }
    val errMsg =
      exception.getMessage

    assert(errMsg.contains("Malformed records are detected in record parsing"))
  }

  def checkEncoding(expectedEncoding: String, pathToJsonFiles: String,
      expectedContent: String): Unit = {
    val jsonFiles = new File(pathToJsonFiles)
      .listFiles()
      .filter(_.isFile)
      .filter(_.getName.endsWith("json"))
    val actualContent = jsonFiles.map { file =>
      new String(Files.readAllBytes(file.toPath), expectedEncoding)
    }.mkString.trim

    assert(actualContent == expectedContent)
  }

  test("SPARK-23723: save json in UTF-32BE") {
    val encoding = "UTF-32BE"
    withTempPath { path =>
      val df = spark.createDataset(Seq(("Dog", 42)))
      df.write
        .options(Map("encoding" -> encoding, "lineSep" -> "\n"))
        .json(path.getCanonicalPath)

      checkEncoding(
        expectedEncoding = encoding,
        pathToJsonFiles = path.getCanonicalPath,
        expectedContent = """{"_1":"Dog","_2":42}""")
    }
  }

  test("SPARK-23723: save json in default encoding - UTF-8") {
    withTempPath { path =>
      val df = spark.createDataset(Seq(("Dog", 42)))
      df.write.json(path.getCanonicalPath)

      checkEncoding(
        expectedEncoding = "UTF-8",
        pathToJsonFiles = path.getCanonicalPath,
        expectedContent = """{"_1":"Dog","_2":42}""")
    }
  }

  test("SPARK-23723: wrong output encoding") {
    val encoding = "UTF-128"
    val exception = intercept[UnsupportedCharsetException] {
      withTempPath { path =>
        val df = spark.createDataset(Seq((0)))
        df.write
          .options(Map("encoding" -> encoding, "lineSep" -> "\n"))
          .json(path.getCanonicalPath)
      }
    }

    assert(exception.getMessage == encoding)
  }

  test("SPARK-23723: read back json in UTF-16LE") {
    val options = Map("encoding" -> "UTF-16LE", "lineSep" -> "\n")
    withTempPath { path =>
      val ds = spark.createDataset(Seq(("a", 1), ("b", 2), ("c", 3))).repartition(2)
      ds.write.options(options).json(path.getCanonicalPath)

      val readBack = spark
        .read
        .options(options)
        .json(path.getCanonicalPath)

      checkAnswer(readBack.toDF(), ds.toDF())
    }
  }

  test("SPARK-23723: write json in UTF-16/32 with multiline off") {
    Seq("UTF-16", "UTF-32").foreach { encoding =>
      withTempPath { path =>
        val ds = spark.createDataset(Seq(
          ("a", 1), ("b", 2), ("c", 3))
        ).repartition(2)
        val e = intercept[IllegalArgumentException] {
          ds.write
            .option("encoding", encoding)
            .option("multiline", "false")
            .format("json").mode("overwrite")
            .save(path.getCanonicalPath)
        }.getMessage
        assert(e.contains(
          s"$encoding encoding in the blacklist is not allowed when multiLine is disabled"))
      }
    }
  }

  def checkReadJson(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
    test(s"SPARK-23724: checks reading json in ${encoding} #${id}") {
      val schema = new StructType().add("f1", StringType).add("f2", IntegerType)
      withTempPath { path =>
        val records = List(("a", 1), ("b", 2))
        val data = records
          .map(rec => s"""{"f1":"${rec._1}", "f2":${rec._2}}""".getBytes(encoding))
          .reduce((a1, a2) => a1 ++ lineSep.getBytes(encoding) ++ a2)
        val os = new FileOutputStream(path)
        os.write(data)
        os.close()
        val reader = if (inferSchema) {
          spark.read
        } else {
          spark.read.schema(schema)
        }
        val readBack = reader
          .option("encoding", encoding)
          .option("lineSep", lineSep)
          .json(path.getCanonicalPath)
        checkAnswer(readBack, records.map(rec => Row(rec._1, rec._2)))
      }
    }
  }

  // scalastyle:off nonascii
  List(
    (0, "|", "UTF-8", false),
    (1, "^", "UTF-16BE", true),
    (2, "::", "ISO-8859-1", true),
    (3, "!!!@3", "UTF-32LE", false),
    (4, 0x1E.toChar.toString, "UTF-8", true),
    (5, "아", "UTF-32BE", false),
    (6, "куку", "CP1251", true),
    (7, "sep", "utf-8", false),
    (8, "\r\n", "UTF-16LE", false),
    (9, "\r\n", "utf-16be", true),
    (10, "\u000d\u000a", "UTF-32BE", false),
    (11,
"\\u000a\\u000d", "UTF-8", true), (12, "===", "US-ASCII", false), (13, "$^+", "utf-32le", true) ).foreach { case (testNum, sep, encoding, inferSchema) => checkReadJson(sep, encoding, inferSchema, testNum) } // scalastyle:on nonascii test("SPARK-23724: lineSep should be set if encoding if different from UTF-8") { val encoding = "UTF-16LE" val exception = intercept[IllegalArgumentException] { spark.read .options(Map("encoding" -> encoding)) .json(testFile("test-data/utf16LE.json")) .count() } assert(exception.getMessage.contains( s"""The lineSep option must be specified for the $encoding encoding""")) } private val badJson = "\\u0000\\u0000\\u0000A\\u0001AAA" test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is enabled") { withTempPath { tempDir => val path = tempDir.getAbsolutePath Seq(badJson + """{"a":1}""").toDS().write.text(path) val expected = s"""${badJson}{"a":1}\\n""" val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType) val df = spark.read.format("json") .option("mode", "PERMISSIVE") .option("multiLine", true) .option("encoding", "UTF-8") .schema(schema).load(path) checkAnswer(df, Row(null, expected)) } } test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is disabled") { withTempPath { tempDir => val path = tempDir.getAbsolutePath Seq(badJson, """{"a":1}""").toDS().write.text(path) val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType) val df = spark.read.format("json") .option("mode", "PERMISSIVE") .option("multiLine", false) .option("encoding", "UTF-8") .schema(schema).load(path) checkAnswer(df, Seq(Row(1, null), Row(null, badJson))) } } test("SPARK-23094: permissively parse a dataset contains JSON with leading nulls") { checkAnswer( spark.read.option("mode", "PERMISSIVE").option("encoding", "UTF-8").json(Seq(badJson).toDS()), Row(badJson)) } }
lxsmnv/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
Scala
apache-2.0
90,184
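The suite above exercises Spark's three JSON parse modes at length; the following is a minimal, self-contained sketch of the same behavior from user code. The SparkSession setup and the sample records are illustrative, while the reader options themselves (`mode`, the `_corrupt_record` column) are the real Spark APIs under test.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructType}

object ParseModesSketch extends App {
  val spark = SparkSession.builder().master("local[*]").appName("parse-modes").getOrCreate()
  import spark.implicits._

  // One structurally malformed record (unquoted key) and one well-formed one,
  // mirroring the suite's corruptRecords fixture.
  val records = Seq("""{"a":1, b:2}""", """{"a":"str_a_4","b":"str_b_4"}""").toDS()

  // PERMISSIVE (the default): malformed input lands in the corrupt-record column.
  val schema = new StructType()
    .add("a", StringType).add("b", StringType).add("_corrupt_record", StringType)
  spark.read.schema(schema).json(records).show(false)

  // DROPMALFORMED: malformed rows are silently skipped.
  spark.read.option("mode", "DROPMALFORMED").json(records).show(false)

  // FAILFAST: the first malformed row raises a SparkException.
  // spark.read.option("mode", "FAILFAST").json(records).collect()
}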
package com.github.mdr.mash.ns.core.thread

import com.github.mdr.mash.completions.CompletionSpec
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, ParameterModel }
import com.github.mdr.mash.inference.{ Type, TypedArguments }
import com.github.mdr.mash.ns.collections.{ MapFunction, MapTypeInferenceStrategy }
import com.github.mdr.mash.runtime.{ MashList, MashString, MashValue }

import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }

object ParallelMapFunction extends MashFunction("core.thread.parallelMap") {

  import MapFunction.Params._

  val params = ParameterModel(F, Sequence)

  def call(boundParams: BoundParams): MashValue = {
    val inSequence = boundParams(Sequence)
    val sequence = boundParams.validateSequence(Sequence)
    val f = boundParams.validateFunction(F)
    val mapped = parallelMap(sequence, f)
    inSequence match {
      case MashString(_, tagOpt) if mapped.forall(_.isAString) ⇒
        mapped.asInstanceOf[Seq[MashString]].fold(MashString("", tagOpt))(_ + _)
      case _ ⇒
        MashList(mapped)
    }
  }

  private def parallelMap(sequence: Seq[MashValue], f: MashValue ⇒ MashValue): Seq[MashValue] = {
    val xs = Future.traverse(sequence)(x ⇒ Future(f(x)))
    Await.result(xs, Duration.Inf)
  }

  override def typeInferenceStrategy = MapTypeInferenceStrategy

  override def getCompletionSpecs(argPos: Int, arguments: TypedArguments) = {
    val argBindings = MapFunction.params.bindTypes(arguments)
    val specOpt =
      for {
        param ← argBindings.paramAt(argPos)
        if param == F
        Type.Seq(elementType) ← argBindings.getType(Sequence)
      } yield CompletionSpec.Members(elementType)
    specOpt.toSeq
  }

  override def summaryOpt = Some("Transform each element of a sequence by a given function")

  override def descriptionOpt = Some(
    """The given function is applied to each element of the input sequence
  to produce a sequence of transformed output elements.

Examples:
<mash>
  parallelMap (_ * 2) [1, 2, 3] # [2, 4, 6]
</mash>""")
}
mdr/mash
src/main/scala/com/github/mdr/mash/ns/core/thread/ParallelMapFunction.scala
Scala
mit
2,196
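parallelMap's concurrency core is just Future.traverse followed by a blocking Await; a stripped-down sketch of that pattern outside the mash runtime is below. The names (parMap, the demo input) are illustrative, not part of the mash codebase.

import scala.concurrent.{ Await, Future }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

object ParMapSketch extends App {
  // Fan out one Future per element, then block for the ordered results.
  def parMap[A, B](xs: Seq[A])(f: A => B): Seq[B] =
    Await.result(Future.traverse(xs)(x => Future(f(x))), Duration.Inf)

  // Elements are transformed concurrently, but result order matches input order.
  println(parMap(1 to 5)(_ * 2)) // Vector(2, 4, 6, 8, 10)
}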
/*
 * Copyright (c) Sourcy Software & Services GmbH 2015.
 *
 *     _____ ____   __  __ _____ _____ __  __   (_)____
 *    / ___// __ \ / / / // ___// ___// / / / / // __ \
 *   (__  )/ /_/ // /_/ // /  / /__ / /_/ /_ / // /_/ /
 *  /____/ \____/ \__,_//_/ \___/ \__, /(_)/_/ \____/
 *                               /____/
 *
 * Created by armin walland <[email protected]> on 2015-05-20.
 */
package io.sourcy.jirastatscollector

import java.util.Base64

object HttpBasicAuth {
  private val BASIC = "Basic"
  val AUTHORIZATION = "Authorization"

  def encodeCredentials(username: String, password: String): String = {
    new String(Base64.getEncoder.encode((username + ":" + password).getBytes))
  }

  def getHeader(username: String, password: String): String =
    BASIC + " " + encodeCredentials(username, password)
}
sourcy/jirastatscollector
src/main/scala/io/sourcy/jirastatscollector/HttpBasicAuth.scala
Scala
apache-2.0
834
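A small usage sketch of the helper above: build the header once and attach it to a plain java.net connection. The credentials and URL are placeholders; only HttpBasicAuth itself comes from the file above.

import java.net.{ HttpURLConnection, URL }

import io.sourcy.jirastatscollector.HttpBasicAuth

object BasicAuthDemo extends App {
  // "Basic " followed by base64("jira-user:secret")
  val header = HttpBasicAuth.getHeader("jira-user", "secret")
  println(header)

  val conn = new URL("https://jira.example.com/rest/api/2/myself")
    .openConnection().asInstanceOf[HttpURLConnection]
  conn.setRequestProperty(HttpBasicAuth.AUTHORIZATION, header)
}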
/*
 * Copyright (c) <2015-2016>, see CONTRIBUTORS
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the <organization> nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package ch.usi.inf.l3.sana.oberon0

package object codegen {
  val TAB = 2

  def tab(col: Int): String = " " * col
}
amanjpro/languages-a-la-carte
oberon0/src/main/scala/codegen/package.scala
Scala
bsd-3-clause
1,692
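A tiny usage sketch of the indentation helpers above: column depth in, spaces out. The demo object is illustrative only.

import ch.usi.inf.l3.sana.oberon0.codegen._

object TabDemo extends App {
  assert(tab(0).isEmpty)
  assert(tab(TAB) == "  ") // TAB == 2, so two spaces
  println("BEGIN\n" + tab(TAB) + "x := 1\nEND")
}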
package org.freeour.app.models

import org.slf4j.LoggerFactory

import scala.slick.driver.MySQLDriver.simple._

/**
 * Created by Bill Lv on 2/3/15.
 */
case class User(id: Option[Long] = None, email: String, password: String, var nickname: String,
                var phone: Option[String] = None, isAdmin: Boolean, var avatar: Option[Long] = None) {
  val logger = LoggerFactory.getLogger(getClass)

  def forgetMe = {
    logger.info("User: this is where you'd invalidate the saved token in your User model")
  }
}

class Users(tag: Tag) extends Table[User](tag, "USERS") {
  def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)

  def email = column[String]("EMAIL", O DBType "varchar(32)", O.NotNull)

  def password = column[String]("PASSWORD", O DBType "varchar(128)", O.NotNull)

  def nickname = column[String]("NICKNAME", O DBType "varchar(32)", O.NotNull)

  def phone = column[String]("PHONE", O DBType "varchar(32) null", O.Nullable)

  def isAdmin = column[Boolean]("IS_ADMIN", O.NotNull)

  def avatar = column[Long]("AVATAR", O.Nullable)

  def emailIdx = index("IDX_USERS_EMAIL", email, unique = true)

  override def * = (id.?, email, password, nickname, phone.?, isAdmin, avatar.?) <> (User.tupled, User.unapply)
}

object UserRepository extends TableQuery(new Users(_)) {
  def findById(id: Long)(implicit session: scala.slick.jdbc.JdbcBackend#SessionDef) =
    filter(_.id === id).firstOption

  def findByEmail(email: String)(implicit session: scala.slick.jdbc.JdbcBackend#SessionDef) =
    filter(_.email === email).firstOption

  def update(user: User)(implicit session: scala.slick.jdbc.JdbcBackend#SessionDef) = {
    filter(_.id === user.id)
      .map(p => (p.nickname, p.phone, p.isAdmin))
      .update((user.nickname, user.phone.get, user.isAdmin))
  }
}
ideaalloc/freeour
src/main/scala/org/freeour/app/models/User.scala
Scala
gpl-2.0
1,800
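A usage sketch for the repository above, assuming the blocking-session style of Slick 2.x that the model is written against. The JDBC URL and credentials are placeholders.

import scala.slick.driver.MySQLDriver.simple._

import org.freeour.app.models.UserRepository

object UserRepoDemo extends App {
  val db = Database.forURL(
    "jdbc:mysql://localhost:3306/freeour",
    user = "app", password = "secret", driver = "com.mysql.jdbc.Driver")

  db.withSession { implicit session =>
    // findByEmail/findById wrap filter(...).firstOption.
    val maybeUser = UserRepository.findByEmail("someone@example.com")
    maybeUser.foreach { u =>
      u.nickname = "renamed"
      // Note: update() calls user.phone.get, so it assumes phone is defined.
      UserRepository.update(u)
    }
  }
}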
package com.github.akileev.akka.serial.io

import akka.actor.ActorSystem
import akka.io._
import akka.testkit._
import com.github.akileev.akka.serial.io.Serial._
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class SerialManagerSpec extends TestKit(ActorSystem("SerialManagerSpec")) with FunSuiteLike
    with BeforeAndAfterAll with ImplicitSender {

  override def afterAll = {
    val whenTerminated = system.terminate()
    Await.result(whenTerminated, Duration.Inf)
  }

  test("list ports") {
    IO(Serial) ! ListPorts
    val Ports(ports) = expectMsgType[Ports]
    println("Found serial ports: " + ports.mkString(", "))
  }
}
akileev/akka-serial-io
src/test/scala/com/github/akileev/akka/serial/io/SerialManagerSpec.scala
Scala
apache-2.0
717
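Outside a TestKit, the same request/response exchange the spec performs can be driven from a plain actor, as sketched below. Serial, ListPorts and Ports come from the library under test; the actor itself is illustrative.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.io.IO
import com.github.akileev.akka.serial.io.Serial
import com.github.akileev.akka.serial.io.Serial._

class PortLister extends Actor {
  // Ask the Serial IO extension for the available ports on startup.
  override def preStart(): Unit = IO(Serial)(context.system) ! ListPorts

  def receive = {
    case Ports(ports) =>
      println("Serial ports: " + ports.mkString(", "))
      context.system.terminate()
  }
}

object PortListerMain extends App {
  val system = ActorSystem("ports")
  system.actorOf(Props[PortLister])
}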
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.rocketmq.spark

import org.slf4j.{Logger, LoggerFactory}

/**
 * Utility trait for classes that want to log data.
 */
trait Logging {
  // Make the log field transient so that objects with Logging can
  // be serialized and used on another machine
  @transient private var log_ : Logger = null

  // Method to get or create the logger
  def log: Logger = {
    if (log_ == null) log_ = LoggerFactory.getLogger(this.getClass.getName.stripSuffix("$"))
    log_
  }

  // Log methods that take only a String
  def logInfo(msg: => String) = if (log.isInfoEnabled) log.info(msg)

  def logDebug(msg: => String) = if (log.isDebugEnabled) log.debug(msg)

  def logWarning(msg: => String) = if (log.isWarnEnabled) log.warn(msg)

  def logError(msg: => String) = if (log.isErrorEnabled) log.error(msg)

  // Log methods that take Throwable (Exceptions/Errors) too
  def logInfo(msg: => String, throwable: Throwable) {
    if (log.isInfoEnabled) log.info(msg, throwable)
  }

  def logDebug(msg: => String, throwable: Throwable) {
    if (log.isDebugEnabled) log.debug(msg, throwable)
  }

  def logWarning(msg: => String, throwable: Throwable) =
    if (log.isWarnEnabled) log.warn(msg, throwable)

  def logError(msg: => String, throwable: Throwable) =
    if (log.isErrorEnabled) log.error(msg, throwable)
}
StyleTang/incubator-rocketmq-externals
rocketmq-spark/src/main/scala/org/apache/rocketmq/spark/Logging.scala
Scala
apache-2.0
2,137
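A mixin sketch for the trait above: any class extending Logging gets a lazily created, class-named SLF4J logger, and the by-name message parameters avoid building log strings when the level is disabled. The class below is illustrative.

import org.apache.rocketmq.spark.Logging

class Ingest extends Logging {
  def run(): Unit = {
    logInfo("starting ingest") // message only built if INFO is enabled
    try {
      // ... work ...
    } catch {
      case e: Exception => logError("ingest failed", e)
    }
  }
}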
package org.dbpedia.spotlight.db.spotter

/**
 * Created by dav009 on 24/03/2014.
 */

import org.dbpedia.spotlight.db.{FSADictionary, FSASpotter}
import org.junit.Assert.assertEquals
import org.junit.Test

object TestFSA {
  def getMockedFSA(): FSADictionary = {
    val testFSADict: FSADictionary = new FSADictionary()
    testFSADict.transitionsTokens = Array[Array[Int]](
      Array[Int](100, 200, 200, 300, 400), // state 0
      Array[Int](500) // state 1
    )
    testFSADict.transitionsStates = Array[Array[Int]](
      Array[Int](-1, -1, 1, -1, 1),
      Array[Int](-1)
    )
    testFSADict
  }
}

class TestFSA {

  @Test
  def testFSATransitions() {
    val testFSADict: FSADictionary = TestFSA.getMockedFSA()

    // binary search odd case
    val (status, nextState) = testFSADict.next(0, 200)
    assertEquals(status, FSASpotter.ACCEPTING_STATE)
    assertEquals(nextState, 1)

    // token not given for transition
    val (status2, nextState2) = testFSADict.next(0, 900)
    assertEquals(status2, FSASpotter.REJECTING_STATE)
    assertEquals(nextState2, FSASpotter.REJECTING_STATE)

    // current str is not yet a match
    val (status3, nextState3) = testFSADict.next(0, 400)
    assertEquals(status3, FSASpotter.REJECTING_STATE)
    assertEquals(nextState3, 1)
  }

  @Test
  def testSearchTokensInTransition() {
    val testFSADict: FSADictionary = TestFSA.getMockedFSA()

    // checks odd binary search case
    assertEquals(testFSADict.searchTokenInTransitions(0, 200), 1)
    // non existent item
    assert(testFSADict.searchTokenInTransitions(0, 900) < 0)
    // last item
    assertEquals(testFSADict.searchTokenInTransitions(0, 400), 4)
  }
}
Skunnyk/dbpedia-spotlight-model
core/src/test/scala/org/dbpedia/spotlight/db/spotter/TestFSA.scala
Scala
apache-2.0
1,688
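An independent sketch of the data layout the test above exercises: per state, a sorted array of token ids plus a parallel array of next-state ids, with lookup by binary search. Everything here is illustrative and deliberately simpler than dbpedia-spotlight's actual FSADictionary (which also distinguishes accepting from continuing transitions).

object FsaSketch extends App {
  // tokens(s) is sorted; states(s)(i) is the next state on tokens(s)(i), -1 if none.
  val tokens = Array(Array(100, 200, 200, 300, 400), Array(500))
  val states = Array(Array(-1, -1, 1, -1, 1), Array(-1))

  def next(state: Int, token: Int): Option[Int] = {
    val i = java.util.Arrays.binarySearch(tokens(state), token)
    if (i < 0) None else Some(states(state)(i))
  }

  assert(next(0, 200).nonEmpty) // token found at this state
  assert(next(0, 900).isEmpty)  // no transition on this token
}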
package com.sksamuel.elastic4s.requests.searches.queries.geo

import com.sksamuel.elastic4s.EnumConversions
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}

object GeoPolyonQueryBodyFn {

  def apply(q: GeoPolygonQuery): XContentBuilder = {

    val builder = XContentFactory.jsonBuilder()
    builder.startObject("geo_polygon")
    builder.startObject(q.field)

    builder.startArray("points")
    q.points.foreach { point =>
      builder.startObject()
      builder.field("lat", point.lat)
      builder.field("lon", point.long)
      builder.endObject()
    }
    builder.endArray()

    q.ignoreUnmapped.foreach(builder.field("ignore_unmapped", _))
    q.validationMethod.map(EnumConversions.geoValidationMethod).foreach(builder.field("validation_method", _))
    q.boost.foreach(builder.field("boost", _))
    q.queryName.foreach(builder.field("_name", _))

    builder.endObject()
    builder.endObject()
    builder.endObject()
  }
}
stringbean/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/geo/GeoPolyonQueryBodyFn.scala
Scala
apache-2.0
964
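For reference, a sketch of the JSON body the builder above produces for a three-point polygon on a hypothetical "location" field; the coordinates are invented, and the optional fields (boost, validation_method, etc.) are omitted because they are only emitted when set.

object GeoPolygonShapeSketch extends App {
  val expected =
    """{
      |  "geo_polygon": {
      |    "location": {
      |      "points": [
      |        {"lat": 40.0, "lon": -70.0},
      |        {"lat": 30.0, "lon": -80.0},
      |        {"lat": 20.0, "lon": -90.0}
      |      ]
      |    }
      |  }
      |}""".stripMargin
  println(expected)
}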
package com.landoop.streamreactor.connect.hive.sink.staging

import com.landoop.streamreactor.connect.hive.TopicPartitionOffset
import org.apache.hadoop.fs.{FileSystem, Path}

import scala.concurrent.duration.FiniteDuration

/**
  * The [[CommitPolicy]] is responsible for determining when
  * a file should be flushed (closed on disk, and moved to be visible).
  *
  * Typical implementations will flush based on number of records,
  * file size, or time since the file was opened.
  */
trait CommitPolicy {

  /**
    * This method is invoked after a file has been written.
    *
    * If the output file should be committed at this time, then this
    * method should return true, otherwise false.
    *
    * Once a commit has taken place, a new file will be opened
    * for the next record.
    */
  def shouldFlush(context: CommitContext)(implicit fs: FileSystem): Boolean
}

/**
  * @param tpo              the [[TopicPartitionOffset]] of the last record written
  * @param path             the path of the file that the struct was written to
  * @param count            the number of records written thus far to the file
  * @param createdTimestamp the time in milliseconds when the file was created/accessed the first time
  */
case class CommitContext(tpo: TopicPartitionOffset,
                         path: Path,
                         count: Long,
                         fileSize: Long,
                         createdTimestamp: Long)

/**
  * Default implementation of [[CommitPolicy]] that will flush the
  * output file under the following circumstances:
  *  - file size reaches limit
  *  - time since file was created
  *  - number of records is reached
  *
  * @param interval in millis
  */
case class DefaultCommitPolicy(fileSize: Option[Long],
                               interval: Option[FiniteDuration],
                               fileCount: Option[Long]) extends CommitPolicy {
  require(fileSize.isDefined || interval.isDefined || fileCount.isDefined)

  override def shouldFlush(context: CommitContext)(implicit fs: FileSystem): Boolean = {
    val open_time = System.currentTimeMillis() - context.createdTimestamp
    fileSize.exists(_ <= context.fileSize) ||
      interval.exists(_.toMillis <= open_time) ||
      fileCount.exists(_ <= context.count)
  }
}
datamountaineer/stream-reactor
kafka-connect-hive-1.1/src/main/scala/com/landoop/streamreactor/connect/hive/sink/staging/CommitPolicy.scala
Scala
apache-2.0
2,266
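An independent sketch of the flush decision above with the hive-connector types stripped away: flush as soon as any configured threshold (size, age, record count) trips. The Ctx case class and thresholds are illustrative, not the connector's real types.

import scala.concurrent.duration._

object FlushPolicySketch extends App {
  case class Ctx(count: Long, fileSize: Long, createdTs: Long)

  def shouldFlush(ctx: Ctx,
                  maxSize: Option[Long],
                  maxAge: Option[FiniteDuration],
                  maxCount: Option[Long]): Boolean = {
    val openTime = System.currentTimeMillis() - ctx.createdTs
    maxSize.exists(_ <= ctx.fileSize) ||
      maxAge.exists(_.toMillis <= openTime) ||
      maxCount.exists(_ <= ctx.count)
  }

  val ctx = Ctx(count = 1000, fileSize = 4 * 1024 * 1024, createdTs = System.currentTimeMillis())
  // 4 MiB written against a 1 MiB limit: flush.
  assert(shouldFlush(ctx, maxSize = Some(1024 * 1024), maxAge = None, maxCount = None))
  // Young file, under the record limit: keep writing.
  assert(!shouldFlush(ctx, maxSize = None, maxAge = Some(1.hour), maxCount = Some(10000)))
}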
/**
 * Licensed to Big Data Genomics (BDG) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The BDG licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.bdgenomics.adam.converters

import org.scalatest.FunSuite

class GenotypesToVariantsConverterSuite extends FunSuite {

  test("Simple test of integer RMS") {
    val v = new GenotypesToVariantsConverter
    assert(v.rms(List(1, 1)) === 1)
  }

  test("Simple test of floating point RMS") {
    val v = new GenotypesToVariantsConverter
    val rmsVal = v.rms(List(39.0, -40.0, 41.0))

    // floating point, so apply tolerances
    assert(rmsVal > 40.0 && rmsVal < 40.01)
  }

  test("Max genotype quality should lead to max variant quality") {
    val v = new GenotypesToVariantsConverter
    // if p = 1, then the rest of our samples don't matter
    val vq = v.variantQualityFromGenotypes(List(1.0, 0.0))

    // floating point, so apply tolerances
    assert(vq > 0.999 && vq < 1.001)
  }

  test("Genotype quality = 0.5 for two samples should lead to variant quality of 0.75") {
    val v = new GenotypesToVariantsConverter
    val vq = v.variantQualityFromGenotypes(List(0.5, 0.5))

    // floating point, so apply tolerances
    assert(vq > 0.745 && vq < 0.755)
  }
}
allenday/adam
adam-core/src/test/scala/org/bdgenomics/adam/converters/GenotypesToVariantsConverterSuite.scala
Scala
apache-2.0
1,880
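The suite above pins down the math without showing it. Implementations consistent with those assertions look like the sketch below, under two stated assumptions: rms is the plain root mean square, and variant quality is the complement of the product of the per-genotype error probabilities (1 - Π(1 - pᵢ)), which reproduces 0.75 for two 0.5 qualities and 1.0 whenever any quality is 1.0.

object VariantMathSketch extends App {
  def rms(values: Seq[Double]): Double =
    math.sqrt(values.map(v => v * v).sum / values.size)

  def variantQualityFromGenotypes(qualities: Seq[Double]): Double =
    1.0 - qualities.map(1.0 - _).product

  assert(rms(Seq(1.0, 1.0)) == 1.0)
  assert(math.abs(rms(Seq(39.0, -40.0, 41.0)) - 40.0) < 0.01)
  assert(variantQualityFromGenotypes(Seq(1.0, 0.0)) == 1.0)
  assert(math.abs(variantQualityFromGenotypes(Seq(0.5, 0.5)) - 0.75) < 0.005)
}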
package org.bfn.ninetynineprobs

object P97 {
  // TODO
}
bfontaine/99Scala
src/main/scala/P97.scala
Scala
mit
60
package org.im
package loader

import org.scalatest._
import cats._
import cats.implicits._
import cats.data._
import Validated._

class dslspec extends FlatSpec with Matchers {

  import Implicits._
  import com.lucidchart.open.relate.interp.Parameter._

  "A mapping" should "find all mappings" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("f1").directMove
      string("f2").directMove
    }
    tmaps.mappings.length should equal(2)
  }

  it should "run simple source to target mappings" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("sname").to("tname")
    }
    val (values, nas, errors) = tmaps.transform(Map("sname" -> Option("blah")), 1)
    assert(values.size == 1)
    values.get("tname").getOrElse(Some("")) shouldBe Some("blah")
  }

  it should "use a default value" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("sname").to("tname").ifNotPresent("sname", Some("default value"))
    }
    val (values, nas, errors) = tmaps.transform(Map("sname" -> None), 1)
    withClue("values:") { assert(values.size == 1) }
    withClue("output value:") { values("tname") shouldBe Some("default value") }
  }

  it should "always set the value to a constant when requested" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      long("iattr").to_("tattr").constant(Some(1))
    }
    val (values, nas, errors) = tmaps.transform(Map("iattr" -> Some("30")), 1)
    withClue("values:") { assert(values.size == 1) }
    withClue("output value:") { values("tattr") shouldBe Some(1) }
  }

  it should "always map to None using mapToNoValue" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("sname").to("tname").mapToNoValue
    }
    val (values, nas, errors) = tmaps.transform(Map("sname" -> Some("crazy value")), 1)
    assert(values.size == 1)
    values.get("tname").get shouldBe None
  }

  it should "map to the same column automatically if only the source was given for some typed combinators" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      long("sname").directMove
    }
    val (values, nas, errors) = tmaps.transform(Map("sname" -> Some("100")), 1)
    assert(values.size == 1)
    assert(values.contains("sname"))
    values("sname").getOrElse(0) shouldBe (100)
  }

  it should "convert string boolean to an integer" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      strbool("str2bool").directMove
    }
    val (values, nas, errors) = tmaps.transform(Map("str2bool" -> Some("true")), 1)
    assert(values.size == 1)
    assert(values.contains("str2bool"))
    values("str2bool").getOrElse(-1) shouldBe (1)
  }

  it should "ignore a mapping marked as ignore" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("cola").ignore
    }
    tmaps.mappings.length shouldBe (0)
    tmaps.allMappings.length shouldBe (1)
  }

  it should "return target column names" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("cola").directMove
      string("colb").to("targetb")
    }
    tmaps.targetCols should contain inOrderOnly ("cola", "targetb")
  }

  it should "not screw up the source and target if only the target is specified and there are rules" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      to[String]("cola")
      to[String]("colb").rule { ctx => ctx.none }
    }
    tmaps.mappings.find(_.target == "cola").map(_.target) shouldBe None
    tmaps.mappings.find(_.target == "colb").map(_.target) shouldBe Some("colb")
  }

  it should "not create multiple mappings when using combinators" in {
    object tmaps extends mappings("a", "b") {
      import sourcefirst._
      string("a").to("a")
      to[String]("cola").source("colasource").rule { ctx =>
        ctx.none
      }
    }
    tmaps.mappings.size shouldBe (2)
    tmaps.allMappings.size shouldBe (2)
  }

  "Mappings" should "detect mappings with duplicative priority rules" in {
    // Objects are initialized lazily in Scala, so reference tmaps.mappings to
    // force construction and trigger the duplicate-priority check (presumably
    // thrown during initialization).
    an[Exception] should be thrownBy {
      object tmaps extends mappings("a", "b") {
        to[String]("cola").rule { ctx => ctx.success("a") }
        ruleFor[String, String]("cola")(0) { ctx => ctx.none }
      }
      tmaps.mappings
    }
  }

  it should "contain rules that return success/none/failure" in {
    object tmaps extends mappings("a", "b") {
      to[String]("cola").rule { ctx => ctx.success("a") }
      to[String]("colb").rule { ctx =>
        import ctx._
        none
      }
      to[String]("colc").rule { ctx => ctx.notApplicable }
      to[String]("theerrortarget").rule { ctx => ctx.error("Could not find source data.") }
    }
    val (values, nas, errors) = tmaps.transform(Map(), 1)
    withClue("nas targets") { nas.size shouldBe (1) }
    withClue("error targets") { errors.size shouldBe (1) }
    withClue("# valid results") { values.size shouldBe (2) }
  }

  it should "fire rules in order and stop when a result is found" in {
    object tmaps extends mappings("a", "b") {
      to[String]("cola").
        rule { ctx => ctx.notApplicable }.
        rule { ctx => ctx.notApplicable }.
        rule { ctx => ctx.success("a") }
      ruleFor[String, String]("cola")(3) { ctx => ctx.error("You should never reach this rule.") }
    }
    val (values, nas, errors) = tmaps.transform(Map(), 1)
    withClue("values") { values.size shouldBe (1) }
    withClue("errors") { errors.size shouldBe (0) }
    withClue("nas") { nas.size shouldBe (0) }
  }

  import Implicits._
  import com.lucidchart.open.relate.interp._

  object tmaps1 extends mappings("a", "b") {
    import sourcefirst._
    to[String]("cola").
      rule { ctx =>
        import ctx._
        input.get("colasource") match {
          case Some(v) => success(v)
          case _ => none
        }
      }
    int("colbsource").to("colb")
    float("colcsource").to("colc")
    int("cold").directMove
  }

  "Transform" should "return transform results correctly" in {
    val (values, nas, errors) = tmaps1.transform(Map(
      "colasource" -> Option("colavalue"),
      "colbsource" -> Option("100"),
      "colcsource" -> Option("12.5"),
      "cold" -> Option("1")), 1)
    withClue("nas targets") { nas.size shouldBe (0) }
    withClue("error targets") { errors.size shouldBe (0) }
    withClue("# valid results") { values.size shouldBe (4) }
    withClue("cola") { values("cola") shouldBe Some("colavalue") }
    withClue("colb") { values("colb") shouldBe Some(100) }
    withClue("colc") { values("colc") shouldBe Some(12.5) }
    withClue("cold") { values("cold") shouldBe Some(1) }
  }

  it should "require a source attribute if it's called out in a mapping with an extractor" in {
    val (values, nas, errors) = tmaps1.transform(Map(
      "colasource" -> Option("colavalue"),
      "colbsource" -> Option("100"),
      "colcsource" -> Option("12.5")), 1)
    withClue("error targets") { errors.size shouldBe (1) }
    withClue("nas targets") { nas.size shouldBe (0) }
    withClue("# valid results") { values.size shouldBe (3) }
  }

  it should "return target system loader objects correctly" in {
    val (values, nas, errors) = tmaps1.transform(Map(
      "colasource" -> Option("colavalue"),
      "colbsource" -> Option("100"),
      "colcsource" -> Option("12.5"),
      "cold" -> Option("10")), 1)
    values.size shouldBe (4)
    errors.size shouldBe (0)
    nas.size shouldBe (0)
    val loaders = tmaps1.load(values)
    withClue("length") { loaders.size shouldBe (4) }
    withClue("loader object types") {
      assert(loaders("cola").isInstanceOf[StringParameter])
      assert(loaders("colb").isInstanceOf[IntParameter])
      assert(loaders("colc").isInstanceOf[FloatParameter])
    }
    val arglist = sequenceLikeUnsafe(Seq("colb", "colc",
"cola"), loaders) withClue("arglist order") { arglist.length shouldBe (3) assert(arglist(2).isInstanceOf[StringParameter]) assert(arglist(0).isInstanceOf[IntParameter]) assert(arglist(1).isInstanceOf[FloatParameter]) } } it should "inline dates, by default, should error if unable to convert, or NA if specified" in { object tmapsdates extends mappings("a", "b") { import sourcefirst._ date("dcol").to("target").required date("dcol2").to("target2").required date("dcol3", true).to("target3").required } val (values, nas, errors) = tmapsdates.transform(Map( "dcol" -> Some("2016-01-01T01:01:01"), "dcol2" -> Some("2016-01-01T01:01:01Z"), "dcol3" -> Some("2016-01-01T01:01:01")), 1) // 2 because dcol3 date() will be NA then a direct move rule runs last withClue("values:") { values.size shouldBe (2) } withClue("errors:") { errors.size shouldBe (1) } withClue("nas:") { nas.size shouldBe (0) } } it should "allow follow on rules for dates" in { val fallback = inputToDate(java.time.format.DateTimeFormatter.ISO_LOCAL_DATE_TIME) _ object tmapsdates extends mappings("a", "b") { import sourcefirst._ date("d").to("target").rule { ctx => import ctx._ toRuleResultC(false)(fallback(input.getAs[String]("d"))) } } val (values, nas, errors) = tmapsdates.transform(Map("d" -> Some("2016-01-01T01:01:01")), 1) values.size shouldBe (1) errors.size shouldBe (0) nas.size shouldBe (0) } it should "transform a long correctly" in { object test1 extends mappings("a", "b") { import sourcefirst._ long("cola").to("colavalue") } val (values, nas, errors) = test1.transform(Map("cola" -> Some("100")), 1) values.size shouldBe (1) errors.size shouldBe (0) nas.size shouldBe (0) values("colavalue") shouldBe Some(100) } "schema" should "capture input conversions and handle dynamic values correctly" in { object test1 extends mappings("a", "b") { override object ischema extends schema { slong("colasource") sdouble("colbsource") } to[String]("colatarget").rule(0) { ctx => import ctx._ val newValue = input.getAs[Long]("colasource").map(_ + 30.0) getOrElse -1.0 success(newValue.toString + "!") } to[Double]("colbtarget").rule(0) { ctx => import ctx._ ctx.colbsource[Double].filter(_ < 2.0).fold[RuleResult](ctx.notApplicable)(v => ctx.success(v + 100.0)) }.rule(1) { ctx => ctx.success(ctx.colbsource[Double].map(_ + 30.0) getOrElse -1.0) } } val (values, nas, errors) = test1.transform(Map( "colasource" -> Option("3"), "colbsource" -> Option("4.0")), 1) values.size shouldBe (2) errors.size shouldBe (0) nas.size shouldBe (0) values("colatarget") shouldBe Some("33.0!") values("colbtarget") shouldBe Some(34.0) } it should "allow the use of the schema dsl with rule1" in { object test1 extends mappings("a", "b") { override object ischema extends schema { slong("colasource").required sfloat("colbsource").required slong("colcsource") } to[Long]("colatarget").rule1[Long](0)("colasource") { (ctx, v) => ctx.success(v) } to[Float]("colbtarget").rule1[Float](0)("colbsource") { (ctx, v) => ctx.success(v) } to[Long]("colctarget").fromX[Long]("colcsource") } val (values, nas, errors) = test1.transform(Map( "colasource" -> Option("3"), "colbsource" -> Option("4.0"), "colcsource" -> Option("100")), 1) values.size shouldBe (3) errors.size shouldBe (0) nas.size shouldBe (0) values("colatarget") shouldBe Some(3) values("colbtarget") shouldBe Some(4.0) values("colctarget") shouldBe Some(100) } it should "return simple conversion errors if the input does not match" in { object test1 extends mappings("a", "b") { override object ischema extends schema { 
        sdate("datesource")
      }
      to[java.sql.Date]("datetarget").rule1[java.sql.Date](0)("datesource") { (ctx, v) => ctx.success(v) }
    }
    val (values, nas, errors) = test1.transform(Map(
      "datesource" -> Option("3")), 1)
    //println(s"$values\n$nas\n$errors")
    values.size shouldBe (0)
    errors.size shouldBe (1)
    nas.size shouldBe (0)
  }
}
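
// A minimal usage sketch (not part of the test suite): it only exercises DSL
// calls demonstrated above -- mappings, sourcefirst, the string/long
// combinators, to, directMove and transform -- and assumes the same
// (values, nas, errors) result shape. The column names are made up.
object dslUsageSketch {
  import Implicits._

  object example extends mappings("src", "tgt") {
    import sourcefirst._
    string("first_name").to("fname")
    long("age").directMove
  }

  // transform takes the source row plus a row id; `values` holds the
  // successfully mapped target columns.
  val (values, nas, errors) =
    example.transform(Map("first_name" -> Some("Jane"), "age" -> Some("41")), 1)
}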
aappddeevv/loader
csv/src/test/scala/org/im/loader/dsltest.scala
Scala
apache-2.0
12,649
package spire
package math

import java.lang.Long.numberOfLeadingZeros
import java.lang.Double.{ isInfinite, isNaN }
import java.math.{ MathContext, RoundingMode, BigInteger, BigDecimal => JBigDecimal }
import java.util.concurrent.atomic.AtomicReference

// These two imports are required by code further down in this file (the
// @tailrec loops and the ClassTag context bound on evaluateWith).
import scala.annotation.tailrec
import scala.math.{ ScalaNumber, ScalaNumericConversions }
import scala.reflect.ClassTag

import spire.Platform
import spire.algebra.{Eq, EuclideanRing, Field, IsAlgebraic, NRoot, Order, Ring, Sign, Signed}
import spire.algebra.Sign.{ Positive, Negative, Zero }
import spire.macros.Checked.checked
import spire.math.poly.{ Term, BigDecimalRootRefinement, RootFinder, Roots }
import spire.std.bigInt._
import spire.std.bigDecimal._
import spire.std.long._
import spire.syntax.std.seq._

/**
 * Algebraic provides an exact number type for algebraic numbers. Algebraic
 * numbers are roots of polynomials with rational coefficients. With it, we can
 * represent expressions involving addition, multiplication, division, n-roots
 * (eg. `sqrt` or `cbrt`), and roots of rational polynomials. So, it is similar
 * to [[Rational]], but adds roots as a valid, exact operation. The cost is that
 * this will not be as fast as [[Rational]] for many operations.
 *
 * In general, you can assume all operations on this number type are exact,
 * except for those that explicitly construct approximations to an Algebraic
 * number, such as `toBigDecimal`.
 *
 * For an overview of the ideas, algorithms, and proofs of this number type,
 * you can read the following papers:
 *
 *  - "On Guaranteed Accuracy Computation." C. K. Yap.
 *  - "Recent Progress in Exact Geometric Computation." C. Li, S. Pion, and C. K. Yap.
 *  - "A New Constructive Root Bound for Algebraic Expressions" by C. Li & C. K. Yap.
 *  - "A Separation Bound for Real Algebraic Expressions." C. Burnikel, et al.
 */
@SerialVersionUID(1L)
final class Algebraic private (val expr: Algebraic.Expr)
    extends ScalaNumber with ScalaNumericConversions with Serializable {
  import Algebraic.{ Zero, One, Expr, MinIntValue, MaxIntValue, MinLongValue, MaxLongValue, JBigDecimalOrder, roundExact, BFMSS, LiYap }

  /**
   * Returns an `Int` with the same sign as this algebraic number. Algebraic
   * numbers support exact sign tests, so this is guaranteed to be accurate.
   */
  def signum: Int = expr.signum

  /**
   * Returns the sign of this Algebraic number. Algebraic numbers support exact
   * sign tests, so this is guaranteed to be accurate.
   */
  def sign: Sign = Sign(signum)

  /**
   * Return a non-negative `Algebraic` with the same magnitude as this one.
   */
  def abs: Algebraic =
    if (this.signum < 0) -this else this

  def unary_- : Algebraic =
    new Algebraic(Expr.Neg(expr))

  def +(that: Algebraic): Algebraic =
    new Algebraic(Expr.Add(this.expr, that.expr))

  def -(that: Algebraic): Algebraic =
    new Algebraic(Expr.Sub(this.expr, that.expr))

  def *(that: Algebraic): Algebraic =
    new Algebraic(Expr.Mul(this.expr, that.expr))

  def /(that: Algebraic): Algebraic =
    new Algebraic(Expr.Div(this.expr, that.expr))

  /**
   * Returns an `Algebraic` whose value is just the integer part of
   * `this / that`. This operation is exact.
   */
  def quot(that: Algebraic): Algebraic =
    this /~ that

  /** An alias for [[quot]]. */
  def /~(that: Algebraic): Algebraic =
    Algebraic((this / that).toBigInt)

  /**
   * Returns an `Algebraic` whose value is the difference between `this` and
   * `(this /~ that) * that` -- the modulus.
   */
  def mod(that: Algebraic): Algebraic =
    this % that

  /** An alias for [[mod]]. */
  def %(that: Algebraic): Algebraic =
    this - (this /~ that) * that

  /**
   * Returns the square root of this number.
   */
  def sqrt: Algebraic = nroot(2)

  /** Returns the cube root of this number. */
  def cbrt: Algebraic = nroot(3)

  /** Returns the `k`-th root of this number. */
  def nroot(k: Int): Algebraic =
    if (k < 0) {
      new Algebraic(Expr.Div(Expr.ConstantLong(1), Expr.KRoot(this.expr, -k)))
    } else if (k > 0) {
      new Algebraic(Expr.KRoot(this.expr, k))
    } else {
      throw new ArithmeticException("divide by zero (0-root)")
    }

  /** Raise this number to the `k`-th power. */
  def pow(k: Int): Algebraic =
    if (k == Int.MinValue) {
      throw new ArithmeticException(s"illegal exponent (${Int.MinValue})")
    } else if (k == 0) {
      if (signum == 0) {
        throw new ArithmeticException("indeterminate result (0^0)")
      } else {
        One
      }
    } else if (k == 1) {
      this
    } else if (k < 0) {
      new Algebraic(Expr.Div(Expr.ConstantLong(1), this.pow(-k).expr))
    } else {
      new Algebraic(Expr.Pow(this.expr, k))
    }

  def < (that: Algebraic): Boolean = compare(that) < 0
  def > (that: Algebraic): Boolean = compare(that) > 0
  def <= (that: Algebraic): Boolean = compare(that) <= 0
  def >= (that: Algebraic): Boolean = compare(that) >= 0

  /**
   * Returns an integer with the same sign as `this - that`. Specifically, if
   * `this &lt; that`, then the sign is negative, if `this &gt; that`, then the
   * sign is positive, otherwise `this == that` and this returns 0.
   */
  def compare(that: Algebraic): Int =
    (this - that).signum

  /**
   * Returns `true` iff this Algebraic number is exactly 0.
   */
  def isZero: Boolean = signum == 0

  override def equals(that: Any): Boolean = that match {
    case (that: Algebraic) => this === that
    case (that: Real) => this.toReal == that
    case (that: Number) => this.compare(Algebraic(that.toBigDecimal)) == 0
    case (that: Rational) => this.compare(Algebraic(that)) == 0
    case (that: BigInt) => isWhole && toBigInt == that
    case (that: Natural) => isWhole && signum >= 0 && that == toBigInt
    case (that: SafeLong) => isWhole && that == this
    case (that: Complex[_]) => that == this
    case (that: Quaternion[_]) => that == this
    case (that: BigDecimal) =>
      try {
        toBigDecimal(that.mc) == that
      } catch {
        case ae: ArithmeticException => false
      }
    case _ => unifiedPrimitiveEquals(that)
  }

  def ===(that: Algebraic): Boolean =
    this.compare(that) == 0

  def =!=(that: Algebraic): Boolean =
    !(this === that)

  override def hashCode: Int =
    if (isWhole && isValidLong) {
      unifiedPrimitiveHashcode
    } else {
      val x = toBigDecimal(java.math.MathContext.DECIMAL64)
      x.underlying.unscaledValue.hashCode + 23 * x.scale.hashCode + 17
    }

  def toExprString: String = {
    import Expr._

    def recur(e: Expr): String = e match {
      case ConstantLong(n) => n.toString
      case ConstantDouble(n) => n.toString
      case ConstantBigDecimal(n) => n.toString
      case ConstantRational(n) => s"(${n})"
      case ConstantRoot(poly, i, _, _) => s"root($poly, $i)"
      case Neg(sub) => s"-$sub"
      case Add(lhs, rhs) => s"(${recur(lhs)}) + (${recur(rhs)})"
      case Sub(lhs, rhs) => s"(${recur(lhs)}) - (${recur(rhs)})"
      case Mul(lhs, rhs) => s"(${recur(lhs)}) * (${recur(rhs)})"
      case Div(lhs, rhs) => s"(${recur(lhs)}) / (${recur(rhs)})"
      case KRoot(sub, 2) => s"(${recur(sub)}).sqrt"
      case KRoot(sub, 3) => s"(${recur(sub)}).cbrt"
      case KRoot(sub, k) => s"(${recur(sub)}).nroot($k)"
      case Pow(sub, k) => s"${recur(sub)}.pow($k)"
    }

    recur(expr)
  }

  override def toString: String = {
    val approx = toBigDecimal(MathContext.DECIMAL64)
    if (this == Algebraic(approx)) {
      if (approx.signum == 0) {
        "Algebraic(0)"
      } else {
        s"Algebraic(${approx.bigDecimal.stripTrailingZeros})"
      }
    } else {
      s"Algebraic(~$approx)"
    }
  }

  /**
   * Returns the nearest, valid `Int` value to this Algebraic, without going
   * further away from 0 (eg.
   * truncation).
   *
   * If this `Algebraic` represented 1.2, then this would return 1. If this
   * represented -3.3, then this would return -3. If this value is greater than
   * `Int.MaxValue`, then `Int.MaxValue` is returned. If this value is less
   * than `Int.MinValue`, then `Int.MinValue` is returned.
   */
  def intValue: Int = {
    val n = toBigInt
    if (n < MinIntValue) Int.MinValue
    else if (n > MaxIntValue) Int.MaxValue
    else n.intValue
  }

  /**
   * Returns the nearest, valid `Long` value to this Algebraic, without going
   * further away from 0 (eg. truncation).
   *
   * If this `Algebraic` represented 1.2, then this would return 1. If this
   * represented -3.3, then this would return -3. If this value is greater than
   * `Long.MaxValue`, then `Long.MaxValue` is returned. If this value is less
   * than `Long.MinValue`, then `Long.MinValue` is returned.
   */
  def longValue: Long = {
    val n = toBigInt
    if (n < MinLongValue) Long.MinValue
    else if (n > MaxLongValue) Long.MaxValue
    else n.longValue
  }

  /**
   * Returns a `Float` that approximates this value. If the exponent is too
   * large to fit in a float, the `Float.PositiveInfinity` or
   * `Float.NegativeInfinity` is returned.
   */
  def floatValue: Float =
    toBigDecimal(MathContext.DECIMAL32).toFloat

  /**
   * Returns a `Double` that approximates this value. If the exponent is too
   * large to fit in a double, the `Double.PositiveInfinity` or
   * `Double.NegativeInfinity` is returned.
   */
  def doubleValue: Double =
    toBigDecimal(MathContext.DECIMAL64).toDouble

  /**
   * Returns the nearest, valid `BigInt` value to this Algebraic, without going
   * further away from 0 (eg. truncation).
   *
   * If this `Algebraic` represented 1.2, then this would return 1. If this
   * represented -3.3, then this would return -3.
   */
  def toBigInt: BigInt =
    toBigDecimal(0, RoundingMode.DOWN).toBigInt

  /**
   * Absolute approximation to `scale` decimal places with the given rounding
   * mode. Rounding is always exact.
   */
  def toBigDecimal(scale: Int, roundingMode: RoundingMode): BigDecimal =
    BigDecimal(roundExact(this, expr.toBigDecimal(scale + 2), scale, roundingMode))

  /**
   * Relative approximation to the precision specified in `mc` with the given
   * rounding mode. Rounding is always exact. The sign is always correct; the
   * sign of the returned `BigDecimal` matches the sign of the exact value this
   * `Algebraic` represents.
   *
   * @param mc the precision and rounding mode of the final result
   * @return an approximation to the value of this algebraic number
   */
  def toBigDecimal(mc: MathContext): BigDecimal = {
    import Expr._

    val roundingMode = mc.getRoundingMode

    def rec(e: Expr, digits: Int): JBigDecimal = e match {
      case ConstantLong(n) =>
        new JBigDecimal(n, new MathContext(digits, roundingMode))
      case ConstantDouble(n) =>
        new JBigDecimal(n, new MathContext(digits, roundingMode))
      case ConstantBigDecimal(n) =>
        n.bigDecimal.round(new MathContext(digits, roundingMode))
      case ConstantRational(n) =>
        val num = new JBigDecimal(n.numerator.toBigInteger)
        val den = new JBigDecimal(n.denominator.toBigInteger)
        num.divide(den, new MathContext(digits, roundingMode))
      case ConstantRoot(poly, _, lb, ub) =>
        // Ugh - on an airplane and can't trust BigDecimal's constructors.
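        // Build the coefficients through java.math.BigDecimal with
        // MathContext.UNLIMITED so no rounding can sneak into the conversion.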
        val poly0 = poly.map { n =>
          new BigDecimal(new JBigDecimal(n.bigInteger), MathContext.UNLIMITED)
        }
        BigDecimalRootRefinement(poly0, lb, ub, new MathContext(digits, roundingMode)).approximateValue
      case Neg(sub) =>
        rec(sub, digits).negate()
      case Add(_, _) | Sub(_, _) if e.signum == 0 =>
        JBigDecimal.ZERO
      case Add(lhs, rhs) =>
        val digits0 = digits + e.separationBound.decimalDigits.toInt + 1
        val lValue = rec(lhs, digits0)
        val rValue = rec(rhs, digits0)
        lValue.add(rValue, new MathContext(digits, roundingMode))
      case Sub(lhs, rhs) =>
        val digits0 = digits + e.separationBound.decimalDigits.toInt + 1
        val lValue = rec(lhs, digits0)
        val rValue = rec(rhs, digits0)
        lValue.subtract(rValue, new MathContext(digits, roundingMode))
      case Mul(lhs, rhs) =>
        val lValue = rec(lhs, digits + 1)
        val rValue = rec(rhs, digits + 2)
        lValue.multiply(rValue, new MathContext(digits, roundingMode))
      case Div(lhs, rhs) =>
        val rValue = rec(rhs, digits + 2)
        if (rValue.compareTo(JBigDecimal.ZERO) == 0)
          throw new ArithmeticException("divide by zero")
        val lValue = rec(lhs, digits + 2)
        lValue
          .divide(rValue, new MathContext(digits + 2, roundingMode))
          .round(new MathContext(digits, roundingMode))
      case KRoot(sub, k) =>
        Algebraic.nroot(rec(sub, digits + 2), k, new MathContext(digits + 2, roundingMode))
          .round(new MathContext(digits, roundingMode))
      case Pow(sub, k) =>
        val subValue = rec(sub, digits + ceil(log(k.toDouble)).toInt)
        subValue.pow(k, new MathContext(digits, roundingMode))
    }

    val approx = rec(expr, mc.getPrecision + 2)
    val newScale = approx.scale - approx.precision + mc.getPrecision
    val adjustedApprox =
      if (newScale <= approx.scale) approx.setScale(newScale + 1, RoundingMode.DOWN)
      else approx
    roundExact(this, adjustedApprox, newScale, roundingMode)
      .round(mc) // We perform a final round, since roundExact uses scales.
  }

  /**
   * Returns `true` iff this Algebraic exactly represents a valid `BigInt`.
   */
  def isWhole: Boolean =
    this == Algebraic(toBigInt)

  /**
   * Returns `true` if this Algebraic number is a whole number (no fractional
   * part) and fits within the bounds of an `Int`. That is, if `x.isValidInt`,
   * then `Algebraic(x.toInt) == x`.
   */
  override def isValidInt: Boolean = {
    val n = toBigInt
    (n <= MaxIntValue) &&
    (n >= MinIntValue) &&
    (this == Algebraic(n))
  }

  /**
   * Returns `true` if this Algebraic number is a whole number (no fractional
   * part) and fits within the bounds of a `Long`. That is, if `x.isValidLong`,
   * then `Algebraic(x.toLong) == x`.
   */
  def isValidLong: Boolean = {
    val n = toBigInt
    (n <= MaxLongValue) &&
    (n >= MinLongValue) &&
    (this == Algebraic(n))
  }

  /**
   * Returns `true` iff this is a rational expression (ie contains no n-root
   * expressions). Otherwise it is a radical expression and returns false.
   */
  def isRational: Boolean = expr.flags.isRational

  /**
   * If this is a rational expression, then it returns the exact value as a
   * [[Rational]]. Otherwise, this is a radical expression and `None` is
   * returned.
   */
  def toRational: Option[Rational] =
    if (expr.flags.isRational) {
      implicit val nroot: NRoot[Rational] with RootFinder[Rational] =
        new NRoot[Rational] with RootFinder[Rational] {
          private def fail = throw new ArithmeticException(s"Rational cannot support exact algebraic operations")
          def nroot(a: Rational, n: Int): Rational = fail
          def fpow(a: Rational, b: Rational): Rational = fail
          def findRoots(poly: Polynomial[Rational]): Roots[Rational] = fail
        }
      Some(evaluateWith[Rational])
    } else {
      None
    }

  /**
   * Evaluates this algebraic expression with a different number type.
   * All `Algebraic` numbers store the entire expression tree, so we can use this
   * to *replay* the stored expression using a different type. This will
   * accumulate errors as if the number type had been used from the beginning
   * and is only really suitable for more exact number types, like [[Real]].
   *
   * TODO: Eq/ClassTag come from poly.map - would love to get rid of them.
   */
  def evaluateWith[A: Field: NRoot: RootFinder: Eq: ClassTag](implicit conv: ConvertableTo[A]): A = {
    import spire.syntax.field._
    import spire.syntax.nroot._
    import Expr._

    def eval(e: Expr): A = e match {
      case ConstantLong(n) => conv.fromLong(n)
      case ConstantDouble(n) => conv.fromDouble(n)
      case ConstantBigDecimal(n) => conv.fromBigDecimal(n)
      case ConstantRational(n) => conv.fromRational(n)
      case ConstantRoot(poly, i, _, _) => RootFinder[A].findRoots(poly.map(conv.fromBigInt)).get(i)
      case Neg(n) => -eval(n)
      case Add(a, b) => eval(a) + eval(b)
      case Sub(a, b) => eval(a) - eval(b)
      case Mul(a, b) => eval(a) * eval(b)
      case Div(a, b) => eval(a) / eval(b)
      case KRoot(a, k) => eval(a).nroot(k)
      case Pow(a, k) => eval(a).pow(k)
    }

    eval(expr)
  }

  /**
   * Returns an exact [[Real]] representation of this number.
   */
  def toReal: Real = evaluateWith[Real]

  // ScalaNumber. Because of course all Scala numbers are wrappers.
  def underlying: AnyRef = this
}

object Algebraic extends AlgebraicInstances {

  /** Returns an Algebraic expression equal to 0. */
  val Zero: Algebraic = new Algebraic(Expr.ConstantLong(0))

  /** Returns an Algebraic expression equal to 1. */
  val One: Algebraic = new Algebraic(Expr.ConstantLong(1))

  /** Returns an Algebraic expression equivalent to `n`. */
  implicit def apply(n: Int): Algebraic =
    new Algebraic(Expr.ConstantLong(n))

  /** Returns an Algebraic expression equivalent to `n`. */
  def apply(n: Long): Algebraic =
    new Algebraic(Expr.ConstantLong(n))

  /**
   * Returns an Algebraic expression equivalent to `n`, if `n` is finite. If
   * `n` is either infinite or `NaN`, then an `IllegalArgumentException` is
   * thrown.
   */
  def apply(n: Float): Algebraic =
    Algebraic(n.toDouble)

  /**
   * Returns an Algebraic expression equivalent to `n`, if `n` is finite. If
   * `n` is either infinite or `NaN`, then an `IllegalArgumentException` is
   * thrown.
   */
  implicit def apply(n: Double): Algebraic =
    if (java.lang.Double.isInfinite(n)) {
      throw new IllegalArgumentException("cannot construct infinite Algebraic")
    } else if (java.lang.Double.isNaN(n)) {
      throw new IllegalArgumentException("cannot construct Algebraic from NaN")
    } else {
      new Algebraic(Expr.ConstantDouble(n))
    }

  /** Returns an Algebraic expression equivalent to `n`. */
  def apply(n: BigInt): Algebraic =
    new Algebraic(Expr.ConstantBigDecimal(BigDecimal(n)))

  /** Returns an Algebraic expression equivalent to `n`. */
  def apply(n: BigDecimal): Algebraic =
    new Algebraic(Expr.ConstantBigDecimal(n))

  /** Returns an Algebraic expression equivalent to `n`. */
  def apply(n: Rational): Algebraic =
    new Algebraic(Expr.ConstantRational(n))

  /**
   * Returns an Algebraic expression whose value is equivalent to the `i`-th
   * real root of the [[Polynomial]] `poly`. If `i` is negative or does not
   * index a real root (eg the value is greater than or equal to the number of
   * real roots) then an `ArithmeticException` is thrown. Roots are indexed
   * starting at 0. So if there are 3 roots, then they are indexed as 0, 1,
   * and 2.
   *
   * @param poly the polynomial containing at least i real roots
   * @param i    the index (0-based) of the root
   * @return an algebraic whose value is the i-th root of the polynomial
   */
  def root(poly: Polynomial[Rational], i: Int): Algebraic = {
    if (i < 0) {
      throw new ArithmeticException(s"invalid real root index: $i")
    } else {
      val zpoly = Roots.removeFractions(poly)
      val intervals = Roots.isolateRoots(zpoly)
      if (i >= intervals.size) {
        throw new ArithmeticException(s"cannot extract root $i, there are only ${intervals.size} roots")
      }
      intervals(i) match {
        case Point(value) =>
          new Algebraic(Expr.ConstantRational(value))
        case Bounded(lb, ub, _) =>
          new Algebraic(Expr.ConstantRoot(zpoly, i, lb, ub))
        case _ =>
          throw new RuntimeException("invalid isolated root interval")
      }
    }
  }

  /**
   * Returns all of the real roots of the given polynomial, in order from
   * smallest to largest.
   *
   * @param poly the polynomial to return the real roots of
   * @return all the real roots of `poly`
   */
  def roots(poly: Polynomial[Rational]): Vector[Algebraic] = {
    val zpoly = Roots.removeFractions(poly)
    val intervals = Roots.isolateRoots(zpoly)
    intervals.zipWithIndex map {
      case (Point(value), _) =>
        new Algebraic(Expr.ConstantRational(value))
      case (Bounded(lb, ub, _), i) =>
        new Algebraic(Expr.ConstantRoot(zpoly, i, lb, ub))
      case x =>
        throw new RuntimeException(s"invalid isolated root interval: $x")
    }
  }

  /**
   * Returns an Algebraic whose value is the real root within (lb, ub). This is
   * potentially unsafe, as we assume that exactly 1 real root lies within the
   * interval, otherwise the results are undetermined.
   *
   * @param poly a polynomial with a real root within (lb, ub)
   * @param i    the index of the root in the polynomial
   * @param lb   the lower bound of the open interval containing the root
   * @param ub   the upper bound of the open interval containing the root
   */
  def unsafeRoot(poly: Polynomial[BigInt], i: Int, lb: Rational, ub: Rational): Algebraic =
    new Algebraic(Expr.ConstantRoot(poly, i, lb, ub))

  /**
   * Returns an Algebraic expression equivalent to `BigDecimal(n)`. If `n` is
   * not parseable as a `BigDecimal` then an exception is thrown.
   */
  def apply(n: String): Algebraic =
    Algebraic(BigDecimal(new JBigDecimal(n)))

  /**
   * The [[Algebraic]] expression AST. `Algebraic` simply stores an expression
   * tree representing all operations performed on it. We then use this tree to
   * deduce certain properties about the algebraic expression and use them to
   * perform exact sign tests, compute approximations, etc.
   *
   * Generally, this should be regarded as an internal implementation detail of
   * `Algebraic`.
   */
  sealed abstract class Expr extends Serializable {
    import Expr._

    protected def flagBits: Int

    /**
     * A set of flags we can quickly compute for an [[Algebraic]] expression.
     *
     * @note we have to do this round-about trip between flagsBits and flags
     * because of
     */
    def flags: Flags = new Flags(flagBits)

    private val bounds: Platform.TrieMap[ZeroBoundFunction, Any] = Platform.TrieMap()

    /**
     * Returns the bound for `zbf`, using a cached value if it is available.
     */
    def getBound(zbf: ZeroBoundFunction): zbf.Bound =
      bounds.getOrElseUpdate(zbf, zbf(this)).asInstanceOf[zbf.Bound]

    @volatile
    private var cachedDegreeBound: Long = 0L

    private def radicalNodes(): Set[KRoot] = {
      val childRadicals = children.foldLeft(Set.empty[KRoot]) { (acc, child) =>
        acc ++ child.radicalNodes()
      }
      val radicals = this match {
        case expr @ KRoot(sub, k) => childRadicals + expr
        case _ => childRadicals
      }
      if (cachedDegreeBound == 0L) {
        cachedDegreeBound = radicals.foldLeft(1L) { (acc, kroot) =>
          checked { acc * kroot.k }
        }
      }
      radicals
    }

    /**
     * Returns a bound on the degree of this expression.
     */
    def degreeBound: Long = {
      if (cachedDegreeBound == 0L)
        radicalNodes()
      cachedDegreeBound
    }

    /**
     * Returns the BFMSS separation bound.
     */
    def bfmssBound: BitBound =
      new BitBound(getBound(BFMSS).getBitBound(degreeBound))

    /**
     * Returns the Li & Yap separation bound.
     */
    def liYapBound: BitBound =
      new BitBound(getBound(LiYap).getBitBound(degreeBound))

    /**
     * Returns a separation bound for this expression as a bit bound. A
     * separation bound is a lower-bound on the value of this expression that
     * is only valid if this expression is not 0. This bound can thus be used
     * to determine if this value is actually 0 and, if not, the sign, by
     * simply approximating the expression with enough accuracy that it falls
     * on one side or the other of the separation bound.
     */
    def separationBound: BitBound =
      bfmssBound min liYapBound

    /**
     * Returns an absolute approximation to this expression as a BigDecimal
     * that is accurate up to +/- 10^-digits.
     */
    def toBigDecimal(digits: Int): JBigDecimal

    /**
     * Returns an upper bound on the absolute value of this expression as a
     * bit bound.
     */
    def upperBound: BitBound

    /**
     * Returns a lower bound on the absolute value of this expression as a
     * bit bound.
     *
     * TODO: We could do better here wrt addition (need a fastSignum: Option[Int])
     */
    def lowerBound: BitBound = -separationBound

    /** Returns an integer with the same sign as this expression. */
    def signum: Int

    /**
     * Returns a list of the children of this expression. A child is a
     * sub-expression required by this expression. For instance, `Add` has 2
     * children, the left-hand and right-hand side sub-expressions. A numeric
     * literal expression, such as `ConstantDouble` or `ConstantRational`, has
     * no children.
     */
    def children: List[Expr]
  }

  object Expr {

    /**
     * A set of flags for algebraic expressions, so we can quickly determine
     * some properties, like whether the expression is rational, radical, what
     * types of leaf nodes it has, etc. This is used to help guide algorithmic
     * choices, such as what separation bound to use.
     */
    final class Flags(val bits: Int) extends AnyVal {
      import Flags._

      /** Returns the union of flags `this` and `that`. */
      def | (that: Flags): Flags = new Flags(bits | that.bits)

      private def check(n: Int): Boolean = (bits & n) != 0

      /** Returns `true` iff this expression is composed only of rational operations. */
      def isRational: Boolean = !isRadical

      /** Returns `true` iff this expression contains an n-th root operation. */
      def isRadical: Boolean = check(RadicalFlag)

      /** Returns `true` iff this expression contains a `ConstantDouble` leaf node. */
      def hasDoubleLeaf: Boolean = check(HasDoubleLeaf)

      /** Returns `true` iff this expression contains a `ConstantBigDecimal` leaf node. */
      def hasBigDecimalLeaf: Boolean = check(HasBigDecimalLeaf)

      /** Returns `true` iff this expression contains a `ConstantRational` leaf node.
       */
      def hasRationalLeaf: Boolean = check(HasRationalLeaf)
    }

    object Flags {
      final val RadicalFlag = 1
      final val HasDoubleLeaf = 2
      final val HasBigDecimalLeaf = 4
      final val HasRationalLeaf = 8

      final val IntegerLeaf: Flags = new Flags(0)
      final val DoubleLeaf: Flags = new Flags(HasDoubleLeaf)
      final val BigDecimalLeaf: Flags = new Flags(HasBigDecimalLeaf)
      final val RationalLeaf: Flags = new Flags(HasRationalLeaf)
      final val IsRadical: Flags = new Flags(RadicalFlag)
    }

    /** Constant expressions are leaf nodes, containing literal numbers. */
    sealed abstract class Constant[A] extends Expr {
      def value: A
      def children: List[Expr] = Nil
    }

    /** Unary expressions contain only a single child expression. */
    sealed abstract class UnaryExpr extends Expr {
      val sub: Expr
      def children: List[Expr] = sub :: Nil
    }

    /** Binary expressions contain 2 child expressions. */
    sealed abstract class BinaryExpr extends Expr {
      val lhs: Expr
      val rhs: Expr
      val flagBits: Int = (lhs.flags | rhs.flags).bits
      def children: List[Expr] = lhs :: rhs :: Nil
    }

    @SerialVersionUID(0L)
    case class ConstantLong(value: Long) extends Constant[Long] {
      def flagBits: Int = Flags.IntegerLeaf.bits

      def upperBound: BitBound =
        if (value == 0L) new BitBound(0L)
        else if (value == Long.MinValue) new BitBound(64)
        else new BitBound(64 - numberOfLeadingZeros(abs(value) - 1))

      def signum: Int = value.signum

      def toBigDecimal(digits: Int): JBigDecimal =
        new JBigDecimal(value).setScale(digits, RoundingMode.HALF_UP)
    }

    @SerialVersionUID(0L)
    case class ConstantDouble(value: Double) extends Constant[Double] {
      def flagBits: Int = Flags.DoubleLeaf.bits

      def upperBound: BitBound =
        if (value == 0D) {
          new BitBound(0)
        } else {
          new BitBound(ceil(log(abs(value))).toLong)
        }

      def signum: Int =
        if (value < 0D) -1
        else if (value > 0D) 1
        else 0

      def toBigDecimal(digits: Int): JBigDecimal =
        new JBigDecimal(value).setScale(digits, RoundingMode.HALF_UP)
    }

    @SerialVersionUID(0L)
    case class ConstantBigDecimal(value: BigDecimal) extends Constant[BigDecimal] {
      def flagBits: Int = Flags.BigDecimalLeaf.bits

      def upperBound: BitBound =
        if (value.signum == 0) {
          new BitBound(0)
        } else {
          // We just need a couple of digits, really.
          val mc = new MathContext(4, RoundingMode.UP)
          new BitBound(ceil(log(value.abs(mc))).toLong)
        }

      def signum: Int = value.signum

      def toBigDecimal(digits: Int): JBigDecimal =
        value.bigDecimal.setScale(digits, RoundingMode.HALF_UP)
    }

    @SerialVersionUID(0L)
    case class ConstantRational(value: Rational) extends Constant[Rational] {
      def flagBits: Int = Flags.RationalLeaf.bits

      def upperBound: BitBound =
        new BitBound(value.numerator.abs.bitLength - value.denominator.bitLength + 1)

      def signum: Int = value.signum

      def toBigDecimal(digits: Int): JBigDecimal = {
        val num = new JBigDecimal(value.numerator.toBigInteger)
        val den = new JBigDecimal(value.denominator.toBigInteger)
        num.divide(den, digits, RoundingMode.DOWN)
      }
    }

    @SerialVersionUID(0L)
    case class ConstantRoot(poly: Polynomial[BigInt], i: Int, lb: Rational, ub: Rational) extends Constant[Polynomial[BigInt]] {
      def value: Polynomial[BigInt] = poly

      def flagBits: Int = Flags.IsRadical.bits

      def upperBound: BitBound =
        if (ub.signum > 0) {
          new BitBound(ub.numerator.bitLength - ub.denominator.bitLength + 1)
        } else {
          new BitBound(lb.numerator.abs.bitLength - lb.denominator.bitLength + 1)
        }

      def signum: Int =
        if (lb.signum != 0) lb.signum
        else ub.signum

      private val refinement: AtomicReference[BigDecimalRootRefinement] = {
        val poly0 = poly.map { n =>
          new BigDecimal(new JBigDecimal(n.bigInteger), MathContext.UNLIMITED)
        }
        new AtomicReference(BigDecimalRootRefinement(poly0, lb, ub))
      }

      def toBigDecimal(digits: Int): JBigDecimal = {
        val oldRefinement = refinement.get
        val newRefinement = oldRefinement.refine(digits)
        refinement.set(newRefinement)
        newRefinement.approximateValue
      }

      def lead: BigInt = poly.maxTerm.coeff
      def tail: BigInt = poly.minTerm.coeff
    }

    @SerialVersionUID(0L)
    case class Neg(sub: Expr) extends UnaryExpr {
      def flagBits: Int = sub.flags.bits
      def upperBound: BitBound = sub.upperBound
      def signum: Int = -sub.signum
      def toBigDecimal(digits: Int): JBigDecimal =
        sub.toBigDecimal(digits).negate()
    }

    @SerialVersionUID(0L)
    sealed abstract class AddOrSubExpr extends BinaryExpr {
      def upperBound: BitBound =
        new BitBound(max(lhs.upperBound.bitBound, rhs.upperBound.bitBound) + 1)

      lazy val signum: Int = {
        val maxDigits = separationBound.decimalDigits + 1

        val approxOnly = maxDigits > Int.MaxValue

        // An adaptive algorithm to find the sign. Rather than just compute
        // this number to `maxDigits` precision, we start with a smaller
        // precision and keep adding digits until we get one that isn't 0.
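        // If the exact value is nonzero, the separation bound guarantees an
        // approximation to maxDigits digits cannot collapse to 0, so the loop
        // below can safely conclude "zero" once it reaches maxDigits.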
        @tailrec
        def loop(digits0: Long): Int = {
          val digits = min(digits0, min(maxDigits, Int.MaxValue)).toInt
          val approx = toBigDecimal(digits + 1).setScale(digits, RoundingMode.DOWN)
          if (approx.signum != 0 || digits >= maxDigits) {
            approx.signum
          } else if (digits == Int.MaxValue) {
            throw new ArithmeticException("required precision to calculate sign is too high")
          } else {
            loop(2 * digits0)
          }
        }

        loop(4)
      }

      def toBigDecimal(digits: Int): JBigDecimal = {
        val lValue = lhs.toBigDecimal(digits + 1)
        val rValue = rhs.toBigDecimal(digits + 1)
        val sum = this match {
          case (_: Add) => lValue.add(rValue)
          case (_: Sub) => lValue.subtract(rValue)
        }
        val result = sum.setScale(digits, RoundingMode.DOWN)
        result
      }
    }

    @SerialVersionUID(0L)
    case class Add(lhs: Expr, rhs: Expr) extends AddOrSubExpr

    @SerialVersionUID(0L)
    case class Sub(lhs: Expr, rhs: Expr) extends AddOrSubExpr

    @SerialVersionUID(0L)
    case class Mul(lhs: Expr, rhs: Expr) extends BinaryExpr {
      def upperBound: BitBound = lhs.upperBound + rhs.upperBound

      def signum: Int = lhs.signum * rhs.signum

      def toBigDecimal(digits: Int): JBigDecimal = {
        val lDigits = checked(rhs.upperBound.decimalDigits + digits + 1)
        val rDigits = checked(lhs.upperBound.decimalDigits + digits + 1)
        if (lDigits >= Int.MaxValue || rDigits >= Int.MaxValue) {
          throw new IllegalArgumentException("required precision is too high")
        } else {
          val lValue = lhs.toBigDecimal(lDigits.toInt)
          val rValue = rhs.toBigDecimal(rDigits.toInt)
          lValue.multiply(rValue).setScale(digits, RoundingMode.DOWN)
        }
      }
    }

    @SerialVersionUID(0L)
    case class Div(lhs: Expr, rhs: Expr) extends BinaryExpr {
      def upperBound: BitBound = lhs.upperBound - rhs.lowerBound

      def signum: Int =
        if (rhs.signum == 0) {
          throw new ArithmeticException("divide by 0")
        } else {
          lhs.signum * rhs.signum
        }

      def toBigDecimal(digits: Int): JBigDecimal = checked {
        val lDigits = digits + 2 - rhs.lowerBound.decimalDigits
        val rDigits = max(
          1 - rhs.lowerBound.decimalDigits,
          digits + 4 - 2 * rhs.lowerBound.decimalDigits + lhs.upperBound.decimalDigits
        )
        if (lDigits >= Int.MaxValue || rDigits >= Int.MaxValue) {
          throw new IllegalArgumentException("required precision is too high")
        } else {
          val lValue = lhs.toBigDecimal(lDigits.toInt)
          val rValue = rhs.toBigDecimal(rDigits.toInt)
          val quotient = lValue.divide(rValue, digits + 1, RoundingMode.DOWN)
          quotient.setScale(digits, RoundingMode.DOWN)
        }
      }
    }

    @SerialVersionUID(0L)
    case class KRoot(sub: Expr, k: Int) extends UnaryExpr {
      val flagBits: Int = (sub.flags | Flags.IsRadical).bits

      def upperBound: BitBound = (sub.upperBound + 1) / 2

      def signum: Int = {
        val s = sub.signum
        if (s >= 0) s
        else throw new ArithmeticException(s"$k-root of negative number")
      }

      def toBigDecimal(digits: Int): JBigDecimal = {
        val digits0 = max(
          checked(digits + 1),
          checked(1 - (sub.lowerBound.decimalDigits + 1) / 2)
        )
        if (digits0 >= Int.MaxValue) {
          throw new IllegalArgumentException("required precision is too high")
        } else {
          val value = sub.toBigDecimal(digits0.toInt)
          Algebraic.nroot(value, k, digits, RoundingMode.DOWN)
        }
      }

      // To avoid multiple traversals during degreeBound, we cache the hashCode
      // for KRoots.
      override lazy val hashCode: Int =
        sub.hashCode * 23 + k * 29 + 13
    }

    @SerialVersionUID(0L)
    case class Pow(sub: Expr, k: Int) extends UnaryExpr {
      require(k > 1)

      def flagBits: Int = sub.flags.bits

      def upperBound: BitBound = sub.upperBound * k

      def signum: Int = {
        val s = sub.signum
        if (s == 0) {
          if (k < 0) throw new ArithmeticException("divide by 0")
          else if (k == 0) throw new ArithmeticException("indeterminate")
          else 0
        } else if (k % 2 == 0) {
          if (s < 0) 1 else s
        } else {
          s
        }
      }

      def toBigDecimal(digits: Int): JBigDecimal = {
        // We could possibly do better here. Investigate.
        val height = 32 - java.lang.Integer.numberOfLeadingZeros(k - 1) // ceil(lg2(k))
        val maxDigits = checked(digits + height * (1 + sub.upperBound.decimalDigits))
        if (maxDigits >= Int.MaxValue) {
          throw new IllegalArgumentException("required precision is too high")
        } else {
          val leafValue = sub.toBigDecimal(maxDigits.toInt)
          leafValue.pow(k)
        }
      }
    }
  }

  /**
   * A bit bound represents either an upper or lower bound as some
   * power of 2. Specifically, the bound is typically either `2^bitBound` or
   * `2^-bitBound`.
   */
  final class BitBound(val bitBound: Long) extends AnyVal {
    import BitBound.bitsToDecimalDigits

    /**
     * Returns the minimum number of absolute decimal digits required to
     * represent this separation bound.
     */
    def decimalDigits: Long = bitsToDecimalDigits(bitBound)

    def unary_- : BitBound = new BitBound(-bitBound)

    def +(that: BitBound): BitBound = new BitBound(this.bitBound + that.bitBound)
    def -(that: BitBound): BitBound = new BitBound(this.bitBound - that.bitBound)
    def *(that: BitBound): BitBound = new BitBound(this.bitBound * that.bitBound)
    def /(that: BitBound): BitBound = new BitBound(this.bitBound / that.bitBound)

    def +(rhs: Int): BitBound = new BitBound(this.bitBound + rhs)
    def -(rhs: Int): BitBound = new BitBound(this.bitBound - rhs)
    def *(rhs: Int): BitBound = new BitBound(this.bitBound * rhs)
    def /(rhs: Int): BitBound = new BitBound(this.bitBound / rhs)

    def min(that: BitBound): BitBound =
      if (bitBound < that.bitBound) this else that

    override def toString: String = s"BitBound($bitBound)"
  }

  object BitBound {
    private val Epsilon: Double = 2.220446049250313E-16
    private val FudgeFactor: Double = 1D + 4D * Epsilon
    private val lg2ToLg10: Double = log(2, 10) * FudgeFactor

    private def bitsToDecimalDigits(n: Long): Long =
      ceil(n.toDouble * lg2ToLg10).toLong

    final def apply(n: Int): BitBound = new BitBound(n)
  }

  /**
   * Returns a number that is approximately equal to `x.pow(1/n)`. This number
   * is useful as initial values in converging n-root algorithms, but not as a
   * general purpose n-root algorithm. There are no guarantees about the
   * accuracy here.
   */
  final def nrootApprox(x: JBigDecimal, n: Int): JBigDecimal = {
    // Essentially, we'd like to just find `x.doubleValue.pow(1D / n)`, but x
    // may not be approximable as a finite Double (eg. exponent is larger than
    // 308). So, we basically treat x as a number `a*10^(i+j)`, where
    // `a*10^i` is approximable as a Double and `j % n == 0`. Then, we can
    // approximate the n-th root as `pow(a*10^i, 1 / n) * 10^(j/n)`.

    // If n > ~308, then we could end up with an "approximate" value that is
    // an Infinity, which is no good. So, we approximate all roots > 306 with
    // 306-th root.
    val k = min(n, 306)
    // We need to ensure that the scale of our approximate number leaves `j`
    // evenly divisible by n.
    // So, we start by calculating the scale required to
    // put the decimal place after the first digit.
    val width = (ceil(x.unscaledValue.bitLength * log(2) / log(10)) - 1).toInt
    // We then add in (x.scale - width) % n to our initial scale so that the
    // remaining exponent is divisible by n.
    val safeWidth = width + (x.scale - width) % k
    val approx = new JBigDecimal(x.unscaledValue.abs, safeWidth).doubleValue
    new JBigDecimal(x.signum * pow(approx, 1D / k))
      .scaleByPowerOfTen(-(x.scale - safeWidth) / k)
      .round(MathContext.DECIMAL64)
  }

  /**
   * Approximates the n-th root using Newton's method. Rather than using a
   * fixed epsilon, it may use an adaptive epsilon, provided by `getEps`. This
   * function takes the previous approximation, and returns the epsilon as
   * `pow(10, -getEps(prev))`. This allows us to use the same algorithm for
   * both absolute and relative precision approximations. Absolute
   * approximations just return a fixed epsilon from `getEps`, whereas a
   * relative approximation returns an adaptive one, that uses the previous
   * value to guide the required epsilon.
   */
  private final def nroot(signedValue: JBigDecimal, k: Int)(getEps: JBigDecimal => Int): JBigDecimal = {
    if (signedValue.compareTo(JBigDecimal.ZERO) == 0)
      return JBigDecimal.ZERO
    val value = signedValue.abs
    val n = new JBigDecimal(k)

    @tailrec
    def loop(prev: JBigDecimal, prevDigits: Int, prevEps: JBigDecimal): JBigDecimal = {
      val digits = getEps(prev)
      val eps =
        if (digits == prevDigits) prevEps
        else JBigDecimal.ONE.movePointLeft(digits)
      val prevExp = prev.pow(k - 1)
      val delta = value
        .divide(prevExp, digits, RoundingMode.HALF_UP)
        .subtract(prev)
        .divide(n, digits, RoundingMode.HALF_UP)
      if (delta.abs.compareTo(eps) <= 0) prev
      else loop(prev.add(delta), digits, eps)
    }

    val init = nrootApprox(value, k)
    val unsignedResult = loop(init, Int.MinValue, JBigDecimal.ZERO)
    if (signedValue.signum < 0) unsignedResult.negate
    else unsignedResult
  }

  private val bits2dec: Double = log(2, 10)

  /**
   * Returns a relative approximation of the n-th root of `value`, up to
   * the number of digits specified by `mc`. This only uses the rounding mode
   * to chop-off the few remaining digits after the approximation, so may be
   * inaccurate.
   */
  final def nroot(value: JBigDecimal, n: Int, mc: MathContext): JBigDecimal = {
    val result = nroot(value, n) { x =>
      x.scale - ceil(x.unscaledValue.bitLength * bits2dec).toInt + mc.getPrecision + 1
    }
    result.round(mc)
  }

  /**
   * Returns an absolute approximation of the n-th root of `value`, up to
   * `scale` digits past the decimal point. This only uses the rounding mode
   * to chop-off the few remaining digits after the approximation, so may be
   * inaccurate.
   */
  final def nroot(value: JBigDecimal, n: Int, scale: Int, roundingMode: RoundingMode): JBigDecimal =
    nroot(value, n)(_ => scale + 1).setScale(scale, roundingMode)

  private implicit val JBigDecimalOrder: Order[JBigDecimal] = new Order[JBigDecimal] {
    def compare(x: JBigDecimal, y: JBigDecimal): Int = x compareTo y
  }

  /**
   * Rounds an approximation (`approx`) to the `exact` Algebraic value using
   * the given `scale` and `RoundingMode` (`mode`). This will always be
   * accurate for any algebraic number. So, if `exact` represents 0.15 and the
   * rounding mode is set to `HALF_UP` with a scale of 1, then this is
   * guaranteed to round up to 0.2.
   *
   * @param exact  the exact value to use as a reference for tricky cases
   * @param approx the approximate value to round
   * @param scale  the final scale of the result
   * @param mode   the rounding mode to use
   */
  private def roundExact(exact: Algebraic, approx: JBigDecimal, scale: Int, mode: RoundingMode): JBigDecimal = {
    import RoundingMode.{ CEILING, FLOOR, UP }

    if (approx.signum == 0) {
      // If the sign is 0, then we deal with it here.
      mode match {
        case UP | CEILING if exact.signum > 0 =>
          new JBigDecimal(BigInteger.ONE, scale)
        case UP | FLOOR if exact.signum < 0 =>
          new JBigDecimal(BigInteger.ONE.negate, scale)
        case _ =>
          approx.setScale(scale, RoundingMode.DOWN)
      }
    } else if (approx.signum > 0) {
      roundPositive(exact, approx, scale, mode)
    } else {
      val adjustedMode = mode match {
        case CEILING => FLOOR
        case FLOOR => CEILING
        case _ => mode
      }
      roundPositive(-exact, approx.abs, scale, adjustedMode).negate()
    }
  }

  private def roundPositive(exact: Algebraic, approx: JBigDecimal, scale: Int, mode: RoundingMode): JBigDecimal = {
    import RoundingMode.{ CEILING, FLOOR, DOWN, UP, HALF_DOWN, HALF_UP, HALF_EVEN, UNNECESSARY }

    val cutoff = approx.scale - scale
    if (cutoff == 0) {
      // Nothing to do here.
      approx
    } else if (cutoff < 0) {
      // Just add some 0s and we're done!
      approx.setScale(scale, RoundingMode.DOWN)
    } else if (cutoff > 18) {
      // We'd like to work with Long arithmetic, if possible. Our rounding is
      // exact anyways, so it doesn't hurt to remove some digits.
      roundPositive(exact, approx.setScale(scale + 18, RoundingMode.DOWN), scale, mode)
    } else {
      val unscale = spire.math.pow(10L, cutoff.toLong)
      val Array(truncatedUnscaledValue, bigRemainder) =
        approx.unscaledValue.divideAndRemainder(BigInteger.valueOf(unscale))
      val truncated = new JBigDecimal(truncatedUnscaledValue, scale)
      def epsilon: JBigDecimal = new JBigDecimal(BigInteger.ONE, scale)
      val remainder = bigRemainder.longValue
      val rounded = mode match {
        case UNNECESSARY =>
          truncated

        case HALF_DOWN | HALF_UP | HALF_EVEN =>
          val dangerZoneStart = (unscale / 2) - 1
          val dangerZoneStop = dangerZoneStart + 2
          if (remainder >= dangerZoneStart && remainder <= dangerZoneStop) {
            val splitter = BigDecimal(new JBigDecimal(
              truncatedUnscaledValue.multiply(BigInteger.TEN).add(BigInteger.valueOf(5)),
              scale + 1
            ))
            val cmp = exact compare Algebraic(splitter)
            val roundUp = (mode: @unchecked) match {
              case HALF_DOWN => cmp > 0
              case HALF_UP => cmp >= 0
              case HALF_EVEN => cmp > 0 || cmp == 0 && truncatedUnscaledValue.testBit(0)
            }
            if (roundUp) truncated.add(epsilon)
            else truncated
          } else if (remainder < dangerZoneStart) {
            truncated
          } else {
            truncated.add(epsilon)
          }

        case CEILING | UP =>
          if (remainder <= 1 && exact <= Algebraic(BigDecimal(truncated))) {
            truncated
          } else {
            truncated.add(epsilon)
          }

        case FLOOR | DOWN =>
          if (remainder <= 0) {
            if (exact < Algebraic(BigDecimal(truncated))) {
              truncated.subtract(epsilon)
            } else {
              truncated
            }
          } else if (remainder >= (unscale - 1)) {
            val roundedUp = truncated.add(epsilon)
            if (exact >= Algebraic(BigDecimal(roundedUp))) {
              roundedUp
            } else {
              truncated
            }
          } else {
            truncated
          }
      }

      rounded
    }
  }

  private val MaxIntValue: BigInteger = BigInteger.valueOf(Int.MaxValue.toLong)
  private val MinIntValue: BigInteger = BigInteger.valueOf(Int.MinValue.toLong)
  private val MaxLongValue: BigInteger = BigInteger.valueOf(Long.MaxValue)
  private val MinLongValue: BigInteger = BigInteger.valueOf(Long.MinValue)

  /**
   * A zero bound function, defined over an algebraic expression algebra.
   */
  sealed abstract class ZeroBoundFunction {

    /**
     * Some state that is computed for each node in the expression tree. This
     * state is typically memoized, to avoid recomputation.
     */
    type Bound

    def apply(expr: Algebraic.Expr): Bound
  }

  /**
   * An implementation of "A New Constructive Root Bound for Algebraic
   * Expressions" by Chen Li & Chee Yap.
   */
  @SerialVersionUID(0L)
  case object LiYap extends ZeroBoundFunction {
    import Expr._

    final case class Bound(
      /** Bound on the leading coefficient. */
      lc: Long,
      /** Bound on the trailing coefficient. */
      tc: Long,
      /** Bound on the measure. */
      measure: Long,
      /** Lower bound on the value. */
      lb: Long,
      /** Upper bound on the value. */
      ub: Long
    ) {
      def getBitBound(degreeBound: Long): Long = checked {
        ub * (degreeBound - 1) + lc
      }
    }

    def apply(expr: Algebraic.Expr): Bound = checked {
      // Unfortunately, we must call degreeBound early, to avoid many redundant
      // traversals of the Expr tree. Getting this out of the way early on
      // means that we will traverse the tree once and populate the degreeBound
      // cache in all nodes right away. If we do it in a bottom up fashion,
      // then we risk terrible runtime behaviour.
      val degreeBound = expr.degreeBound
      expr match {
        case ConstantLong(n) =>
          rational(Rational(n))

        case ConstantDouble(n) =>
          rational(Rational(n))

        case ConstantBigDecimal(n) =>
          rational(Rational(n))

        case ConstantRational(n) =>
          rational(n)

        case root @ ConstantRoot(poly, _, _, _) =>
          // Bound on the euclidean distance of the coefficients.
          val distBound = poly.terms.map {
            case Term(c, _) => 2L * c.bitLength
          }.qsum / 2L + 1L
          Bound(
            root.lead.bitLength + 1L,
            root.tail.bitLength + 1L,
            distBound,
            Roots.lowerBound(poly),
            Roots.upperBound(poly)
          )

        case Neg(sub) =>
          sub.getBound(this)

        case expr: AddOrSubExpr =>
          val lhsExpr = expr.lhs
          val rhsExpr = expr.rhs
          val lhs = lhsExpr.getBound(this)
          val rhs = rhsExpr.getBound(this)
          val lc = lhs.lc * rhsExpr.degreeBound + rhs.lc * lhsExpr.degreeBound
          val tc = lhs.measure * rhsExpr.degreeBound + rhs.measure * lhsExpr.degreeBound + 2 * degreeBound
          val measure = tc
          val ub = max(lhs.ub, rhs.ub) + 1
          val lb = max(-measure, -(ub * (degreeBound - 1) + lc))
          Bound(lc, tc, measure, lb, ub)

        case Mul(lhsExpr, rhsExpr) =>
          val lhs = lhsExpr.getBound(this)
          val rhs = rhsExpr.getBound(this)
          val lc = lhs.lc * rhsExpr.degreeBound + rhs.lc * lhsExpr.degreeBound
          val tc = lhs.tc * rhsExpr.degreeBound + rhs.tc * lhsExpr.degreeBound
          val measure = lhs.measure * rhsExpr.degreeBound + rhs.measure * lhsExpr.degreeBound
          val lb = lhs.lb + rhs.lb
          val ub = lhs.ub + rhs.ub
          Bound(lc, tc, measure, lb, ub)

        case Div(lhsExpr, rhsExpr) =>
          val lhs = lhsExpr.getBound(this)
          val rhs = rhsExpr.getBound(this)
          val lc = lhs.lc * rhsExpr.degreeBound + rhs.tc * lhsExpr.degreeBound
          val tc = lhs.tc * rhsExpr.degreeBound + rhs.lc * lhsExpr.degreeBound
          val measure = lhs.measure * rhsExpr.degreeBound + rhs.measure * lhsExpr.degreeBound
          val lb = lhs.lb - rhs.ub
          val ub = lhs.ub - rhs.lb
          Bound(lc, tc, measure, lb, ub)

        case KRoot(subExpr, k) =>
          val sub = subExpr.getBound(this)
          val lb = sub.lb / k
          val ub =
            if (sub.ub % k == 0) (sub.ub / k)
            else ((sub.ub / k) + 1)
          Bound(sub.lc, sub.tc, sub.measure, lb, ub)

        case Pow(subExpr, k) =>
          val sub = subExpr.getBound(this)
          Bound(sub.lc * k, sub.tc * k, sub.measure * k, sub.lb * k, sub.ub * k)
      }
    }

    private def rational(n: Rational): Bound = {
      // TODO: We can do better here. The + 1 isn't always needed in a & b.
      // Also, the upper and lower bounds could be much tighter if we actually
      // partially perform the division.
      val a = n.numerator.abs.bitLength + 1
      if (n.denominator == BigInt(1)) {
        Bound(0, a, a, a - 1, a)
      } else {
        val b = n.denominator.bitLength + 1
        Bound(b, a, max(a, b), a - b - 1, a - b + 1)
      }
    }
  }

  /**
   * An implementation of "A Separation Bound for Real Algebraic Expressions",
   * by Burnikel, Funke, Mehlhorn, Schirra, and Schmitt. This provides a good
   * [[ZeroBoundFunction]] for use in sign tests.
   *
   * Unlike the paper, we use log-arithmetic instead of working with exact,
   * big integer values. This means our bound isn't technically as good as it
   * could be, but we save the cost of working with arithmetic. We also perform
   * all log arithmetic using `Long`s and check for overflow (throwing
   * `ArithmeticException`s when detected). In practice we shouldn't hit this
   * limit, but in case we do, we prefer to throw over failing silently.
   */
  @SerialVersionUID(0L)
  case object BFMSS extends ZeroBoundFunction {
    import Expr._

    /** Our state that we store, per node. */
    final case class Bound(l: Long, u: Long) {
      def getBitBound(degreeBound: Long): Long = checked {
        l + u * (degreeBound - 1)
      }
    }

    def apply(expr: Algebraic.Expr): Bound = expr match {
      case ConstantLong(n) => integer(n)
      case ConstantDouble(n) => rational(n)
      case ConstantBigDecimal(n) => rational(n)
      case ConstantRational(n) => rational(n)
      case root @ ConstantRoot(poly, _, _, _) =>
        Bound(root.lead.bitLength + 1, Roots.upperBound(poly))
      case Neg(sub) => sub.getBound(this)
      case Add(lhs, rhs) => add(lhs.getBound(this), rhs.getBound(this))
      case Sub(lhs, rhs) => add(lhs.getBound(this), rhs.getBound(this))
      case Mul(lhs, rhs) => mul(lhs.getBound(this), rhs.getBound(this))
      case Div(lhs, rhs) => div(lhs.getBound(this), rhs.getBound(this))
      case KRoot(sub, k) => nroot(sub.getBound(this), k)
      case Pow(sub, k) => pow(sub.getBound(this), k)
    }

    private def integer(n: Long): Bound = integer(BigInt(n))
    private def integer(n: SafeLong): Bound = Bound(0, n.abs.bitLength + 1)

    private def rational(n: Double): Bound = rational(BigDecimal(n))
    private def rational(n: BigDecimal): Bound = rational(Rational(n))
    private def rational(n: Rational): Bound =
      div(integer(n.numerator), integer(n.denominator))

    // We're not being fair to the BFMSS bound here. We're really just
    // setting a bound on the max value. However, the alternative would
    // require us to work outside of log arithmetic.
    private def add(lhs: Bound, rhs: Bound): Bound = checked {
      Bound(
        lhs.l + rhs.l,
        math.max(lhs.u + rhs.l, lhs.l + rhs.u) + 1
      )
    }

    private def mul(lhs: Bound, rhs: Bound): Bound = checked {
      Bound(
        lhs.l + rhs.l,
        lhs.u + rhs.u
      )
    }

    private def div(lhs: Bound, rhs: Bound): Bound = checked {
      Bound(
        lhs.l + rhs.u,
        lhs.u + rhs.l
      )
    }

    private def nroot(sub: Bound, k: Int): Bound = checked {
      if (sub.u < sub.l) {
        Bound(
          (sub.l + (k - 1) * sub.u) / k,
          sub.u
        )
      } else {
        Bound(
          sub.l,
          (sub.u * (k - 1) * sub.l) / k
        )
      }
    }

    private def pow(sub: Bound, k: Int): Bound = {
      @tailrec
      def sum(acc: Long, k: Int, extra: Long): Long =
        if (k == 1) {
          checked(acc + extra)
        } else {
          val x =
            if ((k & 1) == 1) checked(acc + extra)
            else extra
          sum(checked(acc + acc), k >>> 1, x)
        }

      if (k > 1) {
        Bound(
          sum(sub.l, k - 1, sub.l),
          sum(sub.u, k - 1, sub.u)
        )
      } else if (k == 1) {
        sub
      } else if (k == 0) {
        throw new IllegalArgumentException("exponent cannot be 0")
      } else {
        throw new IllegalArgumentException("exponent cannot be negative")
      }
    }
  }
}

trait AlgebraicInstances {
  implicit final val AlgebraicAlgebra = new AlgebraicAlgebra

  import NumberTag._
  implicit final val AlgebraicTag = new LargeTag[Algebraic](Exact, Algebraic(0))
}

private[math] trait AlgebraicIsFieldWithNRoot extends Field[Algebraic] with NRoot[Algebraic] {
  def zero: Algebraic = Algebraic.Zero
  def one: Algebraic = Algebraic.One
  def plus(a: Algebraic, b: Algebraic): Algebraic = a + b
  def negate(a: Algebraic): Algebraic = -a
  override def minus(a: Algebraic, b: Algebraic): Algebraic = a - b
  override def pow(a: Algebraic, b: Int): Algebraic = a pow b
  override def times(a: Algebraic, b: Algebraic): Algebraic = a * b
  def quot(a: Algebraic, b: Algebraic): Algebraic = a /~ b
  def mod(a: Algebraic, b: Algebraic): Algebraic = a % b
  def gcd(a: Algebraic, b: Algebraic): Algebraic = euclid(a, b)(Eq[Algebraic])
  def div(a: Algebraic, b: Algebraic): Algebraic = a / b

  def nroot(a: Algebraic, k: Int): Algebraic = a nroot k
  def fpow(a: Algebraic, b: Algebraic): Algebraic =
    throw new UnsupportedOperationException("unsupported operation")

  override def fromInt(n: Int): Algebraic = Algebraic(n)
  override def fromDouble(n: Double): Algebraic = Algebraic(n)
}

private[math] trait AlgebraicIsReal extends IsAlgebraic[Algebraic] {
  def toDouble(x: Algebraic): Double = x.toDouble
  def toAlgebraic(x: Algebraic): Algebraic = x
  def ceil(a: Algebraic): Algebraic = Algebraic(a.toBigDecimal(0, RoundingMode.CEILING))
  def floor(a: Algebraic): Algebraic = Algebraic(a.toBigDecimal(0, RoundingMode.FLOOR))
  def round(a: Algebraic): Algebraic = Algebraic(a.toBigDecimal(0, RoundingMode.HALF_EVEN))
  def isWhole(a: Algebraic): Boolean = a.isWhole
  override def sign(a: Algebraic): Sign = a.sign
  def signum(a: Algebraic): Int = a.signum
  def abs(a: Algebraic): Algebraic = a.abs

  override def eqv(x: Algebraic, y: Algebraic): Boolean = x.compare(y) == 0
  override def neqv(x: Algebraic, y: Algebraic): Boolean = x.compare(y) != 0
  def compare(x: Algebraic, y: Algebraic): Int = x.compare(y)
}

@SerialVersionUID(1L)
class AlgebraicAlgebra extends AlgebraicIsFieldWithNRoot with AlgebraicIsReal with Serializable
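
// A small usage sketch (not part of the library source): it exercises only the
// public API defined above -- the implicit Int constructor, sqrt, pow, the
// exact comparison ===, and explicit approximation via toBigDecimal.
object AlgebraicUsageSketch {
  import java.math.MathContext

  val sqrt2 = Algebraic(2).sqrt
  // Sign tests are exact, so squaring the root recovers 2 exactly, even though
  // sqrt2 has no finite decimal representation.
  val exact: Boolean = sqrt2.pow(2) === Algebraic(2) // true
  // Approximations must be requested explicitly, here to 34 significant digits.
  val approx = sqrt2.toBigDecimal(MathContext.DECIMAL128)
}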
tixxit/spire
core/shared/src/main/scala/spire/math/Algebraic.scala
Scala
mit
57,623
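A hypothetical smoke test of the `Algebraic` API above (not part of the repository), illustrating what the BFMSS separation bound buys: `signum` can prove an algebraic expression is exactly zero, where floating point would only get "close".

import spire.math.Algebraic

object AlgebraicSignDemo extends App {
  // (sqrt(2) + sqrt(3))^2 == 5 + sqrt(24) holds exactly, so the difference
  // below is exactly zero; the zero-bound function lets signum terminate.
  val lhs = (Algebraic(2).sqrt + Algebraic(3).sqrt).pow(2)
  val rhs = Algebraic(5) + Algebraic(24).sqrt
  println((lhs - rhs).signum) // 0
}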
package com.protose.resque

import com.twitter.json.Json

class JobFactory(performableMap: Map[String, Performable]) {
    def apply(worker: Worker, queue: String, payload: String): Job = {
        Job(worker, queue, payload, performableMap)
    }
}

case class Job(worker: Worker, queue: String, payload: String,
               performableMap: Map[String, Performable]) {
    def perform = {
        performer.perform(parsedPayload("args").asInstanceOf[List[String]])
    }

    def performer: Performable = {
        performableMap(parsedPayload("class").asInstanceOf[String])
    }

    // The parsed JSON maps "class" to a String but "args" to a List, so the
    // value type must be Any rather than String.
    def parsedPayload: Map[String, Any] =
        Json.parse(payload).asInstanceOf[Map[String, Any]]
}

// vim: set ts=4 sw=4 et:
jamesgolick/scala-resque-worker
src/main/scala/Job.scala
Scala
mit
700
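A sketch of wiring the factory above. `Performable` is defined elsewhere in the repository; the stub below is an assumption inferred from `Job.perform`.

// Assumed shape of Performable (hypothetical; inferred from Job.perform).
trait Performable {
  def perform(args: List[String]): Unit
}

object EchoJob extends Performable {
  def perform(args: List[String]): Unit = println(args.mkString(" "))
}

// Resque payloads are JSON objects such as {"class": "EchoJob", "args": ["hi"]}:
// val factory = new JobFactory(Map("EchoJob" -> EchoJob))
// factory(worker, "default", """{"class": "EchoJob", "args": ["hi"]}""").perform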
package core.formatter.serializer import com.fasterxml.jackson.core.JsonGenerator import com.fasterxml.jackson.databind.{ SerializerProvider, JsonSerializer } import models.Conversation /** * Created by zephyre on 7/11/15. */ class ConversationSerializer[T <: Conversation] extends JsonSerializer[T] { override def serialize(value: T, gen: JsonGenerator, serializers: SerializerProvider): Unit = { gen.writeStartObject() gen.writeStringField("id", value.id.toString) gen.writeBooleanField("muted", value.muted) gen.writeBooleanField("pinned", value.pinned) gen.writeNumberField("targetId", value.targetId) gen.writeEndObject() } } object ConversationSerializer { def apply[T <: Conversation]() = new ConversationSerializer[T]() }
Lvxingpai/Hedylogos-Server
app/core/formatter/serializer/ConversationSerializer.scala
Scala
apache-2.0
766
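A sketch (not from the repository) of registering the serializer above with a Jackson `ObjectMapper` through a `SimpleModule`.

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.module.SimpleModule
import models.Conversation

object ConversationJson {
  private val module =
    new SimpleModule().addSerializer(classOf[Conversation], ConversationSerializer[Conversation]())
  val mapper: ObjectMapper = new ObjectMapper().registerModule(module)
  // mapper.writeValueAsString(conversation) then yields
  // {"id": "...", "muted": false, "pinned": false, "targetId": ...}
}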
package de.jannikarndt.datamover.monitor

import java.time.{LocalDateTime, ZoneOffset}

import scala.collection.mutable

object Monitor {
    val monitors: mutable.Map[String, Monitor] = mutable.Map[String, Monitor]()

    def getMonitor(className: String): Monitor = {
        monitors.getOrElseUpdate(className, new Monitor(className))
    }
}

class Monitor(name: String) {
    private var inputLong = mutable.Map[LocalDateTime, Long]()
    private var inputString = mutable.Map[LocalDateTime, String]()
    private var outputLong = mutable.Map[LocalDateTime, Long]()
    private var outputString = mutable.Map[LocalDateTime, String]()

    def input(number: Long): Unit = inputLong += (LocalDateTime.now() -> number)
    def input(text: String): Unit = inputString += (LocalDateTime.now() -> text)
    def output(number: Long): Unit = outputLong += (LocalDateTime.now() -> number)
    def output(text: String): Unit = outputString += (LocalDateTime.now() -> text)

    implicit val localDateOrdering: Ordering[LocalDateTime] = Ordering.by(_.toEpochSecond(ZoneOffset.UTC))

    private def makeString(map: collection.Map[_ <: LocalDateTime, _ <: Any]) =
        map.toList.sortBy(_._1).map(_.productIterator.mkString("\t")).mkString("\n")

    def dumpIn: String = makeString(inputLong.mapValues(x => x.toString) ++ inputString)
    def dumpOut: String = makeString(outputLong.mapValues(x => x.toString) ++ outputString)

    def lastIn: String = inputLong.lastOption.map(_._2.toString).getOrElse("")
    def lastOut: String = outputLong.lastOption.map(_._2.toString).getOrElse("")
}
JannikArndt/DataMover
src/main/scala/de/jannikarndt/datamover/monitor/Monitor.scala
Scala
mit
1,583
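A brief hypothetical usage of the monitor above.

object MonitorDemo extends App {
  val monitor = Monitor.getMonitor("csv-import")
  monitor.input(1500L)             // e.g. rows read
  monitor.output("wrote batch 1")  // free-form progress note
  monitor.output(1500L)
  println(monitor.lastIn)  // "1500"
  println(monitor.dumpIn)  // timestamped input history, oldest first
}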
/* * Copyright 2010-2011 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.liftweb package common /** * <p> * Via an HList containing a Collection of Box[things], either generate an * HList of the things or a List[Failure] * </p> * * */ object CombinableBox { import HLists._ type Result[A] = Either[List[Failure], A] private implicit def emptyBoxToFailure(eb: EmptyBox): Failure = eb match { case (f: Failure) => f case Empty => Failure("Empty") } implicit def boxToCombinableBox[A](in: Box[A]): CombinableBox[A, HNil] = CombinableBox(in match { case Full(a) => Right(a :+: HNil) case (f: Failure) => Left(f :: Nil) case _ => Left(Failure("Empty") :: Nil) }) implicit def boxableToCombinableBox[A](in: Boxable[A]): CombinableBox[A, HNil] = in.asBox implicit def boxCombinationToCombinableBox[A, B <: HList](in: Result[A :+: B]): CombinableBox[A, B] = CombinableBox(in) implicit def resultToBox[A](result: Result[A]): Box[A] = result match { case Left(Nil) => Empty case Left(f :: Nil) => f case Left(f) => new ParamFailure("Multiple Failures", Empty, Empty, FailureList(f)) case Right(x) => Full(x) } /** * If the Failure is going to be condensed, generate a FailureList */ final case class FailureList(failures: List[Failure]) /** * The place where the results are accumulated */ final case class CombinableBox[B, C <: HList](rhs: Result[B :+: C]) { def :&: [A](lhs: Boxable[A]): Result[A :+: B :+: C] = this.:&:(lhs.asBox) def :&: [A](lhs: Box[A]): Result[A :+: B :+: C] = (lhs, rhs) match { case (failure: EmptyBox, Left(failures) ) => Left(failure :: failures) case (failure: EmptyBox, _ ) => Left(failure :: Nil) case (_, Left(failures) ) => Left(failures) case (Full(success), Right(successes)) => Right(success :+: successes) } } }
pbrant/framework
core/common/src/main/scala/net/liftweb/common/CombinableBox.scala
Scala
apache-2.0
2,622
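A sketch of how the combinators above behave (hypothetical values; the types are spelled out for clarity).

import net.liftweb.common._
import net.liftweb.common.HLists._
import net.liftweb.common.CombinableBox._

object CombinableBoxDemo {
  // All boxes Full: the values zip into an HList on the Right.
  val ok: Result[Int :+: String :+: HNil] = Full(1) :&: Full("a")

  // Any EmptyBox: every failure is accumulated on the Left.
  val bad: Result[Int :+: String :+: HNil] =
    Full(1) :&: (Failure("boom"): Box[String])
}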
package com.example.akkaTcpChat import akka.actor.{Props, ActorSystem} import scala.concurrent.duration._ import java.net.InetSocketAddress //import com.example.akkaTcpChat.client.{InputUserMessage, UserInteract} class Application extends Bootable { implicit val actorSystem = ActorSystem("tcpserver") implicit val executor = actorSystem.dispatcher lazy val addr = { new InetSocketAddress("localhost", 8842) } lazy val webServerAddr = { new InetSocketAddress("localhost", 8083) } def startup() = { // Register all needed actors //val server = actorSystem.actorOf(Server.props(addr), "server") val world = actorSystem.actorOf(World.props(), "world") val tcpServer = actorSystem.actorOf(Props(classOf[TcpServer], addr, classOf[TcpHandler]), "simple") val tcpClient = actorSystem.actorOf(TcpClient.props(webServerAddr), "tcpclient") // TODO - Move to own actor world ! "init" world ! "dump" //val d = world ! new Get(1,1) //Console.println("Data:"+d) world ! new Set(1,1,9,1) world ! "dump" //var perlin = new perlinNoise.PerlinNoise() ////var noise = perlin.GenerateWhiteNoise(10,10) //var noise = perlin.GetIntMap(80, 30, 0, 9, 3) //noise.Dump() } def shutdown() = { actorSystem.shutdown() actorSystem.awaitTermination(3.seconds) } } object Application { def main(args: Array[String]) { val app = new Application() app.startup() } } trait Bootable { def startup(): Unit def shutdown(): Unit sys.ShutdownHookThread(shutdown()) }
spoconnor/ElixirMessagingServer
ScalaServer/src/main/scala/Main.scala
Scala
mit
1,560
/* * Copyright (c) 2014-2018 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.internal.operators import monix.reactive.Observable import scala.concurrent.duration._ object DropByPredicateSuite extends BaseOperatorSuite { def createObservable(sourceCount: Int) = { require(sourceCount > 0, "sourceCount should be strictly positive") Some { val o = Observable.range(1, sourceCount * 2).dropWhile(_ < sourceCount) Sample(o, count(sourceCount), sum(sourceCount), 0.seconds, 0.seconds) } } def sum(sourceCount: Int): Long = (1 until sourceCount * 2).drop(sourceCount-1).sum def count(sourceCount: Int) = sourceCount def observableInError(sourceCount: Int, ex: Throwable) = { require(sourceCount > 0, "sourceCount should be strictly positive") Some { val o = createObservableEndingInError(Observable.range(1, sourceCount + 2), ex) .dropWhile(_ == 1) Sample(o, count(sourceCount), sum(sourceCount), 0.seconds, 0.seconds) } } def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some { val o = Observable.range(1, sourceCount * 2).dropWhile { elem => if (elem < sourceCount) true else throw ex } Sample(o, 0, 0, 0.seconds, 0.seconds) } override def cancelableObservables() = { val o = Observable.range(1, 1000).delayOnNext(1.second).dropWhile(_ < 100) Seq(Sample(o, 0, 0, 0.seconds, 0.seconds)) } }
Wogan/monix
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DropByPredicateSuite.scala
Scala
apache-2.0
2,040
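A standalone illustration of the semantics under test (hypothetical, assuming a Monix 3.x-style API).

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object DropWhileDemo extends App {
  // dropWhile discards the longest prefix satisfying the predicate,
  // then forwards every remaining element unconditionally.
  val future = Observable.range(1, 10).dropWhile(_ < 5).toListL.runToFuture
  println(Await.result(future, 5.seconds)) // List(5, 6, 7, 8, 9)
}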
package com.twitter.finagle.util import com.twitter.app.GlobalFlag import com.twitter.conversions.time._ import com.twitter.finagle.stats.FinagleStatsReceiver import com.twitter.logging.Logger import com.twitter.util.{JavaTimer, ProxyTimer, Timer} /** * Configures whether to probe for slow tasks executing in the default `Timer`. * * When enabled, tasks are monitored to detect tasks that are slow to complete. A counter of * the number of slow tasks is registered at `finagle/timer/slow`. Additionally, if a slow * task is observed executing the stack traces of all threads will be logged at level * WARN. The maximum runtime and minimum interval between logging stack traces can be tuned * using the global flags `c.t.f.u.defaultTimerSlowTaskMaxRuntime` and * `c.t.f.u.defaultTimerSlowTaskLogMinInterval`, defined below. * * @note Observation of a slow task in progress is performed when scheduling additional work * and is thus susceptible to false negatives. */ object defaultTimerProbeSlowTasks extends GlobalFlag( false, "Enable reporting of slow timer tasks executing in the default timer") /** * Configures the maximum allowed runtime for tasks executing in the default `Timer`. */ object defaultTimerSlowTaskMaxRuntime extends GlobalFlag( 2.seconds, "Maximum runtime allowed for tasks before they are reported") /** * Configures the minimum duration between logging stack traces when a slow task is * detected in the default `Timer`. */ object defaultTimerSlowTaskLogMinInterval extends GlobalFlag( 20.seconds, "Minimum interval between recording stack traces for slow tasks") /** * A Finagle's trusty timer that should satisfy a certain level of throughput/latency * requirements: O(1) task creation and (at least) O(log n) task cancellation. Usually, * hashed wheel timers provide a great foundation fulfilling those requirements. * * @note This is package-private such that we have a control over which timer implementations * might be considered "default" for Finagle. */ private[finagle] trait ServiceLoadedTimer extends Timer /** * Finagle's default [[Timer]] that's intended to be shared across a number of servers/clients. * * The default [[Timer]] is intended for scheduling tasks that will finish very quickly and * shouldn't be used to schedule tasks that will occupy the executing thread for a significant * duration. * * @note This timer is "unstoppable" such that calls to `stop()` is ignored. */ object DefaultTimer extends ProxyTimer { private[this] val log = Logger.get() // This timer could be one of the following (in the order of priority): // // - loaded at runtime via the `LoadService` machinery (the first available timer is used) // - `JavaTimer` // // TODO: We might consider doing round-robin over the set of "default" timers in future. protected val self: Timer = { val baseTimer = LoadService[ServiceLoadedTimer]() match { case loaded +: _ => loaded case _ => log.warning(s"Can not service-load a timer. Using JavaTimer instead.") new JavaTimer(isDaemon = true) } initializeDefaultTimer(baseTimer) } /** * An alias for [[DefaultTimer]]. */ @deprecated("Use DefaultTimer from Scala and DefaultTimer.getInstance() from Java", "2017-5-4") val twitter: Timer = this /** * An alias for [[DefaultTimer]]. */ @deprecated("Use DefaultTimer from Scala and DefaultTimer.getInstance() from Java", "2017-5-4") val get: DefaultTimer.type = this /** * A Java-friendly accessor for [[DefaultTimer]]. 
   */
  def getInstance: Timer = this

  override def stop(): Unit =
    log.warning(s"Ignoring call to `Timer.stop()` on an unstoppable DefaultTimer.\n" +
      s"Current stack trace: ${ Thread.currentThread.getStackTrace.mkString("\n") }")

  override def toString: String = s"DefaultTimer(${self.toString})"

  private[this] def initializeDefaultTimer(timer: Timer): Timer = {
    if (!defaultTimerProbeSlowTasks()) timer
    else {
      // Probing for slow running tasks is enabled so wrap the timer in the metering proxy.
      new LoggingSlowProbeProxyTimer(
        underlying = timer,
        FinagleStatsReceiver,
        maxRuntime = defaultTimerSlowTaskMaxRuntime(),
        maxLogFrequency = defaultTimerSlowTaskLogMinInterval())
    }
  }
}
koshelev/finagle
finagle-core/src/main/scala/com/twitter/finagle/util/DefaultTimer.scala
Scala
apache-2.0
4,333
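A hypothetical scheduling example for the file above; `schedule` and `cancel` are standard `com.twitter.util.Timer`/`TimerTask` operations.

import com.twitter.conversions.time._
import com.twitter.finagle.util.DefaultTimer

object DefaultTimerDemo extends App {
  // Keep scheduled work short: long-running tasks would trip the slow-task
  // probe when defaultTimerProbeSlowTasks is enabled.
  val task = DefaultTimer.schedule(1.second) { println("tick") }
  Thread.sleep(3500)
  task.cancel() // cancels this task; stop() on the shared timer is a no-op
}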
package macrolog import com.typesafe.scalalogging.{Logger => ScalaLogger} import org.slf4j.{Logger => Underlying} import scala.reflect.ClassTag /** * @author Maksim Ochenashko */ object Logger { def apply(scalaLogger: ScalaLogger): Logger = new Logger(scalaLogger) def apply(underlying: Underlying): Logger = new Logger(ScalaLogger(underlying)) def apply(name: String): Logger = new Logger(ScalaLogger(name)) def apply(clazz: Class[_]): Logger = new Logger(ScalaLogger(clazz)) def apply[T: ClassTag]: Logger = new Logger(ScalaLogger[T]) } @SerialVersionUID(716196318) final class Logger private[macrolog](val underlying: ScalaLogger) extends Serializable { // Error def error(message: => String)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.error(message, PositionLoggingContext(ctx, pos)) def error(message: => String, cause: => Throwable)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.error(message, PositionLoggingContext(ctx, pos), cause) // Warn def warn(message: => String)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.warn(message, PositionLoggingContext(ctx, pos)) def warn(message: => String, cause: => Throwable)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.warn(message, PositionLoggingContext(ctx, pos), cause) // Info def info(message: => String)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.info(message, PositionLoggingContext(ctx, pos)) def info(message: => String, cause: => Throwable)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.info(message, PositionLoggingContext(ctx, pos), cause) // Debug def debug(message: => String)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.debug(message, PositionLoggingContext(ctx, pos)) def debug(message: => String, cause: => Throwable)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.debug(message, PositionLoggingContext(ctx, pos), cause) // Trace def trace(message: => String)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.trace(message, PositionLoggingContext(ctx, pos)) def trace(message: => String, cause: => Throwable)(implicit ctx: LoggingContext, pos: Position): Unit = underlying.trace(message, PositionLoggingContext(ctx, pos), cause) }
iRevive/macrolog
src/main/scala/macrolog/Logger.scala
Scala
mit
2,393
package org.jetbrains.plugins.scala.annotator.template

import org.jetbrains.plugins.scala.annotator.{AnnotatorTestBase, Error}

/**
 * Pavel Fatin
 */
class AbstractInstantiationTest extends AnnotatorTestBase(AbstractInstantiation.THIS) {
  private val Message = "(\\w+\\s\\w+) is abstract; cannot be instantiated".r

  def testOrdinaryClass() {
    assertNothing(messages("class C; new C"))
    assertNothing(messages("class C; new C {}"))
    assertNothing(messages("class C; new C with Object"))
    assertNothing(messages("class C; new C with Object {}"))
    assertNothing(messages("class C; new Object with C"))
    assertNothing(messages("class C; new Object with C {}"))
    assertNothing(messages("class C; class X extends C"))
    assertNothing(messages("class C; class X extends C {}"))
    assertNothing(messages("class C; class X extends C with Object"))
    assertNothing(messages("class C; class X extends C with Object {}"))
    assertNothing(messages("class C; class X extends Object with C"))
    assertNothing(messages("class C; class X extends Object with C {}"))
  }

  def testAbstractClass() {
    assertMatches(messages("trait T; new T")) {
      case Error("T", Message("Trait T")) :: Nil =>
    }
    assertMatches(messages("abstract class C; new C")) {
      case Error("C", Message("Class C")) :: Nil =>
    }
    assertNothing(messages("abstract class C; new C {}"))
    assertNothing(messages("abstract class C; new C with Object"))
    assertNothing(messages("abstract class C; new C with Object {}"))
    assertNothing(messages("abstract class C; new Object with C"))
    assertNothing(messages("abstract class C; new Object with C {}"))
    assertNothing(messages("abstract class C; class X extends C"))
    assertNothing(messages("abstract class C; class X extends C {}"))
    assertNothing(messages("abstract class C; class X extends C with Object"))
    assertNothing(messages("abstract class C; class X extends C with Object {}"))
    assertNothing(messages("abstract class C; class X extends Object with C"))
    assertNothing(messages("abstract class C; class X extends Object with C {}"))
  }

  def testAbstractClassEarlyDefinition() {
    assertMatches(messages("abstract class C; new {} with C")) {
      case Error("C", Message("Class C")) :: Nil =>
    }
    assertNothing(messages("abstract class C; new { val a = 0 } with C"))
  }
}
ilinum/intellij-scala
test/org/jetbrains/plugins/scala/annotator/template/AbstractInstantiationTest.scala
Scala
apache-2.0
2,382
package org.mitlware.test

import scalaz._

import org.scalacheck._
import org.scalacheck.Prop.forAll

import org.mitlware._

case object MockMetricIntImpl extends MetricImpl[Int] {
  // Widen to Double before subtracting so extreme inputs such as
  // Int.MaxValue - Int.MinValue cannot overflow.
  override def cost( from : Int, to : Int ) = from.toDouble - to.toDouble
}

case object MockMetricIntState extends MetricView[Int] {
  override def getMetric : MetricImpl[Int] = MockMetricIntImpl
  override def setMetric( x : MetricImpl[Int] ) = this
}

object MetricSpec extends Properties("Metric") {

  property("cost positive from better to worse") = forAll { (from : Int, to : Int) =>
    def performTest[Env <: MetricView[Int]] : State[Env,Double] = for {
      cost <- Metric.cost(from, to)
    } yield cost

    val cost = performTest.eval(MockMetricIntState)
    // A three-way check is needed: when from == to the cost must be zero.
    if (from > to) cost > 0
    else if (from < to) cost < 0
    else cost == 0
  }

  property("gain negative from better to worse") = forAll { (from : Int, to : Int) =>
    def performTest[Env <: MetricView[Int]] : State[Env,Double] = for {
      cost <- Metric.gain(from, to)
    } yield cost

    val gain = performTest.eval(MockMetricIntState)
    if (from > to) gain < 0
    else if (from < to) gain > 0
    else gain == 0
  }
}
MitLware/MitLware-scala-experimental
src/org/mitlware/test/SpecMetric.scala
Scala
bsd-3-clause
1,242
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.mllib.clustering

import breeze.linalg.{argmax, argtopk, normalize, sum, DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics.{exp, lgamma}
import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import org.apache.spark.SparkContext
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD}
import org.apache.spark.graphx.{Edge, EdgeContext, Graph, VertexId}
import org.apache.spark.mllib.linalg.{Matrices, Matrix, Vector, Vectors}
import org.apache.spark.mllib.util.{Loader, Saveable}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.util.{BoundedPriorityQueue, Utils}

/**
 * Latent Dirichlet Allocation (LDA) model.
 *
 * This abstraction permits for different underlying representations,
 * including local and distributed data structures.
 */
@Since("1.3.0")
abstract class LDAModel private[clustering] extends Saveable {

  /** Number of topics */
  @Since("1.3.0")
  def k: Int

  /** Vocabulary size (number of terms or words in the vocabulary) */
  @Since("1.3.0")
  def vocabSize: Int

  /**
   * Concentration parameter (commonly named "alpha") for the prior placed on documents'
   * distributions over topics ("theta").
   *
   * This is the parameter to a Dirichlet distribution.
   */
  @Since("1.5.0")
  def docConcentration: Vector

  /**
   * Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics'
   * distributions over terms.
   *
   * This is the parameter to a symmetric Dirichlet distribution.
   *
   * @note The topics' distributions over terms are called "beta" in the original LDA paper
   * by Blei et al., but are called "phi" in many later papers such as Asuncion et al., 2009.
   */
  @Since("1.5.0")
  def topicConcentration: Double

  /**
   * Shape parameter for random initialization of variational parameter gamma.
   * Used for variational inference for perplexity and other test-time computations.
   */
  protected def gammaShape: Double

  /**
   * Inferred topics, where each topic is represented by a distribution over terms.
   * This is a matrix of size vocabSize x k, where each column is a topic.
   * No guarantees are given about the ordering of the topics.
   */
  @Since("1.3.0")
  def topicsMatrix: Matrix

  /**
   * Return the topics described by weighted terms.
   *
   * @param maxTermsPerTopic Maximum number of terms to collect for each topic.
   * @return Array over topics. Each topic is represented as a pair of matching arrays:
   *         (term indices, term weights in topic).
   *         Each topic's terms are sorted in order of decreasing weight.
   */
  @Since("1.3.0")
  def describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])]

  /**
   * Return the topics described by weighted terms.
* * WARNING: If vocabSize and k are large, this can return a large object! * * @return Array over topics. Each topic is represented as a pair of matching arrays: * (term indices, term weights in topic). * Each topic's terms are sorted in order of decreasing weight. */ @Since("1.3.0") def describeTopics(): Array[(Array[Int], Array[Double])] = describeTopics(vocabSize) /* TODO (once LDA can be trained with Strings or given a dictionary) * Return the topics described by weighted terms. * * This is similar to [[describeTopics()]] but returns String values for terms. * If this model was trained using Strings or was given a dictionary, then this method returns * terms as text. Otherwise, this method returns terms as term indices. * * This limits the number of terms per topic. * This is approximate; it may not return exactly the top-weighted terms for each topic. * To get a more precise set of top terms, increase maxTermsPerTopic. * * @param maxTermsPerTopic Maximum number of terms to collect for each topic. * @return Array over topics. Each topic is represented as a pair of matching arrays: * (terms, term weights in topic) where terms are either the actual term text * (if available) or the term indices. * Each topic's terms are sorted in order of decreasing weight. */ // def describeTopicsAsStrings(maxTermsPerTopic: Int): Array[(Array[Double], Array[String])] /* TODO (once LDA can be trained with Strings or given a dictionary) * Return the topics described by weighted terms. * * This is similar to [[describeTopics()]] but returns String values for terms. * If this model was trained using Strings or was given a dictionary, then this method returns * terms as text. Otherwise, this method returns terms as term indices. * * WARNING: If vocabSize and k are large, this can return a large object! * * @return Array over topics. Each topic is represented as a pair of matching arrays: * (terms, term weights in topic) where terms are either the actual term text * (if available) or the term indices. * Each topic's terms are sorted in order of decreasing weight. */ // def describeTopicsAsStrings(): Array[(Array[Double], Array[String])] = // describeTopicsAsStrings(vocabSize) /* TODO * Compute the log likelihood of the observed tokens, given the current parameter estimates: * log P(docs | topics, topic distributions for docs, alpha, eta) * * Note: * - This excludes the prior. * - Even with the prior, this is NOT the same as the data log likelihood given the * hyperparameters. * * @param documents RDD of documents, which are term (word) count vectors paired with IDs. * The term count vectors are "bags of words" with a fixed-size vocabulary * (where the vocabulary size is the length of the vector). * This must use the same vocabulary (ordering of term counts) as in training. * Document IDs must be unique and >= 0. * @return Estimated log likelihood of the data under this model */ // def logLikelihood(documents: RDD[(Long, Vector)]): Double /* TODO * Compute the estimated topic distribution for each document. * This is often called 'theta' in the literature. * * @param documents RDD of documents, which are term (word) count vectors paired with IDs. * The term count vectors are "bags of words" with a fixed-size vocabulary * (where the vocabulary size is the length of the vector). * This must use the same vocabulary (ordering of term counts) as in training. * Document IDs must be unique and greater than or equal to 0. * @return Estimated topic distribution for each document. 
* The returned RDD may be zipped with the given RDD, where each returned vector * is a multinomial distribution over topics. */ // def topicDistributions(documents: RDD[(Long, Vector)]): RDD[(Long, Vector)] } /** * Local LDA model. * This model stores only the inferred topics. * * @param topics Inferred topics (vocabSize x k matrix). */ @Since("1.3.0") class LocalLDAModel private[spark] ( @Since("1.3.0") val topics: Matrix, @Since("1.5.0") override val docConcentration: Vector, @Since("1.5.0") override val topicConcentration: Double, override protected[spark] val gammaShape: Double = 100) extends LDAModel with Serializable { private[spark] var seed: Long = Utils.random.nextLong() @Since("1.3.0") override def k: Int = topics.numCols @Since("1.3.0") override def vocabSize: Int = topics.numRows @Since("1.3.0") override def topicsMatrix: Matrix = topics @Since("1.3.0") override def describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])] = { val brzTopics = topics.asBreeze.toDenseMatrix Range(0, k).map { topicIndex => val topic = normalize(brzTopics(::, topicIndex), 1.0) val (termWeights, terms) = topic.toArray.zipWithIndex.sortBy(-_._1).take(maxTermsPerTopic).unzip (terms, termWeights) }.toArray } /** * Random seed for cluster initialization. */ @Since("2.4.0") def getSeed: Long = seed /** * Set the random seed for cluster initialization. */ @Since("2.4.0") def setSeed(seed: Long): this.type = { this.seed = seed this } @Since("1.5.0") override def save(sc: SparkContext, path: String): Unit = { LocalLDAModel.SaveLoadV1_0.save(sc, path, topicsMatrix, docConcentration, topicConcentration, gammaShape) } // TODO: declare in LDAModel and override once implemented in DistributedLDAModel /** * Calculates a lower bound on the log likelihood of the entire corpus. * * See Equation (16) in original Online LDA paper. * * @param documents test corpus to use for calculating log likelihood * @return variational lower bound on the log likelihood of the entire corpus */ @Since("1.5.0") def logLikelihood(documents: RDD[(Long, Vector)]): Double = logLikelihoodBound(documents, docConcentration, topicConcentration, topicsMatrix.asBreeze.toDenseMatrix, gammaShape, k, vocabSize) /** * Java-friendly version of `logLikelihood` */ @Since("1.5.0") def logLikelihood(documents: JavaPairRDD[java.lang.Long, Vector]): Double = { logLikelihood(documents.rdd.asInstanceOf[RDD[(Long, Vector)]]) } /** * Calculate an upper bound on perplexity. (Lower is better.) * See Equation (16) in original Online LDA paper. * * @param documents test corpus to use for calculating perplexity * @return Variational upper bound on log perplexity per token. */ @Since("1.5.0") def logPerplexity(documents: RDD[(Long, Vector)]): Double = { val corpusTokenCount = documents .map { case (_, termCounts) => termCounts.toArray.sum } .sum() -logLikelihood(documents) / corpusTokenCount } /** * Java-friendly version of `logPerplexity` */ @Since("1.5.0") def logPerplexity(documents: JavaPairRDD[java.lang.Long, Vector]): Double = { logPerplexity(documents.rdd.asInstanceOf[RDD[(Long, Vector)]]) } /** * Estimate the variational likelihood bound of from `documents`: * log p(documents) >= E_q[log p(documents)] - E_q[log q(documents)] * This bound is derived by decomposing the LDA model to: * log p(documents) = E_q[log p(documents)] - E_q[log q(documents)] + D(q|p) * and noting that the KL-divergence D(q|p) >= 0. * * See Equation (16) in original Online LDA paper, as well as Appendix A.3 in the JMLR version of * the original LDA paper. 
* @param documents a subset of the test corpus * @param alpha document-topic Dirichlet prior parameters * @param eta topic-word Dirichlet prior parameter * @param lambda parameters for variational q(beta | lambda) topic-word distributions * @param gammaShape shape parameter for random initialization of variational q(theta | gamma) * topic mixture distributions * @param k number of topics * @param vocabSize number of unique terms in the entire test corpus */ private def logLikelihoodBound( documents: RDD[(Long, Vector)], alpha: Vector, eta: Double, lambda: BDM[Double], gammaShape: Double, k: Int, vocabSize: Long): Double = { val brzAlpha = alpha.asBreeze.toDenseVector // transpose because dirichletExpectation normalizes by row and we need to normalize // by topic (columns of lambda) val Elogbeta = LDAUtils.dirichletExpectation(lambda.t).t val ElogbetaBc = documents.sparkContext.broadcast(Elogbeta) val gammaSeed = this.seed // Sum bound components for each document: // component for prob(tokens) + component for prob(document-topic distribution) val corpusPart = documents.filter(_._2.numNonzeros > 0).map { case (id: Long, termCounts: Vector) => val localElogbeta = ElogbetaBc.value var docBound = 0.0D val (gammad: BDV[Double], _, _) = OnlineLDAOptimizer.variationalTopicInference( termCounts, exp(localElogbeta), brzAlpha, gammaShape, k, gammaSeed + id) val Elogthetad: BDV[Double] = LDAUtils.dirichletExpectation(gammad) // E[log p(doc | theta, beta)] termCounts.foreachNonZero { case (idx, count) => docBound += count * LDAUtils.logSumExp(Elogthetad + localElogbeta(idx, ::).t) } // E[log p(theta | alpha) - log q(theta | gamma)] docBound += sum((brzAlpha - gammad) *:* Elogthetad) docBound += sum(lgamma(gammad) - lgamma(brzAlpha)) docBound += lgamma(sum(brzAlpha)) - lgamma(sum(gammad)) docBound }.sum() ElogbetaBc.destroy() // Bound component for prob(topic-term distributions): // E[log p(beta | eta) - log q(beta | lambda)] val sumEta = eta * vocabSize val topicsPart = sum((eta - lambda) *:* Elogbeta) + sum(lgamma(lambda) - lgamma(eta)) + sum(lgamma(sumEta) - lgamma(sum(lambda(::, breeze.linalg.*)))) corpusPart + topicsPart } /** * Predicts the topic mixture distribution for each document (often called "theta" in the * literature). Returns a vector of zeros for an empty document. * * This uses a variational approximation following Hoffman et al. (2010), where the approximate * distribution is called "gamma." Technically, this method returns this approximation "gamma" * for each document. 
* @param documents documents to predict topic mixture distributions for * @return An RDD of (document ID, topic mixture distribution for document) */ @Since("1.3.0") // TODO: declare in LDAModel and override once implemented in DistributedLDAModel def topicDistributions(documents: RDD[(Long, Vector)]): RDD[(Long, Vector)] = { // Double transpose because dirichletExpectation normalizes by row and we need to normalize // by topic (columns of lambda) val expElogbeta = exp(LDAUtils.dirichletExpectation(topicsMatrix.asBreeze.toDenseMatrix.t).t) val expElogbetaBc = documents.sparkContext.broadcast(expElogbeta) val docConcentrationBrz = this.docConcentration.asBreeze val gammaShape = this.gammaShape val k = this.k val gammaSeed = this.seed documents.map { case (id: Long, termCounts: Vector) => if (termCounts.numNonzeros == 0) { (id, Vectors.zeros(k)) } else { val (gamma, _, _) = OnlineLDAOptimizer.variationalTopicInference( termCounts, expElogbetaBc.value, docConcentrationBrz, gammaShape, k, gammaSeed + id) (id, Vectors.dense(normalize(gamma, 1.0).toArray)) } } } /** * Predicts the topic mixture distribution for a document (often called "theta" in the * literature). Returns a vector of zeros for an empty document. * * Note this means to allow quick query for single document. For batch documents, please refer * to `topicDistributions()` to avoid overhead. * * @param document document to predict topic mixture distributions for * @return topic mixture distribution for the document */ @Since("2.0.0") def topicDistribution(document: Vector): Vector = { val gammaSeed = this.seed val expElogbeta = exp(LDAUtils.dirichletExpectation(topicsMatrix.asBreeze.toDenseMatrix.t).t) if (document.numNonzeros == 0) { Vectors.zeros(this.k) } else { val (gamma, _, _) = OnlineLDAOptimizer.variationalTopicInference( document, expElogbeta, this.docConcentration.asBreeze, gammaShape, this.k, gammaSeed) Vectors.dense(normalize(gamma, 1.0).toArray) } } /** * Java-friendly version of `topicDistributions` */ @Since("1.4.1") def topicDistributions( documents: JavaPairRDD[java.lang.Long, Vector]): JavaPairRDD[java.lang.Long, Vector] = { val distributions = topicDistributions(documents.rdd.asInstanceOf[RDD[(Long, Vector)]]) JavaPairRDD.fromRDD(distributions.asInstanceOf[RDD[(java.lang.Long, Vector)]]) } } /** * Local (non-distributed) model fitted by [[LDA]]. * * This model stores the inferred topics only; it does not store info about the training dataset. */ @Since("1.5.0") object LocalLDAModel extends Loader[LocalLDAModel] { private object SaveLoadV1_0 { val thisFormatVersion = "1.0" val thisClassName = "org.apache.spark.mllib.clustering.LocalLDAModel" // Store the distribution of terms of each topic and the column index in topicsMatrix // as a Row in data. 
    case class Data(topic: Vector, index: Int)

    def save(
        sc: SparkContext,
        path: String,
        topicsMatrix: Matrix,
        docConcentration: Vector,
        topicConcentration: Double,
        gammaShape: Double): Unit = {
      val spark = SparkSession.builder().sparkContext(sc).getOrCreate()

      val k = topicsMatrix.numCols
      val metadata = compact(render
        (("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~
          ("k" -> k) ~ ("vocabSize" -> topicsMatrix.numRows) ~
          ("docConcentration" -> docConcentration.toArray.toSeq) ~
          ("topicConcentration" -> topicConcentration) ~
          ("gammaShape" -> gammaShape)))
      sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))

      val topicsDenseMatrix = topicsMatrix.asBreeze.toDenseMatrix
      val topics = Range(0, k).map { topicInd =>
        Data(Vectors.dense((topicsDenseMatrix(::, topicInd).toArray)), topicInd)
      }
      spark.createDataFrame(topics).repartition(1).write.parquet(Loader.dataPath(path))
    }

    def load(
        sc: SparkContext,
        path: String,
        docConcentration: Vector,
        topicConcentration: Double,
        gammaShape: Double): LocalLDAModel = {
      val dataPath = Loader.dataPath(path)
      val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
      val dataFrame = spark.read.parquet(dataPath)

      Loader.checkSchema[Data](dataFrame.schema)
      val topics = dataFrame.collect()
      val vocabSize = topics(0).getAs[Vector](0).size
      val k = topics.length

      val brzTopics = BDM.zeros[Double](vocabSize, k)
      topics.foreach { case Row(vec: Vector, ind: Int) =>
        brzTopics(::, ind) := vec.asBreeze
      }
      val topicsMat = Matrices.fromBreeze(brzTopics)
      new LocalLDAModel(topicsMat, docConcentration, topicConcentration, gammaShape)
    }
  }

  @Since("1.5.0")
  override def load(sc: SparkContext, path: String): LocalLDAModel = {
    val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
    implicit val formats = DefaultFormats
    val expectedK = (metadata \ "k").extract[Int]
    val expectedVocabSize = (metadata \ "vocabSize").extract[Int]
    val docConcentration =
      Vectors.dense((metadata \ "docConcentration").extract[Seq[Double]].toArray)
    val topicConcentration = (metadata \ "topicConcentration").extract[Double]
    val gammaShape = (metadata \ "gammaShape").extract[Double]
    val classNameV1_0 = SaveLoadV1_0.thisClassName

    val model = (loadedClassName, loadedVersion) match {
      case (className, "1.0") if className == classNameV1_0 =>
        SaveLoadV1_0.load(sc, path, docConcentration, topicConcentration, gammaShape)
      case _ => throw new Exception(
        s"LocalLDAModel.load did not recognize model with (className, format version):" +
          s"($loadedClassName, $loadedVersion). Supported:\n" +
          s" ($classNameV1_0, 1.0)")
    }

    val topicsMatrix = model.topicsMatrix
    require(expectedK == topicsMatrix.numCols,
      s"LocalLDAModel requires $expectedK topics, got ${topicsMatrix.numCols} topics")
    require(expectedVocabSize == topicsMatrix.numRows,
      s"LocalLDAModel requires $expectedVocabSize terms for each topic, " +
        s"but got ${topicsMatrix.numRows}")
    model
  }
}

/**
 * Distributed LDA model.
 * This model stores the inferred topics, the full training dataset, and the topic distributions.
*/ @Since("1.3.0") class DistributedLDAModel private[clustering] ( private[clustering] val graph: Graph[LDA.TopicCounts, LDA.TokenCount], private[clustering] val globalTopicTotals: LDA.TopicCounts, @Since("1.3.0") val k: Int, @Since("1.3.0") val vocabSize: Int, @Since("1.5.0") override val docConcentration: Vector, @Since("1.5.0") override val topicConcentration: Double, private[spark] val iterationTimes: Array[Double], override protected[clustering] val gammaShape: Double = DistributedLDAModel.defaultGammaShape, private[spark] val checkpointFiles: Array[String] = Array.empty[String]) extends LDAModel { import LDA._ /** * Convert model to a local model. * The local model stores the inferred topics but not the topic distributions for training * documents. */ @Since("1.3.0") def toLocal: LocalLDAModel = new LocalLDAModel(topicsMatrix, docConcentration, topicConcentration, gammaShape) /** * Inferred topics, where each topic is represented by a distribution over terms. * This is a matrix of size vocabSize x k, where each column is a topic. * No guarantees are given about the ordering of the topics. * * WARNING: This matrix is collected from an RDD. Beware memory usage when vocabSize, k are large. */ @Since("1.3.0") override lazy val topicsMatrix: Matrix = { // Collect row-major topics val termTopicCounts: Array[(Int, TopicCounts)] = graph.vertices.filter(_._1 < 0).map { case (termIndex, cnts) => (index2term(termIndex), cnts) }.collect() // Convert to Matrix val brzTopics = BDM.zeros[Double](vocabSize, k) termTopicCounts.foreach { case (term, cnts) => var j = 0 while (j < k) { brzTopics(term, j) = cnts(j) j += 1 } } Matrices.fromBreeze(brzTopics) } @Since("1.3.0") override def describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])] = { val numTopics = k // Note: N_k is not needed to find the top terms, but it is needed to normalize weights // to a distribution over terms. val N_k: TopicCounts = globalTopicTotals val topicsInQueues: Array[BoundedPriorityQueue[(Double, Int)]] = graph.vertices.filter(isTermVertex) .mapPartitions { termVertices => // For this partition, collect the most common terms for each topic in queues: // queues(topic) = queue of (term weight, term index). // Term weights are N_{wk} / N_k. val queues = Array.fill(numTopics)(new BoundedPriorityQueue[(Double, Int)](maxTermsPerTopic)) for ((termId, n_wk) <- termVertices) { var topic = 0 while (topic < numTopics) { queues(topic) += (n_wk(topic) / N_k(topic) -> index2term(termId.toInt)) topic += 1 } } Iterator(queues) }.reduce { (q1, q2) => q1.zip(q2).foreach { case (a, b) => a ++= b} q1 } topicsInQueues.map { q => val (termWeights, terms) = q.toArray.sortBy(-_._1).unzip (terms, termWeights) } } /** * Return the top documents for each topic * * @param maxDocumentsPerTopic Maximum number of documents to collect for each topic. * @return Array over topics. Each element represent as a pair of matching arrays: * (IDs for the documents, weights of the topic in these documents). * For each topic, documents are sorted in order of decreasing topic weights. */ @Since("1.5.0") def topDocumentsPerTopic(maxDocumentsPerTopic: Int): Array[(Array[Long], Array[Double])] = { val numTopics = k val topicsInQueues: Array[BoundedPriorityQueue[(Double, Long)]] = topicDistributions.mapPartitions { docVertices => // For this partition, collect the most common docs for each topic in queues: // queues(topic) = queue of (doc topic, doc ID). 
val queues = Array.fill(numTopics)(new BoundedPriorityQueue[(Double, Long)](maxDocumentsPerTopic)) for ((docId, docTopics) <- docVertices) { var topic = 0 while (topic < numTopics) { queues(topic) += (docTopics(topic) -> docId) topic += 1 } } Iterator(queues) }.treeReduce { (q1, q2) => q1.zip(q2).foreach { case (a, b) => a ++= b } q1 } topicsInQueues.map { q => val (docTopics, docs) = q.toArray.sortBy(-_._1).unzip (docs, docTopics) } } /** * Return the top topic for each (doc, term) pair. I.e., for each document, what is the most * likely topic generating each term? * * @return RDD of (doc ID, assignment of top topic index for each term), * where the assignment is specified via a pair of zippable arrays * (term indices, topic indices). Note that terms will be omitted if not present in * the document. */ @Since("1.5.0") lazy val topicAssignments: RDD[(Long, Array[Int], Array[Int])] = { // For reference, compare the below code with the core part of EMLDAOptimizer.next(). val eta = topicConcentration val W = vocabSize val alpha = docConcentration(0) val N_k = globalTopicTotals val sendMsg: EdgeContext[TopicCounts, TokenCount, (Array[Int], Array[Int])] => Unit = (edgeContext) => { // E-STEP: Compute gamma_{wjk} (smoothed topic distributions). val scaledTopicDistribution: TopicCounts = computePTopic(edgeContext.srcAttr, edgeContext.dstAttr, N_k, W, eta, alpha) // For this (doc j, term w), send top topic k to doc vertex. val topTopic: Int = argmax(scaledTopicDistribution) val term: Int = index2term(edgeContext.dstId) edgeContext.sendToSrc((Array(term), Array(topTopic))) } val mergeMsg: ((Array[Int], Array[Int]), (Array[Int], Array[Int])) => (Array[Int], Array[Int]) = (terms_topics0, terms_topics1) => { (terms_topics0._1 ++ terms_topics1._1, terms_topics0._2 ++ terms_topics1._2) } // M-STEP: Aggregation computes new N_{kj}, N_{wk} counts. val perDocAssignments = graph.aggregateMessages[(Array[Int], Array[Int])](sendMsg, mergeMsg).filter(isDocumentVertex) perDocAssignments.map { case (docID: Long, (terms: Array[Int], topics: Array[Int])) => // TODO: Avoid zip, which is inefficient. val (sortedTerms, sortedTopics) = terms.zip(topics).sortBy(_._1).unzip (docID, sortedTerms, sortedTopics) } } /** Java-friendly version of [[topicAssignments]] */ @Since("1.5.0") lazy val javaTopicAssignments: JavaRDD[(java.lang.Long, Array[Int], Array[Int])] = { topicAssignments.asInstanceOf[RDD[(java.lang.Long, Array[Int], Array[Int])]].toJavaRDD() } // TODO // override def logLikelihood(documents: RDD[(Long, Vector)]): Double = ??? /** * Log likelihood of the observed tokens in the training set, * given the current parameter estimates: * log P(docs | topics, topic distributions for docs, alpha, eta) * * Note: * - This excludes the prior; for that, use [[logPrior]]. * - Even with [[logPrior]], this is NOT the same as the data log likelihood given the * hyperparameters. */ @Since("1.3.0") lazy val logLikelihood: Double = { // TODO: generalize this for asymmetric (non-scalar) alpha val alpha = this.docConcentration(0) // To avoid closure capture of enclosing object val eta = this.topicConcentration assert(eta > 1.0) assert(alpha > 1.0) val N_k = globalTopicTotals val smoothed_N_k: TopicCounts = N_k + (vocabSize * (eta - 1.0)) // Edges: Compute token log probability from phi_{wk}, theta_{kj}. 
val sendMsg: EdgeContext[TopicCounts, TokenCount, Double] => Unit = (edgeContext) => { val N_wj = edgeContext.attr val smoothed_N_wk: TopicCounts = edgeContext.dstAttr + (eta - 1.0) val smoothed_N_kj: TopicCounts = edgeContext.srcAttr + (alpha - 1.0) val phi_wk: TopicCounts = smoothed_N_wk /:/ smoothed_N_k val theta_kj: TopicCounts = normalize(smoothed_N_kj, 1.0) val tokenLogLikelihood = N_wj * math.log(phi_wk.dot(theta_kj)) edgeContext.sendToDst(tokenLogLikelihood) } graph.aggregateMessages[Double](sendMsg, _ + _) .map(_._2).fold(0.0)(_ + _) } /** * Log probability of the current parameter estimate: * log P(topics, topic distributions for docs | alpha, eta) */ @Since("1.3.0") lazy val logPrior: Double = { // TODO: generalize this for asymmetric (non-scalar) alpha val alpha = this.docConcentration(0) // To avoid closure capture of enclosing object val eta = this.topicConcentration // Term vertices: Compute phi_{wk}. Use to compute prior log probability. // Doc vertex: Compute theta_{kj}. Use to compute prior log probability. val N_k = globalTopicTotals val smoothed_N_k: TopicCounts = N_k + (vocabSize * (eta - 1.0)) val seqOp: (Double, (VertexId, TopicCounts)) => Double = { case (sumPrior: Double, vertex: (VertexId, TopicCounts)) => if (isTermVertex(vertex)) { val N_wk = vertex._2 val smoothed_N_wk: TopicCounts = N_wk + (eta - 1.0) val phi_wk: TopicCounts = smoothed_N_wk /:/ smoothed_N_k sumPrior + (eta - 1.0) * sum(phi_wk.map(math.log)) } else { val N_kj = vertex._2 val smoothed_N_kj: TopicCounts = N_kj + (alpha - 1.0) val theta_kj: TopicCounts = normalize(smoothed_N_kj, 1.0) sumPrior + (alpha - 1.0) * sum(theta_kj.map(math.log)) } } graph.vertices.aggregate(0.0)(seqOp, _ + _) } /** * For each document in the training set, return the distribution over topics for that document * ("theta_doc"). * * @return RDD of (document ID, topic distribution) pairs */ @Since("1.3.0") def topicDistributions: RDD[(Long, Vector)] = { graph.vertices.filter(LDA.isDocumentVertex).map { case (docID, topicCounts) => (docID, Vectors.fromBreeze(normalize(topicCounts, 1.0))) } } /** * Java-friendly version of [[topicDistributions]] */ @Since("1.4.1") def javaTopicDistributions: JavaPairRDD[java.lang.Long, Vector] = { JavaPairRDD.fromRDD(topicDistributions.asInstanceOf[RDD[(java.lang.Long, Vector)]]) } /** * For each document, return the top k weighted topics for that document and their weights. * @return RDD of (doc ID, topic indices, topic weights) */ @Since("1.5.0") def topTopicsPerDocument(k: Int): RDD[(Long, Array[Int], Array[Double])] = { graph.vertices.filter(LDA.isDocumentVertex).map { case (docID, topicCounts) => val topIndices = argtopk(topicCounts, k) val sumCounts = sum(topicCounts) val weights = if (sumCounts != 0) { topicCounts(topIndices).toArray.map(_ / sumCounts) } else { topicCounts(topIndices).toArray } (docID, topIndices.toArray, weights) } } /** * Java-friendly version of [[topTopicsPerDocument]] */ @Since("1.5.0") def javaTopTopicsPerDocument(k: Int): JavaRDD[(java.lang.Long, Array[Int], Array[Double])] = { val topics = topTopicsPerDocument(k) topics.asInstanceOf[RDD[(java.lang.Long, Array[Int], Array[Double])]].toJavaRDD() } // TODO: // override def topicDistributions(documents: RDD[(Long, Vector)]): RDD[(Long, Vector)] = ??? @Since("1.5.0") override def save(sc: SparkContext, path: String): Unit = { // Note: This intentionally does not save checkpointFiles. 
DistributedLDAModel.SaveLoadV1_0.save( sc, path, graph, globalTopicTotals, k, vocabSize, docConcentration, topicConcentration, iterationTimes, gammaShape) } } /** * Distributed model fitted by [[LDA]]. * This type of model is currently only produced by Expectation-Maximization (EM). * * This model stores the inferred topics, the full training dataset, and the topic distribution * for each training document. */ @Since("1.5.0") object DistributedLDAModel extends Loader[DistributedLDAModel] { /** * The [[DistributedLDAModel]] constructor's default arguments assume gammaShape = 100 * to ensure equivalence in LDAModel.toLocal conversion. */ private[clustering] val defaultGammaShape: Double = 100 private object SaveLoadV1_0 { val thisFormatVersion = "1.0" val thisClassName = "org.apache.spark.mllib.clustering.DistributedLDAModel" // Store globalTopicTotals as a Vector. case class Data(globalTopicTotals: Vector) // Store each term and document vertex with an id and the topicWeights. case class VertexData(id: Long, topicWeights: Vector) // Store each edge with the source id, destination id and tokenCounts. case class EdgeData(srcId: Long, dstId: Long, tokenCounts: Double) def save( sc: SparkContext, path: String, graph: Graph[LDA.TopicCounts, LDA.TokenCount], globalTopicTotals: LDA.TopicCounts, k: Int, vocabSize: Int, docConcentration: Vector, topicConcentration: Double, iterationTimes: Array[Double], gammaShape: Double): Unit = { val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val metadata = compact(render (("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~ ("k" -> k) ~ ("vocabSize" -> vocabSize) ~ ("docConcentration" -> docConcentration.toArray.toSeq) ~ ("topicConcentration" -> topicConcentration) ~ ("iterationTimes" -> iterationTimes.toSeq) ~ ("gammaShape" -> gammaShape))) sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path)) val newPath = new Path(Loader.dataPath(path), "globalTopicTotals").toUri.toString spark.createDataFrame(Seq(Data(Vectors.fromBreeze(globalTopicTotals)))).write.parquet(newPath) val verticesPath = new Path(Loader.dataPath(path), "topicCounts").toUri.toString spark.createDataFrame(graph.vertices.map { case (ind, vertex) => VertexData(ind, Vectors.fromBreeze(vertex)) }).write.parquet(verticesPath) val edgesPath = new Path(Loader.dataPath(path), "tokenCounts").toUri.toString spark.createDataFrame(graph.edges.map { case Edge(srcId, dstId, prop) => EdgeData(srcId, dstId, prop) }).write.parquet(edgesPath) } def load( sc: SparkContext, path: String, vocabSize: Int, docConcentration: Vector, topicConcentration: Double, iterationTimes: Array[Double], gammaShape: Double): DistributedLDAModel = { val dataPath = new Path(Loader.dataPath(path), "globalTopicTotals").toUri.toString val vertexDataPath = new Path(Loader.dataPath(path), "topicCounts").toUri.toString val edgeDataPath = new Path(Loader.dataPath(path), "tokenCounts").toUri.toString val spark = SparkSession.builder().sparkContext(sc).getOrCreate() val dataFrame = spark.read.parquet(dataPath) val vertexDataFrame = spark.read.parquet(vertexDataPath) val edgeDataFrame = spark.read.parquet(edgeDataPath) Loader.checkSchema[Data](dataFrame.schema) Loader.checkSchema[VertexData](vertexDataFrame.schema) Loader.checkSchema[EdgeData](edgeDataFrame.schema) val globalTopicTotals: LDA.TopicCounts = dataFrame.first().getAs[Vector](0).asBreeze.toDenseVector val vertices: RDD[(VertexId, LDA.TopicCounts)] = vertexDataFrame.rdd.map { case Row(ind: Long, vec: Vector) => (ind, 
          vec.asBreeze.toDenseVector)
      }
      val edges: RDD[Edge[LDA.TokenCount]] = edgeDataFrame.rdd.map {
        case Row(srcId: Long, dstId: Long, prop: Double) =>
          Edge(srcId, dstId, prop)
      }
      val graph: Graph[LDA.TopicCounts, LDA.TokenCount] = Graph(vertices, edges)

      new DistributedLDAModel(graph, globalTopicTotals, globalTopicTotals.length,
        vocabSize, docConcentration, topicConcentration, iterationTimes, gammaShape)
    }
  }

  @Since("1.5.0")
  override def load(sc: SparkContext, path: String): DistributedLDAModel = {
    val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
    implicit val formats = DefaultFormats
    val expectedK = (metadata \ "k").extract[Int]
    val vocabSize = (metadata \ "vocabSize").extract[Int]
    val docConcentration =
      Vectors.dense((metadata \ "docConcentration").extract[Seq[Double]].toArray)
    val topicConcentration = (metadata \ "topicConcentration").extract[Double]
    val iterationTimes = (metadata \ "iterationTimes").extract[Seq[Double]]
    val gammaShape = (metadata \ "gammaShape").extract[Double]
    val classNameV1_0 = SaveLoadV1_0.thisClassName

    val model = (loadedClassName, loadedVersion) match {
      case (className, "1.0") if className == classNameV1_0 =>
        DistributedLDAModel.SaveLoadV1_0.load(sc, path, vocabSize, docConcentration,
          topicConcentration, iterationTimes.toArray, gammaShape)
      case _ => throw new Exception(
        s"DistributedLDAModel.load did not recognize model with (className, format version):" +
          s"($loadedClassName, $loadedVersion). Supported: ($classNameV1_0, 1.0)")
    }

    require(model.vocabSize == vocabSize,
      s"DistributedLDAModel requires $vocabSize vocabSize, got ${model.vocabSize} vocabSize")
    require(model.docConcentration == docConcentration,
      s"DistributedLDAModel requires $docConcentration docConcentration, " +
        s"got ${model.docConcentration} docConcentration")
    require(model.topicConcentration == topicConcentration,
      s"DistributedLDAModel requires $topicConcentration topicConcentration, " +
        s"got ${model.topicConcentration} topicConcentration")
    require(expectedK == model.k,
      s"DistributedLDAModel requires $expectedK topics, got ${model.k} topics")
    model
  }
}
ueshin/apache-spark
mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
Scala
apache-2.0
38,814
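An end-to-end sketch of fitting and persisting the models above (hypothetical toy corpus; the path is a placeholder).

import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.{DistributedLDAModel, LDA, LocalLDAModel}
import org.apache.spark.mllib.linalg.Vectors

object LdaDemo {
  def run(sc: SparkContext): Unit = {
    // Documents are (id, term-count vector) pairs over a fixed vocabulary.
    val corpus = sc.parallelize(Seq(
      (0L, Vectors.dense(1.0, 2.0, 0.0)),
      (1L, Vectors.dense(0.0, 1.0, 3.0))))

    // The default (EM) optimizer yields a DistributedLDAModel.
    val model = new LDA().setK(2).setMaxIterations(20).run(corpus)
    val local: LocalLDAModel = model match {
      case dist: DistributedLDAModel => dist.toLocal
      case loc: LocalLDAModel => loc
    }
    local.save(sc, "/tmp/lda-model")

    val reloaded = LocalLDAModel.load(sc, "/tmp/lda-model")
    reloaded.describeTopics(3).foreach { case (terms, weights) =>
      println(terms.zip(weights).mkString(", "))
    }
  }
}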
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

import org.scalatest.prop.Checkers
import org.scalacheck._
import Arbitrary._
import Prop._
import Integer.MIN_VALUE
import org.scalatest.exceptions.TestFailedException
import org.scalatest.enablers.Length
import org.scalatest.enablers.Size

class ShouldLengthSpec extends Spec with Matchers with Checkers with ReturnsNormallyThrowsAssertion {

  // Checking for a specific length

  object `The 'have length (Int)' syntax` {

    object `on String` {

      def `should do nothing if string length matches specified length` {
        "hi" should have length (2)
        check((s: String) => returnsNormally(s should have length (s.length)))
      }

      def `should do nothing if string length does not match and used with should not` {
        "hi" should not { have length (3) }
        "hi" should not have length (3)
        check((s: String, i: Int) => i != s.length ==> returnsNormally(s should not { have length (i) }))
        check((s: String, i: Int) => i != s.length ==> returnsNormally(s should not have length (i)))
      }

      def `should do nothing when string length matches and used in a logical-and expression` {
        "hi" should (have length (2) and (have length (3 - 1)))
        "hi" should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when string length matches and used in a logical-or expression` {
        "hi" should { have length (77) or (have length (3 - 1)) }
        "hi" should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when string length doesn't match and used in a logical-and expression with not` {
        "hi" should (not (have length (5)) and not (have length (3)))
        "hi" should { not have length (5) and (not have length (3)) }
        "hi" should (not have length (5) and not have length (3))
      }

      def `should do nothing when string length doesn't match and used in a logical-or expression with not` {
        "hi" should (not (have length (2)) or not (have length (3)))
        "hi" should ((not have length (2)) or (not have length (3)))
        "hi" should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if string length does not match specified length` {
        val caught1 = intercept[TestFailedException] {
          "hi" should have length (3)
        }
        assert(caught1.getMessage === "\"hi\" did not have length 3")
        check((s: String) => throwsTestFailedException(s should have length (s.length + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] {
          "hi" should have length (-2)
        }
        assert(caught1.getMessage === "\"hi\" did not have length -2")
        check((s: String) => throwsTestFailedException(s should have length (if (s.length == 0) -1 else -s.length)))
      }

      def `should throw an assertion error when string length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] {
          "hi" should { have length (5) and (have length (2 - 1)) }
        }
        assert(caught1.getMessage === "\"hi\" did not have length 5")
        val caught2 = intercept[TestFailedException] {
          "hi" should ((have length (5)) and (have length (2 - 1)))
        }
        assert(caught2.getMessage === "\"hi\" did not have length 5")
        val caught3 = intercept[TestFailedException] {
          "hi" should (have length (5) and have length (2 - 1))
        }
        assert(caught3.getMessage === "\"hi\" did not have length 5")
      }

      def `should throw an assertion error when string length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] {
          "hi" should { have length (55) or (have length (22)) }
        }
        assert(caught1.getMessage === "\"hi\" did not have length 55, and \"hi\" did not have length 22")
        val caught2 = intercept[TestFailedException] {
          "hi" should ((have length (55)) or (have length (22)))
        }
        assert(caught2.getMessage === "\"hi\" did not have length 55, and \"hi\" did not have length 22")
        val caught3 = intercept[TestFailedException] {
          "hi" should (have length (55) or have length (22))
        }
        assert(caught3.getMessage === "\"hi\" did not have length 55, and \"hi\" did not have length 22")
      }

      def `should throw an assertion error when string length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] {
          "hi" should { not { have length (3) } and not { have length (2) }}
        }
        assert(caught1.getMessage === "\"hi\" did not have length 3, but \"hi\" had length 2")
        val caught2 = intercept[TestFailedException] {
          "hi" should { not have length (3) and (not have length (2)) }
        }
        assert(caught2.getMessage === "\"hi\" did not have length 3, but \"hi\" had length 2")
        val caught3 = intercept[TestFailedException] {
          "hi" should (not have length (3) and not have length (2))
        }
        assert(caught3.getMessage === "\"hi\" did not have length 3, but \"hi\" had length 2")
      }

      def `should throw an assertion error when string length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] {
          "hi" should { not { have length (2) } or not { have length (2) }}
        }
        assert(caught1.getMessage === "\"hi\" had length 2, and \"hi\" had length 2")
        val caught2 = intercept[TestFailedException] {
          "hi" should { not have length (2) or (not have length (2)) }
        }
        assert(caught2.getMessage === "\"hi\" had length 2, and \"hi\" had length 2")
        val caught3 = intercept[TestFailedException] {
          "hi" should (not have length (2) or not have length (2))
        }
        assert(caught3.getMessage === "\"hi\" had length 2, and \"hi\" had length 2")
      }

      def `should give good error messages when more than two clauses are used with logical connectors` {
        val caught1 = intercept[TestFailedException] {
          "hi" should (not have length (1) and not have length (3) and not have length (2))
        }
        assert(caught1.getMessage === "\"hi\" did not have length 1, and \"hi\" did not have length 3, but \"hi\" had length 2")
        val caught2 = intercept[TestFailedException] {
          "hi" should (not have length (2) or not equal ("hi") or equal ("frog"))
        }
        assert(caught2.getMessage === "\"hi\" had length 2, and \"hi\" equaled \"hi\", and \"[hi]\" did not equal \"[frog]\"")
      }
    }

    object `on Array` {

      def `should do nothing if array length matches specified length` {
        Array(1, 2) should have length (2)
        // check((arr: Array[Int]) => returnsNormally(arr should have length (arr.length)))
      }

      def `should do nothing if array length does not match and used with should not` {
        Array(1, 2) should not { have length (3) }
        Array(1, 2) should not have length (3)
        // check((arr: Array[Int], i: Int) => i != arr.length ==> returnsNormally(arr should not { have length (i) }))
        // check((arr: Array[Int], i: Int) => i != arr.length ==> returnsNormally(arr should not have length (i)))
      }

      def `should do nothing when array length matches and used in a logical-and expression` {
        Array(1, 2) should { have length (2) and (have length (3 - 1)) }
        Array(1, 2) should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when array length matches and used in a logical-or expression` {
        Array(1, 2) should { have length (77) or (have length (3 - 1)) }
        Array(1, 2) should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when array length doesn't match and used in a logical-and expression with not` {
        Array(1, 2) should { not { have length (5) } and not { have length (3) }}
        Array(1, 2) should { not have length (5) and (not have length (3)) }
        Array(1, 2) should (not have length (5) and not have length (3))
      }

      def `should do nothing when array length doesn't match and used in a logical-or expression with not` {
        Array(1, 2) should { not { have length (2) } or not { have length (3) }}
        Array(1, 2) should { not have length (2) or (not have length (3)) }
        Array(1, 2) should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if array length does not match specified length` {
        val caught1 = intercept[TestFailedException] {
          Array(1, 2) should have length (3)
        }
        assert(caught1.getMessage.endsWith("Array(1, 2) did not have length 3"))
        // check((arr: Array[String]) => throwsTestFailedException(arr should have length (arr.length + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] {
          Array(1, 2) should have length (-2)
        }
        assert(caught1.getMessage.endsWith("Array(1, 2) did not have length -2"))
        // check((arr: Array[Int]) => throwsTestFailedException(arr should have length (if (arr.length == 0) -1 else -arr.length)))
      }

      def `should throw an assertion error when array length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] {
          Array(1, 2) should { have length (5) and (have length (2 - 1)) }
        }
        assert(caught1.getMessage === "Array(1, 2) did not have length 5")
        val caught2 = intercept[TestFailedException] {
          Array(1, 2) should ((have length (5)) and (have length (2 - 1)))
        }
        assert(caught2.getMessage === "Array(1, 2) did not have length 5")
        val caught3 = intercept[TestFailedException] {
          Array(1, 2) should (have length (5) and have length (2 - 1))
        }
        assert(caught3.getMessage === "Array(1, 2) did not have length 5")
      }

      def `should throw an assertion error when array length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] {
          Array(1, 2) should { have length (55) or (have length (22)) }
        }
        assert(caught1.getMessage === "Array(1, 2) did not have length 55, and Array(1, 2) did not have length 22")
        val caught2 = intercept[TestFailedException] {
          Array(1, 2) should ((have length (55)) or (have length (22)))
        }
        assert(caught2.getMessage === "Array(1, 2) did not have length 55, and Array(1, 2) did not have length 22")
        val caught3 = intercept[TestFailedException] {
          Array(1, 2) should (have length (55) or have length (22))
        }
        assert(caught3.getMessage === "Array(1, 2) did not have length 55, and Array(1, 2) did not have length 22")
      }

      def `should throw an assertion error when array length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] {
          Array(1, 2) should { not { have length (3) } and not { have length (2) }}
        }
        assert(caught1.getMessage === "Array(1, 2) did not have length 3, but Array(1, 2) had length 2")
        val caught2 = intercept[TestFailedException] {
          Array(1, 2) should { not have length (3) and (not have length (2)) }
        }
        assert(caught2.getMessage === "Array(1, 2) did not have length 3, but Array(1, 2) had length 2")
        val caught3 = intercept[TestFailedException] {
          Array(1, 2) should (not have length (3) and not have length (2))
        }
        assert(caught3.getMessage === "Array(1, 2) did not have length 3, but Array(1, 2) had length 2")
      }

      def `should throw an assertion error when array length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] {
          Array(1, 2) should { not { have length (2) } or not { have length (2) }}
        }
        assert(caught1.getMessage === "Array(1, 2) had length 2, and Array(1, 2) had length 2")
        val caught2 = intercept[TestFailedException] {
          Array(1, 2) should { not have length (2) or (not have length (2)) }
        }
        assert(caught2.getMessage === "Array(1, 2) had length 2, and Array(1, 2) had length 2")
        val caught3 = intercept[TestFailedException] {
          Array(1, 2) should (not have length (2) or not have length (2))
        }
        assert(caught3.getMessage === "Array(1, 2) had length 2, and Array(1, 2) had length 2")
      }

      def `should work on parallel form` {
        Array(1, 2).par should have length (2)
      }
    }

    object `on scala.List` {

      def `should do nothing if list length matches specified length` {
        List(1, 2) should have length (2)
        check((lst: List[Int]) => returnsNormally(lst should have length (lst.length)))
      }

      def `should do nothing if list length does not match and used with should not` {
        List(1, 2) should not { have length (3) }
        List(1, 2) should not have length (3)
        check((lst: List[Int], i: Int) => i != lst.length ==> returnsNormally(lst should not { have length (i) }))
        check((lst: List[Int], i: Int) => i != lst.length ==> returnsNormally(lst should not have length (i)))
      }

      def `should do nothing when list length matches and used in a logical-and expression` {
        List(1, 2) should { have length (2) and (have length (3 - 1)) }
        List(1, 2) should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when list length matches and used in a logical-or expression` {
        List(1, 2) should { have length (77) or (have length (3 - 1)) }
        List(1, 2) should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when list length doesn't match and used in a logical-and expression with not` {
        List(1, 2) should { not { have length (5) } and not { have length (3) }}
        List(1, 2) should { not have length (5) and (not have length (3)) }
      }

      def `should do nothing when list length doesn't match and used in a logical-or expression with not` {
        List(1, 2) should { not { have length (2) } or not { have length (3) }}
        List(1, 2) should { not have length (2) or (not have length (3)) }
        List(1, 2) should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if list length does not match specified length` {
        val caught1 = intercept[TestFailedException] {
          List(1, 2) should have length (3)
        }
        assert(caught1.getMessage === "List(1, 2) did not have length 3")
        check((lst: List[String]) => throwsTestFailedException(lst should have length (lst.length + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] {
          List(1, 2) should have length (-2)
        }
        assert(caught1.getMessage === "List(1, 2) did not have length -2")
        check((lst: List[Int]) => throwsTestFailedException(lst should have length (if (lst.length == 0) -1 else -lst.length)))
      }
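      // A hypothetical factoring, sketched under stated assumptions and not
      // taken from the original spec: the positive "matches its own length"
      // checks repeated in each section vary only in the implicit Length[T]
      // instance that `have length` resolves. Assuming Matchers supplies a
      // Length instance for List (the direct List checks above suggest it
      // does), one generic property can express that case. The helper name
      // `matchesOwnLength` is invented for illustration; it relies on the
      // same extentOf signature as the Length instances defined later in
      // this file.
      def matchesOwnLength[T](t: T)(implicit len: Length[T]): Unit =
        t should have length (len.extentOf(t))

      def `should hold the generic own-length property (sketch)` {
        check((lst: List[Int]) => returnsNormally(matchesOwnLength(lst)))
      }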
def `should throw an assertion error when list length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { List(1, 2) should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "List(1, 2) did not have length 5") val caught2 = intercept[TestFailedException] { List(1, 2) should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "List(1, 2) did not have length 5") val caught3 = intercept[TestFailedException] { List(1, 2) should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "List(1, 2) did not have length 5") } def `should throw an assertion error when list length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { List(1, 2) should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "List(1, 2) did not have length 55, and List(1, 2) did not have length 22") val caught2 = intercept[TestFailedException] { List(1, 2) should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "List(1, 2) did not have length 55, and List(1, 2) did not have length 22") val caught3 = intercept[TestFailedException] { List(1, 2) should (have length (55) or have length (22)) } assert(caught3.getMessage === "List(1, 2) did not have length 55, and List(1, 2) did not have length 22") } def `should throw an assertion error when list length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { List(1, 2) should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "List(1, 2) did not have length 3, but List(1, 2) had length 2") val caught2 = intercept[TestFailedException] { List(1, 2) should { not have length (3) and (not have length (2)) } } assert(caught2.getMessage === "List(1, 2) did not have length 3, but List(1, 2) had length 2") val caught3 = intercept[TestFailedException] { List(1, 2) should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "List(1, 2) did not have length 3, but List(1, 2) had length 2") } def `should throw an assertion error when list length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { List(1, 2) should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "List(1, 2) had length 2, and List(1, 2) had length 2") val caught2 = intercept[TestFailedException] { List(1, 2) should { not have length (2) or (not have length (2)) } } assert(caught2.getMessage === "List(1, 2) had length 2, and List(1, 2) had length 2") val caught3 = intercept[TestFailedException] { List(1, 2) should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "List(1, 2) had length 2, and List(1, 2) had length 2") } def `should work on parallel form` { List(1, 2).par should have length (2) } } object `on java.util.List` { val javaList: java.util.List[Int] = new java.util.ArrayList javaList.add(1) javaList.add(2) def `should do nothing if list length matches specified length` { javaList should have length (2) // check((lst: java.util.List[Int]) => returnsNormally(lst should have length (lst.length))) } def `should do nothing if list length does not match and used with should not` { javaList should not { have length (3) } javaList should not have length (3) // check((lst: List[Int], i: Int) => i != lst.length ==> returnsNormally(lst should not { have length (i) })) } def `should do nothing when 
list length matches and used in a logical-and expression` { javaList should { have length (2) and (have length (3 - 1)) } javaList should (have length (2) and have length (3 - 1)) } def `should do nothing when list length matches and used in a logical-or expression` { javaList should { have length (77) or (have length (3 - 1)) } javaList should (have length (77) or have length (3 - 1)) } def `should do nothing when list length doesn't match and used in a logical-and expression with not` { javaList should { not { have length (5) } and not { have length (3) }} javaList should (not have length (5) and not have length (3)) } def `should do nothing when list length doesn't match and used in a logical-or expression with not` { javaList should { not { have length (2) } or not { have length (3) }} javaList should (not have length (2) or not have length (3)) } def `should throw TestFailedException if list length does not match specified length` { val caught1 = intercept[TestFailedException] { javaList should have length (3) } assert(caught1.getMessage === "[1, 2] did not have length 3") // check((lst: List[String]) => throwsTestFailedException(lst should have length (lst.length + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { javaList should have length (-2) } assert(caught1.getMessage === "[1, 2] did not have length -2") // check((lst: List[Int]) => throwsTestFailedException(lst should have length (if (lst.length == 0) -1 else -lst.length))) } def `should throw an assertion error when list length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { javaList should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "[1, 2] did not have length 5") val caught2 = intercept[TestFailedException] { javaList should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "[1, 2] did not have length 5") val caught3 = intercept[TestFailedException] { javaList should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "[1, 2] did not have length 5") } def `should throw an assertion error when list length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { javaList should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "[1, 2] did not have length 55, and [1, 2] did not have length 22") val caught2 = intercept[TestFailedException] { javaList should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "[1, 2] did not have length 55, and [1, 2] did not have length 22") val caught3 = intercept[TestFailedException] { javaList should (have length (55) or have length (22)) } assert(caught3.getMessage === "[1, 2] did not have length 55, and [1, 2] did not have length 22") } def `should throw an assertion error when list length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { javaList should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "[1, 2] did not have length 3, but [1, 2] had length 2") val caught2 = intercept[TestFailedException] { javaList should { not have length (3) and (not have length (2)) } } assert(caught2.getMessage === "[1, 2] did not have length 3, but [1, 2] had length 2") val caught3 = intercept[TestFailedException] { javaList should (not have length (3) and not have length (2)) } 
assert(caught3.getMessage === "[1, 2] did not have length 3, but [1, 2] had length 2") } def `should throw an assertion error when list length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { javaList should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "[1, 2] had length 2, and [1, 2] had length 2") val caught2 = intercept[TestFailedException] { javaList should { not have length (2) or (not have length (2)) } } assert(caught2.getMessage === "[1, 2] had length 2, and [1, 2] had length 2") val caught3 = intercept[TestFailedException] { javaList should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "[1, 2] had length 2, and [1, 2] had length 2") } } // I repeat these with copy and paste, because I need to test that each static structural type works, and // that makes it hard to pass them to a common "behaves like" method object `on an arbitrary object that has an empty-paren Int length method` { class Lengthy(len: Int) { def length(): Int = len override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.length() } def `should do nothing if object length matches specified length` { obj should have length (2) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not have length (3) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should (have length (2) and have length (3 - 1)) } def `should do nothing when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (3 - 1)) } obj should (have length (77) or have length (3 - 1)) } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should {
have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "lengthy did not have length 5") val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "lengthy did not have length 5") } def `should throw an assertion error when object length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) } assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") } def `should throw an assertion error when object length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) }} } assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2") } def `should throw an assertion error when object length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) }} } assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2") } } object `on an arbitrary object that has a parameterless Int length method` { class Lengthy(len: Int) { def length: Int = len // The only difference between the previous is the structure of this member override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.length } def `should do nothing if object length matches specified length` { obj should have length (2) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not have length (3) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length 
matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should (have length (2) and have length (3 - 1)) } def `should do nothing when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (3 - 1)) } obj should (have length (77) or have length (3 - 1)) } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "lengthy did not have length 5") val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "lengthy did not have length 5") } def `should throw an assertion error when object length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) } assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") } def `should throw an assertion error when object length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) }} } assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "lengthy did not have length 3, but lengthy 
had length 2") } def `should throw an assertion error when object length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) }} } assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2") } } object `on an arbitrary object that has a Int length field` { class Lengthy(len: Int) { val length: Int = len // The only difference between the previous is the structure of this member override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.length } def `should do nothing if object length matches specified length` { obj should have length (2) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not have length (3) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should (have length (2) and have length (3 - 1)) } def `should do nothing when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (3 - 1)) } obj should (have length (77) or have length (3 - 1)) } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and 
(have length (2 - 1))) } assert(caught2.getMessage === "lengthy did not have length 5") val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "lengthy did not have length 5") } def `should throw an assertion error when object length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) } assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") } def `should throw an assertion error when object length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) }} } assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2") } def `should throw an assertion error when object length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) }} } assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2") } } object `on an arbitrary object that has an empty-paren Int getLength method` { class Lengthy(len: Int) { def getLength(): Int = len // The only difference between the previous is the structure of this member override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.getLength() } def `should do nothing if object length matches specified length` { obj should have length (2) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not have length (3) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should (have length (2) and have length (3 - 1)) } def `should do nothing 
when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (3 - 1)) } obj should (have length (77) or have length (3 - 1)) } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "lengthy did not have length 5") val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "lengthy did not have length 5") } def `should throw an assertion error when object length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) } assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") } def `should throw an assertion error when object length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) }} } assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2") } def `should throw an assertion error when object length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { obj 
should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) }} } assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2") } } object `on an arbitrary object that has a parameterless Int getLength method` { class Lengthy(len: Int) { def getLength: Int = len // The only difference between the previous is the structure of this member override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.getLength } def `should do nothing if object length matches specified length` { obj should have length (2) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not have length (3) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should (have length (2) and have length (3 - 1)) } def `should do nothing when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (3 - 1)) } obj should (have length (77) or have length (3 - 1)) } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "lengthy did not have length 5") val caught3 = intercept[TestFailedException] { obj should (have 
length (5) and have length (2 - 1)) } assert(caught3.getMessage === "lengthy did not have length 5") } def `should throw an assertion error when object length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) } assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") } def `should throw an assertion error when object length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) }} } assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2") } def `should throw an assertion error when object length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) }} } assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2") } } object `on an arbitrary object that has an Int getLength field` { class Lengthy(len: Int) { val getLength: Int = len // The only difference between the previous is the structure of this member override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.getLength } def `should do nothing if object length matches specified length` { obj should have length (2) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not have length (3) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should (have length (2) and have length (3 - 1)) } def `should do nothing when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (3 - 1)) } obj should (have length (77) or have length (3 
- 1)) } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) } assert(caught2.getMessage === "lengthy did not have length 5") val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) } assert(caught3.getMessage === "lengthy did not have length 5") } def `should throw an assertion error when object length doesn't match and used in a logical-or expression` { val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } } assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) } assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) } assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22") } def `should throw an assertion error when object length matches and used in a logical-and expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) }} } assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) }} } assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) } assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2") } def `should throw an assertion error when object length matches and used in a logical-or expression with not` { val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) }} } assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2") val caught2 = 
intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) }} } assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2") val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) } assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2") } } object `on an arbitrary object that has an empty-paren Long length method` { class Lengthy(len: Long) { def length(): Long = len override def toString = "lengthy" } val obj = new Lengthy(2) implicit val lengthOfLengthy = new Length[Lengthy] { def extentOf(o: Lengthy): Long = o.length() } def `should do nothing if object length matches specified length` { obj should have length (2) obj should have length (2L) check((len: Int) => returnsNormally(new Lengthy(len) should have length (len))) check((len: Long) => returnsNormally(new Lengthy(len) should have length (len))) } def `should do nothing if object length does not match and used with should not` { obj should not { have length (3) } obj should not { have length (3L) } obj should not have length (3) obj should not have length (3L) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) })) check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen))) } def `should do nothing when object length matches and used in a logical-and expression` { obj should { have length (2) and (have length (3 - 1)) } obj should { have length (2L) and (have length (3 - 1)) } } def `should do nothing when object length matches and used in a logical-or expression` { obj should { have length (77) or (have length (2L)) } obj should { have length (77L) or (have length (2)) } } def `should do nothing when object length doesn't match and used in a logical-and expression with not` { obj should { not { have length (5) } and not { have length (3) }} obj should (not have length (5) and not have length (3)) } def `should do nothing when object length doesn't match and used in a logical-or expression with not` { obj should { not { have length (2) } or not { have length (3) }} obj should (not have length (2) or not have length (3)) } def `should throw TestFailedException if object length does not match specified length` { val caught1 = intercept[TestFailedException] { obj should have length (3) } assert(caught1.getMessage === "lengthy did not have length 3") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1))) } def `should throw TestFailedException with normal error message if specified length is negative` { val caught1 = intercept[TestFailedException] { obj should have length (-2) } assert(caught1.getMessage === "lengthy did not have length -2") check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len))) } def `should throw an assertion error when object length doesn't match and used in a logical-and expression` { val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } } assert(caught1.getMessage === "lengthy did not have length 5") val caught2 = 
          intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
      }

      def `should throw an assertion error when object length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
      }

      def `should throw an assertion error when object length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
      }
    }

    object `on an arbitrary object that has a parameterless Long length method` {

      class Lengthy(len: Long) {
        def length: Long = len // The only difference from the previous is the structure of this member
        override def toString = "lengthy"
      }
      val obj = new Lengthy(2)

      implicit val lengthOfLengthy = new Length[Lengthy] {
        def extentOf(o: Lengthy): Long = o.length
      }

      def `should do nothing if object length matches specified length` {
        obj should have length (2)
        obj should have length (2L)
        check((len: Int) => returnsNormally(new Lengthy(len) should have length (len)))
        check((len: Long) => returnsNormally(new Lengthy(len) should have length (len)))
      }

      def `should do nothing if object length does not match and used with should not` {
        obj should not { have length (3) }
        obj should not { have length (3L) }
        obj should not have length (3)
        obj should not have length (3L)
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
      }

      def `should do nothing when object length matches and used in a logical-and expression` {
        obj should { have length (2) and (have length (3 - 1)) }
        obj should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when object length matches and used in a logical-or expression` {
        obj should { have length (77) or (have length (3 - 1)) }
        obj should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when object length doesn't match and used in a logical-and expression with not` {
        obj should { not { have length (5) } and not { have length (3) } }
        obj should (not have length (5) and not have length (3))
      }

      def `should do nothing when object length doesn't match and used in a logical-or expression with not` {
        obj should { not { have length (2) } or not { have length (3) } }
        obj should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if object length does not match specified length` {
        val caught1 = intercept[TestFailedException] { obj should have length (3) }
        assert(caught1.getMessage === "lengthy did not have length 3")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] { obj should have length (-2) }
        assert(caught1.getMessage === "lengthy did not have length -2")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len)))
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } }
        assert(caught1.getMessage === "lengthy did not have length 5")
        val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
      }

      def `should throw an assertion error when object length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
      }

      def `should throw an assertion error when object length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
      }
    }

    object `on an arbitrary object that has a Long length field` {

      class Lengthy(len: Long) {
        val length: Long = len // The only difference from the previous is the structure of this member
        override def toString = "lengthy"
      }
      val obj = new Lengthy(2)

      implicit val lengthOfLengthy = new Length[Lengthy] {
        def extentOf(o: Lengthy): Long = o.length
      }

      def `should do nothing if object length matches specified length` {
        obj should have length (2)
        obj should have length (2L)
        check((len: Int) => returnsNormally(new Lengthy(len) should have length (len)))
        check((len: Long) => returnsNormally(new Lengthy(len) should have length (len)))
      }

      def `should do nothing if object length does not match and used with should not` {
        obj should not { have length (3) }
        obj should not have length (3)
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
      }

      def `should do nothing when object length matches and used in a logical-and expression` {
        obj should { have length (2) and (have length (3 - 1)) }
        obj should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when object length matches and used in a logical-or expression` {
        obj should { have length (77) or (have length (3 - 1)) }
        obj should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when object length doesn't match and used in a logical-and expression with not` {
        obj should { not { have length (5) } and not { have length (3) } }
        obj should (not have length (5) and not have length (3))
      }

      def `should do nothing when object length doesn't match and used in a logical-or expression with not` {
        obj should { not { have length (2) } or not { have length (3) } }
        obj should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if object length does not match specified length` {
        val caught1 = intercept[TestFailedException] { obj should have length (3) }
        assert(caught1.getMessage === "lengthy did not have length 3")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] { obj should have length (-2) }
        assert(caught1.getMessage === "lengthy did not have length -2")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len)))
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } }
        assert(caught1.getMessage === "lengthy did not have length 5")
        val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
      }

      def `should throw an assertion error when object length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
      }

      def `should throw an assertion error when object length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
      }
    }

    object `on an arbitrary object that has an empty-paren Long getLength method` {

      class Lengthy(len: Long) {
        def getLength(): Long = len // The only difference from the previous is the structure of this member
        override def toString = "lengthy"
      }
      val obj = new Lengthy(2)

      implicit val lengthOfLengthy = new Length[Lengthy] {
        def extentOf(o: Lengthy): Long = o.getLength()
      }

      def `should do nothing if object length matches specified length` {
        obj should have length (2)
        obj should have length (2L)
        check((len: Int) => returnsNormally(new Lengthy(len) should have length (len)))
        check((len: Long) => returnsNormally(new Lengthy(len) should have length (len)))
      }

      def `should do nothing if object length does not match and used with should not` {
        obj should not { have length (3) }
        obj should not { have length (3L) }
        obj should not have length (3)
        obj should not have length (3L)
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
      }

      def `should do nothing when object length matches and used in a logical-and expression` {
        obj should { have length (2) and (have length (3 - 1)) }
        obj should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when object length matches and used in a logical-or expression` {
        obj should { have length (77) or (have length (3 - 1)) }
        obj should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when object length doesn't match and used in a logical-and expression with not` {
        obj should { not { have length (5) } and not { have length (3) } }
        obj should (not have length (5) and not have length (3))
      }

      def `should do nothing when object length doesn't match and used in a logical-or expression with not` {
        obj should { not { have length (2) } or not { have length (3) } }
        obj should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if object length does not match specified length` {
        val caught1 = intercept[TestFailedException] { obj should have length (3) }
        assert(caught1.getMessage === "lengthy did not have length 3")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] { obj should have length (-2) }
        assert(caught1.getMessage === "lengthy did not have length -2")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len)))
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } }
        assert(caught1.getMessage === "lengthy did not have length 5")
        val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
      }

      def `should throw an assertion error when object length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
      }

      def `should throw an assertion error when object length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
      }
    }

    object `on an arbitrary object that has a parameterless Long getLength method` {

      class Lengthy(len: Long) {
        def getLength: Long = len // The only difference from the previous is the structure of this member
        override def toString = "lengthy"
      }
      val obj = new Lengthy(2)

      implicit val lengthOfLengthy = new Length[Lengthy] {
        def extentOf(o: Lengthy): Long = o.getLength
      }

      def `should do nothing if object length matches specified length` {
        obj should have length (2)
        obj should have length (2L)
        check((len: Int) => returnsNormally(new Lengthy(len) should have length (len)))
        check((len: Long) => returnsNormally(new Lengthy(len) should have length (len)))
      }

      def `should do nothing if object length does not match and used with should not` {
        obj should not { have length (3) }
        obj should not { have length (3L) }
        obj should not have length (3)
        obj should not have length (3L)
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
      }

      def `should do nothing when object length matches and used in a logical-and expression` {
        obj should { have length (2) and (have length (3 - 1)) }
        obj should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when object length matches and used in a logical-or expression` {
        obj should { have length (77) or (have length (3 - 1)) }
        obj should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when object length doesn't match and used in a logical-and expression with not` {
        obj should { not { have length (5) } and not { have length (3) } }
        obj should (not have length (5) and not have length (3))
      }

      def `should do nothing when object length doesn't match and used in a logical-or expression with not` {
        obj should { not { have length (2) } or not { have length (3) } }
        obj should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if object length does not match specified length` {
        val caught1 = intercept[TestFailedException] { obj should have length (3) }
        assert(caught1.getMessage === "lengthy did not have length 3")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] { obj should have length (-2) }
        assert(caught1.getMessage === "lengthy did not have length -2")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len)))
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } }
        assert(caught1.getMessage === "lengthy did not have length 5")
        val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
      }

      def `should throw an assertion error when object length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
      }

      def `should throw an assertion error when object length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
      }
    }

    object `on an arbitrary object that has a Long getLength field` {

      class Lengthy(len: Long) {
        val getLength: Long = len // The only difference from the previous is the structure of this member
        override def toString = "lengthy"
      }
      val obj = new Lengthy(2)

      implicit val lengthOfLengthy = new Length[Lengthy] {
        def extentOf(o: Lengthy): Long = o.getLength
      }

      def `should do nothing if object length matches specified length` {
        obj should have length (2)
        obj should have length (2L)
        check((len: Int) => returnsNormally(new Lengthy(len) should have length (len)))
        check((len: Long) => returnsNormally(new Lengthy(len) should have length (len)))
      }

      def `should do nothing if object length does not match and used with should not` {
        obj should not { have length (3) }
        obj should not { have length (3L) }
        obj should not have length (3)
        obj should not have length (3L)
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not { have length (wrongLen) }))
        check((len: Int, wrongLen: Int) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
        check((len: Long, wrongLen: Long) => len != wrongLen ==> returnsNormally(new Lengthy(len) should not have length (wrongLen)))
      }

      def `should do nothing when object length matches and used in a logical-and expression` {
        obj should { have length (2) and (have length (3 - 1)) }
        obj should (have length (2) and have length (3 - 1))
      }

      def `should do nothing when object length matches and used in a logical-or expression` {
        obj should { have length (77) or (have length (3 - 1)) }
        obj should (have length (77) or have length (3 - 1))
      }

      def `should do nothing when object length doesn't match and used in a logical-and expression with not` {
        obj should { not { have length (5) } and not { have length (3) } }
        obj should (not have length (5) and not have length (3))
      }

      def `should do nothing when object length doesn't match and used in a logical-or expression with not` {
        obj should { not { have length (2) } or not { have length (3) } }
        obj should (not have length (2) or not have length (3))
      }

      def `should throw TestFailedException if object length does not match specified length` {
        val caught1 = intercept[TestFailedException] { obj should have length (3) }
        assert(caught1.getMessage === "lengthy did not have length 3")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (len + 1)))
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] { obj should have length (-2) }
        assert(caught1.getMessage === "lengthy did not have length -2")
        check((len: Int) => throwsTestFailedException(new Lengthy(len) should have length (if ((len == 0) || (len == MIN_VALUE)) -1 else -len)))
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } }
        assert(caught1.getMessage === "lengthy did not have length 5")
        val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
      }

      def `should throw an assertion error when object length doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
      }

      def `should throw an assertion error when object length matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
      }

      def `should throw an assertion error when object length matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
      }
    }

    object `on an arbitrary object that has both parameterless Int length and parameterless Int size methods` {

      class Lengthy(len: Int) {
        def length: Int = len
        def size: Int = len
        override def toString = "lengthy"
      }
      val obj = new Lengthy(2)

      implicit val extentOfLengthy = new Length[Lengthy] with Size[Lengthy] {
        def extentOf(o: Lengthy): Long = o.length
      }

      def `should do nothing if object length or size matches specified length` {
        obj should have length (2)
        obj should have size (2)
      }

      def `should do nothing if object length or size does not match and used with should not` {
        obj should not { have length (3) }
        obj should not have length (3)
        obj should not { have size (3) }
        obj should not have size (3)
      }

      def `should do nothing when object length or size matches and used in a logical-and expression` {
        obj should { have length (2) and (have length (3 - 1)) }
        obj should (have length (2) and have length (3 - 1))
        obj should { have size (2) and (have size (3 - 1)) }
        obj should (have size (2) and have size (3 - 1))
      }

      def `should do nothing when object length or size matches and used in a logical-or expression` {
        obj should { have length (77) or (have length (3 - 1)) }
        obj should (have length (77) or have length (3 - 1))
        obj should { have size (77) or (have size (3 - 1)) }
        obj should (have size (77) or have size (3 - 1))
      }

      def `should do nothing when object length or size doesn't match and used in a logical-and expression with not` {
        obj should { not { have length (5) } and not { have length (3) } }
        obj should (not have length (5) and not have length (3))
        obj should { not { have size (5) } and not { have size (3) } }
        obj should (not have size (5) and not have size (3))
      }

      def `should do nothing when object length or size doesn't match and used in a logical-or expression with not` {
        obj should { not { have length (2) } or not { have length (3) } }
        obj should (not have length (2) or not have length (3))
        obj should { not { have size (2) } or not { have size (3) } }
        obj should (not have size (2) or not have size (3))
      }

      def `should throw TestFailedException if object length or size does not match specified length` {
        val caught1 = intercept[TestFailedException] { obj should have length (3) }
        assert(caught1.getMessage === "lengthy did not have length 3")
        val caught2 = intercept[TestFailedException] { obj should have size (3) }
        assert(caught2.getMessage === "lengthy did not have size 3")
      }

      def `should throw TestFailedException with normal error message if specified length is negative` {
        val caught1 = intercept[TestFailedException] { obj should have length (-2) }
        assert(caught1.getMessage === "lengthy did not have length -2")
        val caught2 = intercept[TestFailedException] { obj should have size (-2) }
        assert(caught2.getMessage === "lengthy did not have size -2")
      }

      def `should throw an assertion error when object length or size doesn't match and used in a logical-and expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (5) and (have length (2 - 1)) } }
        assert(caught1.getMessage === "lengthy did not have length 5")
        val caught2 = intercept[TestFailedException] { obj should ((have length (5)) and (have length (2 - 1))) }
        assert(caught2.getMessage === "lengthy did not have length 5")
        val caught3 = intercept[TestFailedException] { obj should (have length (5) and have length (2 - 1)) }
        assert(caught3.getMessage === "lengthy did not have length 5")
        val caught1b = intercept[TestFailedException] { obj should { have size (5) and (have size (2 - 1)) } }
        assert(caught1b.getMessage === "lengthy did not have size 5")
        val caught2b = intercept[TestFailedException] { obj should ((have size (5)) and (have size (2 - 1))) }
        assert(caught2b.getMessage === "lengthy did not have size 5")
        val caught3b = intercept[TestFailedException] { obj should (have size (5) and have size (2 - 1)) }
        assert(caught3b.getMessage === "lengthy did not have size 5")
      }

      def `should throw an assertion error when object length or size doesn't match and used in a logical-or expression` {
        val caught1 = intercept[TestFailedException] { obj should { have length (55) or (have length (22)) } }
        assert(caught1.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught2 = intercept[TestFailedException] { obj should ((have length (55)) or (have length (22))) }
        assert(caught2.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught3 = intercept[TestFailedException] { obj should (have length (55) or have length (22)) }
        assert(caught3.getMessage === "lengthy did not have length 55, and lengthy did not have length 22")
        val caught1b = intercept[TestFailedException] { obj should { have size (55) or (have size (22)) } }
        assert(caught1b.getMessage === "lengthy did not have size 55, and lengthy did not have size 22")
        val caught2b = intercept[TestFailedException] { obj should ((have size (55)) or (have size (22))) }
        assert(caught2b.getMessage === "lengthy did not have size 55, and lengthy did not have size 22")
        val caught3b = intercept[TestFailedException] { obj should (have size (55) or have size (22)) }
        assert(caught3b.getMessage === "lengthy did not have size 55, and lengthy did not have size 22")
      }

      def `should throw an assertion error when object length or size matches and used in a logical-and expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (3) } and not { have length (2) } } }
        assert(caught1.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (3) } and { not have length (2) } } }
        assert(caught2.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (3) and not have length (2)) }
        assert(caught3.getMessage === "lengthy did not have length 3, but lengthy had length 2")
        val caught1b = intercept[TestFailedException] { obj should { not { have size (3) } and not { have size (2) } } }
        assert(caught1b.getMessage === "lengthy did not have size 3, but lengthy had size 2")
        val caught2b = intercept[TestFailedException] { obj should { { not have size (3) } and { not have size (2) } } }
        assert(caught2b.getMessage === "lengthy did not have size 3, but lengthy had size 2")
        val caught3b = intercept[TestFailedException] { obj should (not have size (3) and not have size (2)) }
        assert(caught3b.getMessage === "lengthy did not have size 3, but lengthy had size 2")
      }

      def `should throw an assertion error when object length or size matches and used in a logical-or expression with not` {
        val caught1 = intercept[TestFailedException] { obj should { not { have length (2) } or not { have length (2) } } }
        assert(caught1.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught2 = intercept[TestFailedException] { obj should { { not have length (2) } or { not have length (2) } } }
        assert(caught2.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught3 = intercept[TestFailedException] { obj should (not have length (2) or not have length (2)) }
        assert(caught3.getMessage === "lengthy had length 2, and lengthy had length 2")
        val caught1b = intercept[TestFailedException] { obj should { not { have size (2) } or not { have size (2) } } }
        assert(caught1b.getMessage === "lengthy had size 2, and lengthy had size 2")
        val caught2b = intercept[TestFailedException] { obj should { { not have size (2) } or { not have size (2) } } }
        assert(caught2b.getMessage === "lengthy had size 2, and lengthy had size 2")
        val caught3b = intercept[TestFailedException] { obj should (not have size (2) or not have size (2)) }
        assert(caught3b.getMessage === "lengthy had size 2, and lengthy had size 2")
      }
    }

    def `should allow multiple implicits of the same type class (such as Length) to be resolved so long as the type param is not ambiguous` {

      import java.net.DatagramPacket
      val dp = new DatagramPacket(Array[Byte](0x0, 0x1, 0x2, 0x3), 4)
      dp.getLength
      implicit val lengthOfDatagramPacket = new Length[DatagramPacket] {
        def extentOf(dp: DatagramPacket): Long = dp.getLength
      }
      dp should have length 4

      import java.awt.image.DataBufferByte
      val db = new DataBufferByte(4)
      implicit val sizeOfDataBufferByte = new Length[DataBufferByte] {
        def extentOf(db: DataBufferByte): Long = db.getSize
      }
      db should have length 4
    }
  }
}
svn2github/scalatest
src/test/scala/org/scalatest/ShouldLengthSpec.scala
Scala
apache-2.0
110,949
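The pattern the spec exercises generalizes to any domain type: once an implicit `Length` instance is in scope, the `should have length` syntax becomes available for that type. A minimal sketch, assuming the same `Length` trait (with `extentOf`) and `ShouldMatchers` mixin that the spec itself uses; the `Inbox` class is hypothetical:

import org.scalatest.matchers.ShouldMatchers

// Hypothetical domain type with no built-in length member.
class Inbox(messages: Seq[String]) {
  def messageCount: Int = messages.size
  override def toString = "inbox"
}

trait InboxMatchers extends ShouldMatchers {
  // Teach the matcher DSL how to measure an Inbox, mirroring the
  // extentOf-based instances defined throughout the spec above.
  implicit val lengthOfInbox = new Length[Inbox] {
    def extentOf(inbox: Inbox): Long = inbox.messageCount
  }

  def demo(): Unit = {
    val inbox = new Inbox(Seq("hello", "world"))
    inbox should have length (2)     // passes via lengthOfInbox
    inbox should not have length (3) // also passes
  }
}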
package org.langmeta.tests
package io

import org.langmeta._
import org.langmeta.internal.io._
import org.scalatest.FunSuite

class IOSuite extends FunSuite {
  val buildSbt: AbsolutePath = RelativePath("build.sbt").toAbsolute

  test("PathIO.workingDirectory") {
    val obtained = PathIO.workingDirectory.toString
    // assuming we never run tests from root directory, check that the
    // returned value is not the default value "/" when running outside node.
    assert(obtained != "/")
  }

  test("FileIO.listFiles(Directory)") {
    val obtained = FileIO.listFiles(PathIO.workingDirectory)
    assert(obtained.contains(buildSbt))
    assert(!obtained.contains("."))
  }

  test("FileIO.listFiles(File)") {
    assert(FileIO.listFiles(buildSbt).isEmpty)
  }

  test("FileIO.listAllFilesRecursively") {
    val bin = PathIO.workingDirectory.resolve("bin")
    val obtained = FileIO.listAllFilesRecursively(bin)
    val scalafmt = bin.resolve("scalafmt")
    assert(obtained.contains(scalafmt))
  }

  test("FileIO.readAllBytes") {
    val obtained = new String(FileIO.readAllBytes(buildSbt))
    assert(obtained.contains("project"))
  }

  test("Input.File.slurp") {
    val obtained = new String(Input.File(buildSbt).chars)
    assert(obtained.contains("project"))
  }

  test("AbsolutePath(relpath)(customCwd)") {
    implicit val customWorkingDirectory = AbsolutePath.root
    val obtained = AbsolutePath("foo")
    assert(obtained == customWorkingDirectory.resolve("foo"))
  }
}
DavidDudson/scalameta
langmeta/tests/src/test/scala/org/langmeta/tests/io/IOSuite.scala
Scala
bsd-3-clause
1,491
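The path and file APIs this suite exercises compose naturally. A small sketch under the same langmeta imports, assuming a conventional `src` directory (the directory name is a placeholder, not part of the suite):

import org.langmeta._
import org.langmeta.internal.io._

object ListScalaSources {
  def main(args: Array[String]): Unit = {
    // Resolve "src" against the working directory, then walk it recursively,
    // mirroring what the suite above does for the "bin" directory.
    val srcDir: AbsolutePath = RelativePath("src").toAbsolute
    val scalaFiles = FileIO
      .listAllFilesRecursively(srcDir)
      .filter(_.toString.endsWith(".scala"))
    scalaFiles.foreach(println)
  }
}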
package api.icalendar

import java.net.URI

import com.google.common.io.Resources.getResource
import net.fortuna.ical4j.model.property._
import org.joda.time.DateTime
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.{BeforeAndAfter, FunSuite}

import api.icalendar.ICalendar._
import models.Event
import models.mapping.Event$VEventMapping

class ICalendarTest extends FunSuite with ShouldMatchers with BeforeAndAfter with Event$VEventMapping {
  var ical: String = _

  before {
    val events: List[Event] = List(
      Event(uid = "0", title = "Event1", begin = new DateTime(), end = new DateTime(),
        location = "place1", description = "super java conf", tags = List("java")),
      Event(uid = "1", title = "Event2", begin = new DateTime(), end = new DateTime(),
        location = "place2", description = "super scala conf", tags = List("scala"))
    )
    ical = buildCalendar(events)
  }

  test("should retrieve empty list when feed is empty") {
    val vEvents = retrieveVEvents(getResource("api/icalendar/empty.ics").openStream())

    vEvents should be (Right(Nil))
  }

  test("should retrieve VEvent from icalendar source") {
    val vEvents = retrieveVEvents(getResource("api/icalendar/singleEvent.ics").openStream())

    vEvents.right.get should have size 1
    vEvents.right.get should contain (new VEvent(getVEvent()))
  }

  test("should retrieve empty list when feed is invalid") {
    val vEvents = retrieveVEvents(getResource("api/icalendar/invalid.ics").openStream())

    vEvents.left.get.message should be ("Parsing error from ICalendar")
  }

  test("ical should be a valid iCal") {
    ical should startWith ("BEGIN:VCALENDAR")
    ical should include ("VERSION:2.0")
    ical should include ("PRODID:")
  }

  test("ical should have all events") {
    ical should include ("SUMMARY:Event1")
    ical should include ("SUMMARY:Event2")
  }

  test("event should have all properties") {
    val event = buildCalendar(List(
      Event(uid = "0", title = "Event1",
        begin = new DateTime(2010, 1, 1, 12, 0, 0),
        end = new DateTime(2010, 1, 1, 14, 0, 0),
        location = "place1", description = "super java conf", tags = List("java"))
    ))

    event should include ("DTSTART:20100101T120000")
    event should include ("DTEND:20100101T140000")
    event should include ("DESCRIPTION:super java conf")
    event should include ("LOCATION:place1")
    event should include ("UID:0")
  }

  ignore("should retrieve 2 valid events when an event is invalid") {
    val vEvents = retrieveVEvents(getResource("api/icalendar/twoValidOneInvalidEvent.ics").openStream())

    vEvents match {
      case Right(list) => list should have size (2)
      case Left(err) => fail(err.e)
    }
    // cause: new URI("marketing%[email protected]")
  }

  // TODO: duplication, see VEventTest
  private def getVEvent(uid: Boolean = true): net.fortuna.ical4j.model.component.VEvent = {
    val vevent = new net.fortuna.ical4j.model.component.VEvent()
    if (uid) vevent.getProperties.add(new Uid("http://lacantine.org/events/reunion-d-etude-sur-le-projet-soho-de-la-ville-de-paris"))
    vevent.getProperties.add(new DtStart(new net.fortuna.ical4j.model.DateTime(123456L)))
    vevent.getProperties.add(new DtEnd(new net.fortuna.ical4j.model.DateTime(456457L)))
    vevent.getProperties.add(new Summary("title"))
    vevent.getProperties.add(new Description("description"))
    vevent.getProperties.add(new Location("location"))
    vevent.getProperties.add(new Url(new URI("url")))
    vevent
  }
}
mdia/OneCalendar
test/api/icalendar/ICalendarTest.scala
Scala
apache-2.0
4,136
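A minimal sketch of producing a feed outside the test, using the same `buildCalendar` entry point and `Event` model; it assumes the Event-to-VEvent mapping is picked up by mixing in the trait the same way the test class does, and the event values are placeholders:

import api.icalendar.ICalendar._
import models.Event
import models.mapping.Event$VEventMapping
import org.joda.time.DateTime

object IcsExport extends Event$VEventMapping {
  // Builds an iCalendar string for one event, mirroring the fixture above.
  def singleEventFeed(): String =
    buildCalendar(List(
      Event(
        uid = "42",
        title = "Scala meetup",
        begin = new DateTime(2010, 1, 1, 12, 0, 0),
        end = new DateTime(2010, 1, 1, 14, 0, 0),
        location = "somewhere",
        description = "monthly meetup",
        tags = List("scala")
      )
    ))
}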
package org.emailscript.mail

import java.io.{File, FileOutputStream}
import java.util
import java.util.{Date, Properties}
import javax.mail.Flags.Flag
import javax.mail.{Folder => JavaMailFolder, _}
import javax.mail.internet.{InternetAddress, MimeMessage}
import javax.mail.search.{ComparisonTerm, ReceivedDateTerm}

import com.sun.mail.imap.{IMAPFolder, IMAPMessage}
import com.sun.mail.pop3.POP3Folder
import org.emailscript.api._
import org.emailscript.helpers.{Configuration, LoggerFactory, Tags, Values, Yaml}

/**
 * Basic mail handling utilities using javax.mail
 */
object MailUtils {

  val yaml = Yaml(Configuration.DataDir)
  val logger = LoggerFactory.getLogger(getClass)
  val MoveHeader = "Mailscript-Move"

  var dryRun: Boolean = false

  lazy val defaultPermissions = if (dryRun) JavaMailFolder.READ_ONLY else JavaMailFolder.READ_WRITE

  def sendMessage(account: EmailAccountBean, messageBean: EmailBean) = {
    logger.info(s"sending email to ${messageBean.getTo}")

    val session = Session.getInstance(toSmtpProperties(account))
    val message = messageBean.toMessage(session)
    Transport.send(message, account.user, account.password)
  }

  def moveTo(toFolderName: String, m: MailMessageHelper) {
    if (dryRun) {
      logger.info(s"DRY RUN -- moving message from: ${m.from} subject: ${m.subject} to folder: $toFolderName")
      return
    }

    val fromFolder: JavaMailFolder = m.message.getFolder
    val store = fromFolder.getStore
    val toFolder = store.getFolder(toFolderName).asInstanceOf[IMAPFolder]

    if (!toFolder.exists()) {
      logger.warn(s"ignoring request to move message to folder that does not exist: $toFolderName")
      return
    }

    try {
      toFolder.open(JavaMailFolder.READ_WRITE)

      val newMessage = new MimeMessage(m.message.asInstanceOf[MimeMessage])
      newMessage.removeHeader(MoveHeader)
      newMessage.addHeader(MoveHeader, toFolderName)

      val messageArray: Array[Message] = Array(newMessage)
      logger.info(s"moving mail from: ${m.from} subject: ${m.subject} to folder: $toFolderName")
      toFolder.appendMessages(messageArray)
      m.message.setFlag(Flag.DELETED, true)
    } catch {
      case e: Throwable => logger.warn(s"failed moving message to folder: $toFolderName", e)
    }

    closeFolder(toFolder, true)
  }

  def delete(permanent: Boolean, m: MailMessageHelper): Unit = {
    if (dryRun) {
      logger.info(s"DRY RUN -- deleting message from: ${m.from} subject: ${m.subject}")
      return
    }

    if (permanent)
      m.message.setFlag(Flag.DELETED, true)
    else
      moveTo(trashFolder(m.account), m)
  }

  def closeFolder(folder: IMAPFolder, expunge: Boolean = !dryRun): Unit = {
    try {
      if (folder != null && folder.isOpen)
        folder.close(expunge)
    } catch {
      case e: Throwable => // ignore
    }
  }

  def saveToFile(message: IMAPMessage, file: File) = {
    message.writeTo(new FileOutputStream(file))
  }

  def getUID(folder: JavaMailFolder, m: Message): Long = {
    folder match {
      case imap: IMAPFolder => imap.getUID(m)
      case pop3: POP3Folder => pop3.getUID(m).toLong
    }
  }

  def isValidEmail(email: String): Boolean = {
    try {
      val emailAddr = new InternetAddress(email)
      emailAddr.validate()
      true
    } catch {
      case e: Throwable => false
    }
  }

  def toSmtpProperties(account: EmailAccountBean): Properties = {
    val props = new Properties()
    props.put("mail.smtp.host", account.smtpHost)
    props.put("mail.smtp.socketFactory.port", account.smtpPort.toString)
    props.put("mail.smtp.socketFactory.class", "javax.net.ssl.SSLSocketFactory")
    props.put("mail.smtp.auth", "true")
    props.put("mail.smtp.port", account.smtpPort.toString)
    props
  }

  def createDataName(account: EmailAccountBean, dataName: String, folderName: String) = {
    s"${account.user}-${folderName}$dataName"
  }

  //
  // Folder names
  //

  private val Trash = "Trash"
  private val GmailTrash = "[Gmail]/Trash"
  private val Spam = "Spam"
  private val GmailSpam = "[Gmail]/Spam"

  def trashFolder(account: EmailAccountBean): String = {
    if (isGmail(account)) GmailTrash else Trash
  }

  val gmailTopLevelFolders = Set("inbox", "deleted messages", "drafts", "sent", "sent messages")

  def isGmail(account: EmailAccountBean) = account.user.toLowerCase.endsWith("gmail.com")

  def getFolderName(account: EmailAccountBean, name: String): String = {
    if (!isGmail(account))
      return name

    // Handle Gmail specific folders
    name.toLowerCase() match {
      case "trash" => "[Gmail]/Trash"
      case "spam" => "[Gmail]/Spam"
      case "drafts" => "[Gmail]/Drafts"
      case _ => name
    }
  }
}
OdysseusLevy/emailscript
src/main/scala/org/emailscript/mail/MailUtils.scala
Scala
lgpl-3.0
4,731
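The property block in `toSmtpProperties` is the whole SMTP-over-SSL configuration story: an implicit SSL socket factory plus authenticated SMTP on the same port. A standalone sketch using only the javax.mail calls that appear above, with host and port as placeholder parameters:

import java.util.Properties
import javax.mail.Session

object SmtpSessionExample {
  // Mirrors toSmtpProperties: SSL socket factory and auth enabled,
  // with the same port used for both the socket factory and SMTP.
  def session(host: String, port: Int): Session = {
    val props = new Properties()
    props.put("mail.smtp.host", host)
    props.put("mail.smtp.socketFactory.port", port.toString)
    props.put("mail.smtp.socketFactory.class", "javax.net.ssl.SSLSocketFactory")
    props.put("mail.smtp.auth", "true")
    props.put("mail.smtp.port", port.toString)
    Session.getInstance(props)
  }
}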
package at.forsyte.apalache.tla

package object types {
  type typeContext = Map[TypeVar, SmtTypeVariable]
  type nameContext = Map[String, SmtTypeVariable]
}
konnov/apalache
tla-types/src/main/scala/at/forsyte/apalache/tla/types/package.scala
Scala
apache-2.0
159
package com.github.mdr.mash.ns.core.help

import com.github.mdr.mash.classes.{ AbstractObjectWrapper, Field, MashClass, NewStaticMethod }
import com.github.mdr.mash.evaluator.EvaluatorException
import com.github.mdr.mash.ns.core.{ ClassClass, StringClass }
import com.github.mdr.mash.runtime.{ MashNull, MashObject, MashString, MashValue }

import scala.collection.immutable.ListMap

object FieldHelpClass extends MashClass("core.help.FieldHelp") {

  object Fields {
    val Name = Field("name", Some("Field name"), StringClass)
    val OwningClass = Field("owningClass", Some("Class this field belongs to"), ClassClass)
  }

  import Fields._

  def create(name: String, owningClass: MashClass): MashObject =
    MashObject.of(
      ListMap(
        Name -> MashString(name),
        OwningClass -> owningClass),
      FieldHelpClass)

  case class Wrapper(any: MashValue) extends AbstractObjectWrapper(any) {

    def name = getStringField(Name)

    def klass = getClassField(OwningClass)

    def field = klass.getField(name).getOrElse(
      throw EvaluatorException(s"No field '$name' found in '${klass.fullyQualifiedName}'"))
  }

  override val fields = Seq(Name, OwningClass)

  override val staticMethods = Seq(NewStaticMethod(this))

  override def summaryOpt = Some("Help documentation for a field")
}
mdr/mash
src/main/scala/com/github/mdr/mash/ns/core/help/FieldHelpClass.scala
Scala
mit
1,330
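A minimal sketch of how the class above is meant to be used: build a field-help object with `create` and read it back through the typed `Wrapper`. The field name "example" is illustrative, not a real `StringClass` field:

import com.github.mdr.mash.ns.core.StringClass
import com.github.mdr.mash.ns.core.help.FieldHelpClass

object FieldHelpExample {
  // Construct the help object, then access it through the wrapper's
  // typed accessors instead of raw MashObject field lookups.
  val help = FieldHelpClass.create(name = "example", owningClass = StringClass)
  val wrapper = FieldHelpClass.Wrapper(help)
  val fieldName = wrapper.name // "example"
  val owner = wrapper.klass    // StringClass
}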
package reactivemongo.api

/**
 * MongoDB [[https://docs.mongodb.com/manual/core/read-preference/index.html read preference]] enables to read from primary or secondaries
 * with a predefined strategy.
 *
 * {{{
 * import reactivemongo.api.ReadPreference
 *
 * val pref: ReadPreference = ReadPreference.primary
 * }}}
 */
sealed trait ReadPreference {
  /** Indicates whether a slave member is ok. */
  def slaveOk: Boolean = true

  //def filterTag: Option[BSONDocument => Boolean]
}

/** [[ReadPreference]] utilities and factories. */
object ReadPreference {
  /** Reads only from the primary. This is the default choice. */
  object Primary extends ReadPreference {
    override val slaveOk = false
    val filterTag = None

    override val toString = "Primary"
  }

  private[reactivemongo] def TagFilter(
    tagSet: Seq[Map[String, String]]): Option[Map[String, String] => Boolean] = {
    if (tagSet.isEmpty) None else Some { tags: Map[String, String] =>
      val matching = tagSet.find(_.foldLeft(Map.empty[String, String]) {
        case (ms, (k, v)) =>
          if (tags.get(k).exists(_ == v)) {
            ms + (k -> v)
          } else ms
      }.isEmpty)

      matching.isDefined
    }
  }

  private[api] sealed trait Taggable { self: ReadPreference =>
    /** Returns the tags to be used. */
    def tags: List[Map[String, String]]
  }

  /** Extractor for taggable read preference. */
  object Taggable {
    def unapply(pref: ReadPreference): Option[List[Map[String, String]]] =
      pref match {
        case p: Taggable => p.tags.headOption.map(_ :: p.tags.tail)
        case _ => None
      }
  }

  /** Reads from the primary if it is available, or secondaries if it is not. */
  class PrimaryPreferred private[api] (val tags: List[Map[String, String]])
    extends ReadPreference with Taggable
    with Product1[List[Map[String, String]]] with Serializable {

    @deprecated("No longer case class", "0.20.3")
    @inline def _1 = tags

    @deprecated("No longer case class", "0.20.3")
    def canEqual(that: Any): Boolean = that match {
      case _: PrimaryPreferred => false
      case _ => false
    }

    override def equals(that: Any): Boolean = that match {
      case other: PrimaryPreferred =>
        this.tags == other.tags

      case _ => false
    }

    @inline override def hashCode: Int = tags.hashCode

    override val toString = s"""PrimaryPreferred(${tags mkString ", "})"""
  }

  object PrimaryPreferred extends scala.runtime.AbstractFunction1[List[Map[String, String]], PrimaryPreferred] {
    def apply(tags: List[Map[String, String]]): PrimaryPreferred =
      new PrimaryPreferred(tags)

    def unapply(pref: PrimaryPreferred): Option[List[Map[String, String]]] =
      Option(pref).map(_.tags)
  }

  /** Reads only from any secondary. */
  class Secondary private[api] (val tags: List[Map[String, String]])
    extends ReadPreference with Taggable
    with Product1[List[Map[String, String]]] with Serializable {

    @deprecated("No longer case class", "0.20.3")
    @inline def _1 = tags

    @deprecated("No longer case class", "0.20.3")
    def canEqual(that: Any): Boolean = that match {
      case _: Secondary => false
      case _ => false
    }

    override def equals(that: Any): Boolean = that match {
      case other: Secondary =>
        this.tags == other.tags

      case _ => false
    }

    @inline override def hashCode: Int = tags.hashCode

    override val toString = s"""Secondary(${tags mkString ", "})"""
  }

  object Secondary extends scala.runtime.AbstractFunction1[List[Map[String, String]], Secondary] {
    def apply(tags: List[Map[String, String]]): Secondary = new Secondary(tags)

    def unapply(pref: Secondary): Option[List[Map[String, String]]] =
      Option(pref).map(_.tags)
  }

  /**
   * Reads from any secondary,
   * or from the primary if they are not available.
   */
  class SecondaryPreferred private[api] (val tags: List[Map[String, String]])
    extends ReadPreference with Taggable
    with Product1[List[Map[String, String]]] with Serializable {

    @deprecated("No longer case class", "0.20.3")
    @inline def _1 = tags

    @deprecated("No longer case class", "0.20.3")
    def canEqual(that: Any): Boolean = that match {
      case _: SecondaryPreferred => false
      case _ => false
    }

    override def equals(that: Any): Boolean = that match {
      case other: SecondaryPreferred =>
        this.tags == other.tags

      case _ => false
    }

    @inline override def hashCode: Int = tags.hashCode

    override val toString = s"""SecondaryPreferred(${tags mkString ", "})"""
  }

  object SecondaryPreferred extends scala.runtime.AbstractFunction1[List[Map[String, String]], SecondaryPreferred] {
    def apply(tags: List[Map[String, String]]): SecondaryPreferred =
      new SecondaryPreferred(tags)

    def unapply(pref: SecondaryPreferred): Option[List[Map[String, String]]] =
      Option(pref).map(_.tags)
  }

  /**
   * Reads from the faster node (e.g. the node which replies faster than
   * all others), regardless its status (primary or secondary).
   */
  class Nearest private[api] (val tags: List[Map[String, String]])
    extends ReadPreference with Taggable
    with Product1[List[Map[String, String]]] with Serializable {

    @deprecated("No longer case class", "0.20.3")
    @inline def _1 = tags

    @deprecated("No longer case class", "0.20.3")
    def canEqual(that: Any): Boolean = that match {
      case _: Nearest => false
      case _ => false
    }

    override def equals(that: Any): Boolean = that match {
      case other: Nearest =>
        this.tags == other.tags

      case _ => false
    }

    @inline override def hashCode: Int = tags.hashCode

    override val toString = s"""Nearest(${tags mkString ", "})"""
  }

  object Nearest extends scala.runtime.AbstractFunction1[List[Map[String, String]], Nearest] {
    def apply(tags: List[Map[String, String]]): Nearest = new Nearest(tags)

    def unapply(pref: Nearest): Option[List[Map[String, String]]] =
      Option(pref).map(_.tags)
  }

  /** [[https://docs.mongodb.com/manual/reference/read-preference/#primary Reads only from the primary]]. This is the default choice. */
  def primary: Primary.type = Primary

  /** Reads from the [[https://docs.mongodb.com/manual/reference/read-preference/#primaryPreferred primary if it is available]], or secondaries if it is not. */
  val primaryPreferred: PrimaryPreferred = new PrimaryPreferred(List.empty)

  /** Reads from any node that has the given `tagSet` in the replica set (preferably the primary). */
  def primaryPreferred(tagSet: List[Map[String, String]]): PrimaryPreferred =
    new PrimaryPreferred(tagSet)

  /** [[https://docs.mongodb.com/manual/reference/read-preference/#secondary Reads only from any secondary]]. */
  val secondary: Secondary = new Secondary(List.empty)

  /** Reads from a secondary that has the given `tagSet` in the replica set. */
  def secondary(tagSet: List[Map[String, String]]): Secondary =
    new Secondary(tagSet)

  /** [[https://docs.mongodb.com/manual/reference/read-preference/#secondaryPreferred Reads from any secondary]], or from the primary if they are not available. */
  val secondaryPreferred: SecondaryPreferred = new SecondaryPreferred(List.empty)

  /** Reads from any node that has the given `tagSet` in the replica set (preferably a secondary). */
  def secondaryPreferred(tagSet: List[Map[String, String]]): SecondaryPreferred =
    new SecondaryPreferred(tagSet)

  /**
   * Reads from the [[https://docs.mongodb.com/manual/reference/read-preference/#nearest nearest node]] (the node which replies faster than all others), regardless its status (primary or secondary).
   */
  val nearest: Nearest = new Nearest(List.empty)

  /**
   * Reads from the fastest node (e.g. the node which replies faster than all others) that has the given `tagSet`, regardless its status (primary or secondary).
   */
  def nearest[T](tagSet: List[Map[String, String]]): Nearest =
    new Nearest(tagSet)
}
cchantep/ReactiveMongo
driver/src/main/scala/api/ReadPreference.scala
Scala
apache-2.0
8,114
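A short usage sketch grounded in the factories and extractor defined above: build a tag-aware preference, then recover its tag sets through the `Taggable` extractor.

import reactivemongo.api.ReadPreference

object ReadPreferenceExample {
  // Prefer replica-set members tagged dc=east, falling back to the primary.
  val pref: ReadPreference =
    ReadPreference.secondaryPreferred(List(Map("dc" -> "east")))

  // The Taggable extractor matches any preference carrying non-empty tag sets.
  val tagSets: Option[List[Map[String, String]]] = pref match {
    case ReadPreference.Taggable(tags) => Some(tags)
    case _                             => None
  }
}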
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.util

import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.time._
import java.time.temporal.{ChronoField, ChronoUnit, IsoFields}
import java.util.{Locale, TimeZone}
import java.util.concurrent.TimeUnit._

import scala.util.control.NonFatal

import org.apache.spark.sql.types.Decimal
import org.apache.spark.unsafe.types.UTF8String

/**
 * Helper functions for converting between internal and external date and time representations.
 * Dates are exposed externally as java.sql.Date and are represented internally as the number of
 * days since the Unix epoch (1970-01-01). Timestamps are exposed externally as java.sql.Timestamp
 * and are stored internally as longs, which are capable of storing timestamps with microsecond
 * precision.
 */
object DateTimeUtils {

  // we use Int and Long internally to represent [[DateType]] and [[TimestampType]]
  type SQLDate = Int
  type SQLTimestamp = Long

  // see http://stackoverflow.com/questions/466321/convert-unix-timestamp-to-julian
  // it's 2440587.5, rounding up to be compatible with Hive
  final val JULIAN_DAY_OF_EPOCH = 2440588

  // Pre-calculated values can provide an opportunity of additional optimizations
  // to the compiler like constants propagation and folding.
  final val NANOS_PER_MICROS: Long = 1000
  final val MICROS_PER_MILLIS: Long = 1000
  final val MILLIS_PER_SECOND: Long = 1000
  final val SECONDS_PER_DAY: Long = 24 * 60 * 60
  final val MICROS_PER_SECOND: Long = MILLIS_PER_SECOND * MICROS_PER_MILLIS
  final val NANOS_PER_MILLIS: Long = NANOS_PER_MICROS * MICROS_PER_MILLIS
  final val NANOS_PER_SECOND: Long = NANOS_PER_MICROS * MICROS_PER_SECOND
  final val MICROS_PER_DAY: Long = SECONDS_PER_DAY * MICROS_PER_SECOND
  final val MILLIS_PER_MINUTE: Long = 60 * MILLIS_PER_SECOND
  final val MILLIS_PER_HOUR: Long = 60 * MILLIS_PER_MINUTE
  final val MILLIS_PER_DAY: Long = SECONDS_PER_DAY * MILLIS_PER_SECOND

  // number of days between 1.1.1970 and 1.1.2001
  final val to2001 = -11323

  // this is year -17999, calculation: 50 * daysIn400Year
  final val YearZero = -17999
  final val toYearZero = to2001 + 7304850
  final val TimeZoneGMT = TimeZone.getTimeZone("GMT")
  final val TimeZoneUTC = TimeZone.getTimeZone("UTC")

  val TIMEZONE_OPTION = "timeZone"

  def defaultTimeZone(): TimeZone = TimeZone.getDefault()

  def getZoneId(timeZoneId: String): ZoneId = ZoneId.of(timeZoneId, ZoneId.SHORT_IDS)
  def getTimeZone(timeZoneId: String): TimeZone = {
    TimeZone.getTimeZone(getZoneId(timeZoneId))
  }

  // we should use the exact day as Int, for example, (year, month, day) -> day
  def millisToDays(millisUtc: Long): SQLDate = {
    millisToDays(millisUtc, defaultTimeZone())
  }

  def millisToDays(millisUtc: Long, timeZone: TimeZone): SQLDate = {
    // SPARK-6785: use Math.floorDiv so negative number of days (dates before 1970)
    // will correctly work as input for function toJavaDate(Int)
    val millisLocal = millisUtc + timeZone.getOffset(millisUtc)
    Math.floorDiv(millisLocal, MILLIS_PER_DAY).toInt
  }

  // reverse of millisToDays
  def daysToMillis(days: SQLDate): Long = {
    daysToMillis(days, defaultTimeZone())
  }

  def daysToMillis(days: SQLDate, timeZone: TimeZone): Long = {
    val millisLocal = days.toLong * MILLIS_PER_DAY
    millisLocal - getOffsetFromLocalMillis(millisLocal, timeZone)
  }

  // Converts Timestamp to string according to Hive TimestampWritable convention.
  def timestampToString(tf: TimestampFormatter, us: SQLTimestamp): String = {
    tf.format(us)
  }

  /**
   * Returns the number of days since epoch from java.sql.Date.
   */
  def fromJavaDate(date: Date): SQLDate = {
    millisToDays(date.getTime)
  }

  /**
   * Returns a java.sql.Date from number of days since epoch.
   */
  def toJavaDate(daysSinceEpoch: SQLDate): Date = {
    new Date(daysToMillis(daysSinceEpoch))
  }

  /**
   * Returns a java.sql.Timestamp from number of micros since epoch.
   */
  def toJavaTimestamp(us: SQLTimestamp): Timestamp = {
    Timestamp.from(microsToInstant(us))
  }

  /**
   * Returns the number of micros since epoch from java.sql.Timestamp.
   */
  def fromJavaTimestamp(t: Timestamp): SQLTimestamp = {
    instantToMicros(t.toInstant)
  }

  /**
   * Returns the number of microseconds since epoch from Julian day
   * and nanoseconds in a day
   */
  def fromJulianDay(day: Int, nanoseconds: Long): SQLTimestamp = {
    // use Long to avoid rounding errors
    val seconds = (day - JULIAN_DAY_OF_EPOCH).toLong * SECONDS_PER_DAY
    SECONDS.toMicros(seconds) + NANOSECONDS.toMicros(nanoseconds)
  }

  /**
   * Returns Julian day and nanoseconds in a day from the number of microseconds
   *
   * Note: support timestamp since 4717 BC (without negative nanoseconds, compatible with Hive).
   */
  def toJulianDay(us: SQLTimestamp): (Int, Long) = {
    val julian_us = us + JULIAN_DAY_OF_EPOCH * MICROS_PER_DAY
    val day = julian_us / MICROS_PER_DAY
    val micros = julian_us % MICROS_PER_DAY
    (day.toInt, MICROSECONDS.toNanos(micros))
  }

  /*
   * Converts the timestamp to milliseconds since epoch. In Spark, timestamp values have
   * microseconds precision, so this conversion is lossy.
   */
  def toMillis(us: SQLTimestamp): Long = {
    // When the timestamp is negative i.e before 1970, we need to adjust the milliseconds portion.
    // Example - 1965-01-01 10:11:12.123456 is represented as (-157700927876544) in micro precision.
    // In millis precision the above needs to be represented as (-157700927877).
    Math.floorDiv(us, MICROS_PER_MILLIS)
  }

  /*
   * Converts milliseconds since epoch to SQLTimestamp.
   */
  def fromMillis(millis: Long): SQLTimestamp = {
    MILLISECONDS.toMicros(millis)
  }

  def microsToEpochDays(epochMicros: SQLTimestamp, zoneId: ZoneId): SQLDate = {
    localDateToDays(microsToInstant(epochMicros).atZone(zoneId).toLocalDate)
  }

  def epochDaysToMicros(epochDays: SQLDate, zoneId: ZoneId): SQLTimestamp = {
    val localDate = LocalDate.ofEpochDay(epochDays)
    val zeroLocalTime = LocalTime.MIDNIGHT
    val localDateTime = LocalDateTime.of(localDate, zeroLocalTime)
    instantToMicros(localDateTime.atZone(zoneId).toInstant)
  }

  /**
   * Trim and parse a given UTF8 date string to a corresponding [[Long]] value.
   * The return type is [[Option]] in order to distinguish between 0L and null. The following
   * formats are allowed:
   *
   * `yyyy`
   * `yyyy-[m]m`
   * `yyyy-[m]m-[d]d`
   * `yyyy-[m]m-[d]d `
   * `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
   * `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
   * `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
   * `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
   * `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
   * `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
   * `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
   * `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
   * `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
   * `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
   * `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
   * `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
   * `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
   * `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
   * `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
   * `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
   */
  def stringToTimestamp(s: UTF8String, timeZoneId: ZoneId): Option[SQLTimestamp] = {
    if (s == null) {
      return None
    }
    var tz: Option[Byte] = None
    val segments: Array[Int] = Array[Int](1, 1, 1, 0, 0, 0, 0, 0, 0)
    var i = 0
    var currentSegmentValue = 0
    val bytes = s.trim.getBytes
    val specialTimestamp = convertSpecialTimestamp(bytes, timeZoneId)
    if (specialTimestamp.isDefined) return specialTimestamp
    var j = 0
    var digitsMilli = 0
    var justTime = false
    while (j < bytes.length) {
      val b = bytes(j)
      val parsedValue = b - '0'.toByte
      if (parsedValue < 0 || parsedValue > 9) {
        if (j == 0 && b == 'T') {
          justTime = true
          i += 3
        } else if (i < 2) {
          if (b == '-') {
            if (i == 0 && j != 4) {
              // year should have exact four digits
              return None
            }
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
          } else if (i == 0 && b == ':') {
            justTime = true
            segments(3) = currentSegmentValue
            currentSegmentValue = 0
            i = 4
          } else {
            return None
          }
        } else if (i == 2) {
          if (b == ' ' || b == 'T') {
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
          } else {
            return None
          }
        } else if (i == 3 || i == 4) {
          if (b == ':') {
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
          } else {
            return None
          }
        } else if (i == 5 || i == 6) {
          if (b == 'Z') {
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
            tz = Some(43)
          } else if (b == '-' || b == '+') {
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
            tz = Some(b)
          } else if (b == '.' && i == 5) {
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
          } else {
            return None
          }
          if (i == 6 && b != '.') {
            i += 1
          }
        } else {
          if (b == ':' || b == ' ') {
            segments(i) = currentSegmentValue
            currentSegmentValue = 0
            i += 1
          } else {
            return None
          }
        }
      } else {
        if (i == 6) {
          digitsMilli += 1
        }
        currentSegmentValue = currentSegmentValue * 10 + parsedValue
      }
      j += 1
    }

    segments(i) = currentSegmentValue
    if (!justTime && i == 0 && j != 4) {
      // year should have exact four digits
      return None
    }

    while (digitsMilli < 6) {
      segments(6) *= 10
      digitsMilli += 1
    }

    // We are truncating the nanosecond part, which results in loss of precision
    while (digitsMilli > 6) {
      segments(6) /= 10
      digitsMilli -= 1
    }
    try {
      val zoneId = if (tz.isEmpty) {
        timeZoneId
      } else {
        val sign = if (tz.get.toChar == '-') -1 else 1
        ZoneOffset.ofHoursMinutes(sign * segments(7), sign * segments(8))
      }
      val nanoseconds = MICROSECONDS.toNanos(segments(6))
      val localTime = LocalTime.of(segments(3), segments(4), segments(5), nanoseconds.toInt)
      val localDate = if (justTime) {
        LocalDate.now(zoneId)
      } else {
        LocalDate.of(segments(0), segments(1), segments(2))
      }
      val localDateTime = LocalDateTime.of(localDate, localTime)
      val zonedDateTime = ZonedDateTime.of(localDateTime, zoneId)
      val instant = Instant.from(zonedDateTime)
      Some(instantToMicros(instant))
    } catch {
      case NonFatal(_) => None
    }
  }

  def instantToMicros(instant: Instant): Long = {
    val us = Math.multiplyExact(instant.getEpochSecond, MICROS_PER_SECOND)
    val result = Math.addExact(us, NANOSECONDS.toMicros(instant.getNano))
    result
  }

  def microsToInstant(us: Long): Instant = {
    val secs = Math.floorDiv(us, MICROS_PER_SECOND)
    val mos = Math.floorMod(us, MICROS_PER_SECOND)
    Instant.ofEpochSecond(secs, mos * NANOS_PER_MICROS)
  }

  def instantToDays(instant: Instant): Int = {
    val seconds = instant.getEpochSecond
    val days = Math.floorDiv(seconds, SECONDS_PER_DAY)
    days.toInt
  }

  def localDateToDays(localDate: LocalDate): Int = {
    Math.toIntExact(localDate.toEpochDay)
  }

  def daysToLocalDate(days: Int): LocalDate = LocalDate.ofEpochDay(days)

  /**
   * Trim and parse a given UTF8 date string to a corresponding [[Int]] value.
   * The return type is [[Option]] in order to distinguish between 0 and null. The following
   * formats are allowed:
   *
   * `yyyy`
   * `yyyy-[m]m`
   * `yyyy-[m]m-[d]d`
   * `yyyy-[m]m-[d]d `
   * `yyyy-[m]m-[d]d *`
   * `yyyy-[m]m-[d]dT*`
   */
  def stringToDate(s: UTF8String, zoneId: ZoneId): Option[SQLDate] = {
    if (s == null) {
      return None
    }
    val segments: Array[Int] = Array[Int](1, 1, 1)
    var i = 0
    var currentSegmentValue = 0
    val bytes = s.trim.getBytes
    val specialDate = convertSpecialDate(bytes, zoneId)
    if (specialDate.isDefined) return specialDate
    var j = 0
    while (j < bytes.length && (i < 3 && !(bytes(j) == ' ' || bytes(j) == 'T'))) {
      val b = bytes(j)
      if (i < 2 && b == '-') {
        if (i == 0 && j != 4) {
          // year should have exact four digits
          return None
        }
        segments(i) = currentSegmentValue
        currentSegmentValue = 0
        i += 1
      } else {
        val parsedValue = b - '0'.toByte
        if (parsedValue < 0 || parsedValue > 9) {
          return None
        } else {
          currentSegmentValue = currentSegmentValue * 10 + parsedValue
        }
      }
      j += 1
    }
    if (i == 0 && j != 4) {
      // year should have exact four digits
      return None
    }
    if (i < 2 && j < bytes.length) {
      // For the `yyyy` and `yyyy-[m]m` formats, entire input must be consumed.
      return None
    }
    segments(i) = currentSegmentValue
    try {
      val localDate = LocalDate.of(segments(0), segments(1), segments(2))
      Some(localDateToDays(localDate))
    } catch {
      case NonFatal(_) => None
    }
  }

  /**
   * Returns the microseconds since year zero (-17999) from microseconds since epoch.
   */
  private def absoluteMicroSecond(microsec: SQLTimestamp): SQLTimestamp = {
    microsec + toYearZero * MICROS_PER_DAY
  }

  private def localTimestamp(microsec: SQLTimestamp, timeZone: TimeZone): SQLTimestamp = {
    val zoneOffsetUs = MILLISECONDS.toMicros(timeZone.getOffset(MICROSECONDS.toMillis(microsec)))
    absoluteMicroSecond(microsec) + zoneOffsetUs
  }

  /**
   * Returns the hour value of a given timestamp value. The timestamp is expressed in microseconds.
   */
  def getHours(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
    (MICROSECONDS.toHours(localTimestamp(microsec, timeZone)) % 24).toInt
  }

  /**
   * Returns the minute value of a given timestamp value. The timestamp is expressed in
   * microseconds.
   */
  def getMinutes(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
    (MICROSECONDS.toMinutes(localTimestamp(microsec, timeZone)) % 60).toInt
  }

  /**
   * Returns the second value of a given timestamp value. The timestamp is expressed in
   * microseconds.
   */
  def getSeconds(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
    (MICROSECONDS.toSeconds(localTimestamp(microsec, timeZone)) % 60).toInt
  }

  /**
   * Returns the seconds part and its fractional part with microseconds.
   */
  def getSecondsWithFraction(microsec: SQLTimestamp, timeZone: TimeZone): Decimal = {
    val secFrac = localTimestamp(microsec, timeZone) % (MILLIS_PER_MINUTE * MICROS_PER_MILLIS)
    Decimal(secFrac, 8, 6)
  }

  /**
   * Returns seconds, including fractional parts, multiplied by 1000. The timestamp
   * is expressed in microseconds since the epoch.
   */
  def getMilliseconds(timestamp: SQLTimestamp, timeZone: TimeZone): Decimal = {
    Decimal(getMicroseconds(timestamp, timeZone), 8, 3)
  }

  /**
   * Returns seconds, including fractional parts, multiplied by 1000000. The timestamp
   * is expressed in microseconds since the epoch.
   */
  def getMicroseconds(timestamp: SQLTimestamp, timeZone: TimeZone): Int = {
    Math.floorMod(localTimestamp(timestamp, timeZone), MICROS_PER_SECOND * 60).toInt
  }

  /**
   * Returns the 'day in year' value for the given date. The date is expressed in days
   * since 1.1.1970.
*/ def getDayInYear(date: SQLDate): Int = { LocalDate.ofEpochDay(date).getDayOfYear } private def extractFromYear(date: SQLDate, divider: Int): Int = { val localDate = daysToLocalDate(date) val yearOfEra = localDate.get(ChronoField.YEAR_OF_ERA) var result = yearOfEra / divider if ((yearOfEra % divider) != 0 || yearOfEra <= 1) result += 1 if (localDate.get(ChronoField.ERA) == 0) result = -result result } /** Returns the millennium for the given date. The date is expressed in days since 1.1.1970. */ def getMillennium(date: SQLDate): Int = extractFromYear(date, 1000) /** Returns the century for the given date. The date is expressed in days since 1.1.1970. */ def getCentury(date: SQLDate): Int = extractFromYear(date, 100) /** Returns the decade for the given date. The date is expressed in days since 1.1.1970. */ def getDecade(date: SQLDate): Int = Math.floorDiv(getYear(date), 10) /** * Returns the year value for the given date. The date is expressed in days * since 1.1.1970. */ def getYear(date: SQLDate): Int = { LocalDate.ofEpochDay(date).getYear } /** * Returns the year which conforms to ISO 8601. Each ISO 8601 week-numbering * year begins with the Monday of the week containing the 4th of January. */ def getIsoYear(date: SQLDate): Int = { daysToLocalDate(date).get(IsoFields.WEEK_BASED_YEAR) } /** * Returns the quarter for the given date. The date is expressed in days * since 1.1.1970. */ def getQuarter(date: SQLDate): Int = { LocalDate.ofEpochDay(date).get(IsoFields.QUARTER_OF_YEAR) } /** * Split date (expressed in days since 1.1.1970) into four fields: * year, month (Jan is Month 1), dayInMonth, daysToMonthEnd (0 if it's last day of month). */ def splitDate(date: SQLDate): (Int, Int, Int, Int) = { val ld = LocalDate.ofEpochDay(date) (ld.getYear, ld.getMonthValue, ld.getDayOfMonth, ld.lengthOfMonth() - ld.getDayOfMonth) } /** * Returns the month value for the given date. The date is expressed in days * since 1.1.1970. January is month 1. */ def getMonth(date: SQLDate): Int = { LocalDate.ofEpochDay(date).getMonthValue } /** * Returns the 'day of month' value for the given date. The date is expressed in days * since 1.1.1970. */ def getDayOfMonth(date: SQLDate): Int = { LocalDate.ofEpochDay(date).getDayOfMonth } /** * Add date and year-month interval. * Returns a date value, expressed in days since 1.1.1970. */ def dateAddMonths(days: SQLDate, months: Int): SQLDate = { LocalDate.ofEpochDay(days).plusMonths(months).toEpochDay.toInt } /** * Add timestamp and full interval. * Returns a timestamp value, expressed in microseconds since 1.1.1970 00:00:00. */ def timestampAddInterval( start: SQLTimestamp, months: Int, microseconds: Long, zoneId: ZoneId): SQLTimestamp = { val resultTimestamp = microsToInstant(start) .atZone(zoneId) .plusMonths(months) .plus(microseconds, ChronoUnit.MICROS) instantToMicros(resultTimestamp.toInstant) } /** * Returns number of months between time1 and time2. time1 and time2 are expressed in * microseconds since 1.1.1970. If time1 is later than time2, the result is positive. * * If time1 and time2 are on the same day of month, or both are the last day of month, * returns, time of day will be ignored. * * Otherwise, the difference is calculated based on 31 days per month. * The result is rounded to 8 decimal places if `roundOff` is set to true. 
*/ def monthsBetween( time1: SQLTimestamp, time2: SQLTimestamp, roundOff: Boolean, timeZone: TimeZone): Double = { val millis1 = MICROSECONDS.toMillis(time1) val millis2 = MICROSECONDS.toMillis(time2) val date1 = millisToDays(millis1, timeZone) val date2 = millisToDays(millis2, timeZone) val (year1, monthInYear1, dayInMonth1, daysToMonthEnd1) = splitDate(date1) val (year2, monthInYear2, dayInMonth2, daysToMonthEnd2) = splitDate(date2) val months1 = year1 * 12 + monthInYear1 val months2 = year2 * 12 + monthInYear2 val monthDiff = (months1 - months2).toDouble if (dayInMonth1 == dayInMonth2 || ((daysToMonthEnd1 == 0) && (daysToMonthEnd2 == 0))) { return monthDiff } // using milliseconds can cause precision loss with more than 8 digits // we follow Hive's implementation which uses seconds val secondsInDay1 = MILLISECONDS.toSeconds(millis1 - daysToMillis(date1, timeZone)) val secondsInDay2 = MILLISECONDS.toSeconds(millis2 - daysToMillis(date2, timeZone)) val secondsDiff = (dayInMonth1 - dayInMonth2) * SECONDS_PER_DAY + secondsInDay1 - secondsInDay2 val secondsInMonth = DAYS.toSeconds(31) val diff = monthDiff + secondsDiff / secondsInMonth.toDouble if (roundOff) { // rounding to 8 digits math.round(diff * 1e8) / 1e8 } else { diff } } // Thursday = 0 since 1970/Jan/01 => Thursday private val SUNDAY = 3 private val MONDAY = 4 private val TUESDAY = 5 private val WEDNESDAY = 6 private val THURSDAY = 0 private val FRIDAY = 1 private val SATURDAY = 2 /* * Returns day of week from String. Starting from Thursday, marked as 0. * (Because 1970-01-01 is Thursday). */ def getDayOfWeekFromString(string: UTF8String): Int = { val dowString = string.toString.toUpperCase(Locale.ROOT) dowString match { case "SU" | "SUN" | "SUNDAY" => SUNDAY case "MO" | "MON" | "MONDAY" => MONDAY case "TU" | "TUE" | "TUESDAY" => TUESDAY case "WE" | "WED" | "WEDNESDAY" => WEDNESDAY case "TH" | "THU" | "THURSDAY" => THURSDAY case "FR" | "FRI" | "FRIDAY" => FRIDAY case "SA" | "SAT" | "SATURDAY" => SATURDAY case _ => -1 } } /** * Returns the first date which is later than startDate and is of the given dayOfWeek. * dayOfWeek is an integer ranges in [0, 6], and 0 is Thu, 1 is Fri, etc,. */ def getNextDateForDayOfWeek(startDate: SQLDate, dayOfWeek: Int): SQLDate = { startDate + 1 + ((dayOfWeek - 1 - startDate) % 7 + 7) % 7 } /** * Returns last day of the month for the given date. The date is expressed in days * since 1.1.1970. */ def getLastDayOfMonth(date: SQLDate): SQLDate = { val localDate = LocalDate.ofEpochDay(date) (date - localDate.getDayOfMonth) + localDate.lengthOfMonth() } // The constants are visible for testing purpose only. private[sql] val TRUNC_INVALID = -1 // The levels from TRUNC_TO_MICROSECOND to TRUNC_TO_DAY are used in truncations // of TIMESTAMP values only. private[sql] val TRUNC_TO_MICROSECOND = 0 private[sql] val MIN_LEVEL_OF_TIMESTAMP_TRUNC = TRUNC_TO_MICROSECOND private[sql] val TRUNC_TO_MILLISECOND = 1 private[sql] val TRUNC_TO_SECOND = 2 private[sql] val TRUNC_TO_MINUTE = 3 private[sql] val TRUNC_TO_HOUR = 4 private[sql] val TRUNC_TO_DAY = 5 // The levels from TRUNC_TO_WEEK to TRUNC_TO_MILLENNIUM are used in truncations // of DATE and TIMESTAMP values. 
private[sql] val TRUNC_TO_WEEK = 6 private[sql] val MIN_LEVEL_OF_DATE_TRUNC = TRUNC_TO_WEEK private[sql] val TRUNC_TO_MONTH = 7 private[sql] val TRUNC_TO_QUARTER = 8 private[sql] val TRUNC_TO_YEAR = 9 private[sql] val TRUNC_TO_DECADE = 10 private[sql] val TRUNC_TO_CENTURY = 11 private[sql] val TRUNC_TO_MILLENNIUM = 12 /** * Returns the trunc date from original date and trunc level. * Trunc level should be generated using `parseTruncLevel()`, should be between 0 and 6. */ def truncDate(d: SQLDate, level: Int): SQLDate = { def truncToYearLevel(divider: Int, adjust: Int): SQLDate = { val oldYear = getYear(d) var newYear = Math.floorDiv(oldYear, divider) if (adjust > 0 && Math.floorMod(oldYear, divider) == 0) { newYear -= 1 } newYear = newYear * divider + adjust localDateToDays(LocalDate.of(newYear, 1, 1)) } level match { case TRUNC_TO_WEEK => getNextDateForDayOfWeek(d - 7, MONDAY) case TRUNC_TO_MONTH => d - DateTimeUtils.getDayOfMonth(d) + 1 case TRUNC_TO_QUARTER => localDateToDays(daysToLocalDate(d).`with`(IsoFields.DAY_OF_QUARTER, 1L)) case TRUNC_TO_YEAR => d - DateTimeUtils.getDayInYear(d) + 1 case TRUNC_TO_DECADE => truncToYearLevel(10, 0) case TRUNC_TO_CENTURY => truncToYearLevel(100, 1) case TRUNC_TO_MILLENNIUM => truncToYearLevel(1000, 1) case _ => // caller make sure that this should never be reached sys.error(s"Invalid trunc level: $level") } } /** * Returns the trunc date time from original date time and trunc level. * Trunc level should be generated using `parseTruncLevel()`, should be between 0 and 12. */ def truncTimestamp(t: SQLTimestamp, level: Int, timeZone: TimeZone): SQLTimestamp = { if (level == TRUNC_TO_MICROSECOND) return t var millis = MICROSECONDS.toMillis(t) val truncated = level match { case TRUNC_TO_MILLISECOND => millis case TRUNC_TO_SECOND => millis - millis % MILLIS_PER_SECOND case TRUNC_TO_MINUTE => millis - millis % MILLIS_PER_MINUTE case TRUNC_TO_HOUR => val offset = timeZone.getOffset(millis) millis += offset millis - millis % MILLIS_PER_HOUR - offset case TRUNC_TO_DAY => val offset = timeZone.getOffset(millis) millis += offset millis - millis % MILLIS_PER_DAY - offset case _ => // Try to truncate date levels val dDays = millisToDays(millis, timeZone) daysToMillis(truncDate(dDays, level), timeZone) } truncated * MICROS_PER_MILLIS } /** * Returns the truncate level, could be from TRUNC_TO_MICROSECOND to TRUNC_TO_MILLENNIUM, * or TRUNC_INVALID, TRUNC_INVALID means unsupported truncate level. */ def parseTruncLevel(format: UTF8String): Int = { if (format == null) { TRUNC_INVALID } else { format.toString.toUpperCase(Locale.ROOT) match { case "MICROSECOND" => TRUNC_TO_MICROSECOND case "MILLISECOND" => TRUNC_TO_MILLISECOND case "SECOND" => TRUNC_TO_SECOND case "MINUTE" => TRUNC_TO_MINUTE case "HOUR" => TRUNC_TO_HOUR case "DAY" | "DD" => TRUNC_TO_DAY case "WEEK" => TRUNC_TO_WEEK case "MON" | "MONTH" | "MM" => TRUNC_TO_MONTH case "QUARTER" => TRUNC_TO_QUARTER case "YEAR" | "YYYY" | "YY" => TRUNC_TO_YEAR case "DECADE" => TRUNC_TO_DECADE case "CENTURY" => TRUNC_TO_CENTURY case "MILLENNIUM" => TRUNC_TO_MILLENNIUM case _ => TRUNC_INVALID } } } /** * Lookup the offset for given millis seconds since 1970-01-01 00:00:00 in given timezone. * TODO: Improve handling of normalization differences. 
* TODO: Replace with JSR-310 or similar system - see SPARK-16788 */ private[sql] def getOffsetFromLocalMillis(millisLocal: Long, tz: TimeZone): Long = { var guess = tz.getRawOffset // the actual offset should be calculated based on milliseconds in UTC val offset = tz.getOffset(millisLocal - guess) if (offset != guess) { guess = tz.getOffset(millisLocal - offset) if (guess != offset) { // fallback to do the reverse lookup using java.time.LocalDateTime // this should only happen near the start or end of DST val localDate = LocalDate.ofEpochDay(MILLISECONDS.toDays(millisLocal)) val localTime = LocalTime.ofNanoOfDay(MILLISECONDS.toNanos( Math.floorMod(millisLocal, MILLIS_PER_DAY))) val localDateTime = LocalDateTime.of(localDate, localTime) val millisEpoch = localDateTime.atZone(tz.toZoneId).toInstant.toEpochMilli guess = (millisLocal - millisEpoch).toInt } } guess } /** * Convert the timestamp `ts` from one timezone to another. * * TODO: Because of DST, the conversion between UTC and human time is not exactly one-to-one * mapping, the conversion here may return wrong result, we should make the timestamp * timezone-aware. */ def convertTz(ts: SQLTimestamp, fromZone: TimeZone, toZone: TimeZone): SQLTimestamp = { // We always use local timezone to parse or format a timestamp val localZone = defaultTimeZone() val utcTs = if (fromZone.getID == localZone.getID) { ts } else { // get the human time using local time zone, that actually is in fromZone. val localZoneOffsetMs = localZone.getOffset(MICROSECONDS.toMillis(ts)) val localTsUs = ts + MILLISECONDS.toMicros(localZoneOffsetMs) // in fromZone val offsetFromLocalMs = getOffsetFromLocalMillis(MICROSECONDS.toMillis(localTsUs), fromZone) localTsUs - MILLISECONDS.toMicros(offsetFromLocalMs) } if (toZone.getID == localZone.getID) { utcTs } else { val toZoneOffsetMs = toZone.getOffset(MICROSECONDS.toMillis(utcTs)) val localTsUs = utcTs + MILLISECONDS.toMicros(toZoneOffsetMs) // in toZone // treat it as local timezone, convert to UTC (we could get the expected human time back) val offsetFromLocalMs = getOffsetFromLocalMillis(MICROSECONDS.toMillis(localTsUs), localZone) localTsUs - MILLISECONDS.toMicros(offsetFromLocalMs) } } /** * Returns a timestamp of given timezone from utc timestamp, with the same string * representation in their timezone. */ def fromUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = { convertTz(time, TimeZoneGMT, getTimeZone(timeZone)) } /** * Returns a utc timestamp from a given timestamp from a given timezone, with the same * string representation in their timezone. */ def toUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = { convertTz(time, getTimeZone(timeZone), TimeZoneGMT) } /** * Returns the number of seconds with fractional part in microsecond precision * since 1970-01-01 00:00:00 local time. */ def getEpoch(timestamp: SQLTimestamp, zoneId: ZoneId): Decimal = { val offset = SECONDS.toMicros( zoneId.getRules.getOffset(microsToInstant(timestamp)).getTotalSeconds) val sinceEpoch = timestamp + offset Decimal(sinceEpoch, 20, 6) } def currentTimestamp(): SQLTimestamp = instantToMicros(Instant.now()) def currentDate(zoneId: ZoneId): SQLDate = localDateToDays(LocalDate.now(zoneId)) private def today(zoneId: ZoneId): ZonedDateTime = { Instant.now().atZone(zoneId).`with`(LocalTime.MIDNIGHT) } private val specialValueRe = """(\\p{Alpha}+)\\p{Blank}*(.*)""".r /** * Extracts special values from an input string ignoring case. 
* @param input - a trimmed string * @param zoneId - zone identifier used to get the current date. * @return some special value in lower case or None. */ private def extractSpecialValue(input: String, zoneId: ZoneId): Option[String] = { def isValid(value: String, timeZoneId: String): Boolean = { // Special value can be without any time zone if (timeZoneId.isEmpty) return true // "now" must not have the time zone field if (value.compareToIgnoreCase("now") == 0) return false // If the time zone field presents in the input, it must be resolvable try { getZoneId(timeZoneId) true } catch { case NonFatal(_) => false } } assert(input.trim.length == input.length) if (input.length < 3 || !input(0).isLetter) return None input match { case specialValueRe(v, z) if isValid(v, z) => Some(v.toLowerCase(Locale.US)) case _ => None } } /** * Converts notational shorthands that are converted to ordinary timestamps. * @param input - a trimmed string * @param zoneId - zone identifier used to get the current date. * @return some of microseconds since the epoch if the conversion completed * successfully otherwise None. */ def convertSpecialTimestamp(input: String, zoneId: ZoneId): Option[SQLTimestamp] = { extractSpecialValue(input, zoneId).flatMap { case "epoch" => Some(0) case "now" => Some(currentTimestamp()) case "today" => Some(instantToMicros(today(zoneId).toInstant)) case "tomorrow" => Some(instantToMicros(today(zoneId).plusDays(1).toInstant)) case "yesterday" => Some(instantToMicros(today(zoneId).minusDays(1).toInstant)) case _ => None } } private def convertSpecialTimestamp(bytes: Array[Byte], zoneId: ZoneId): Option[SQLTimestamp] = { if (bytes.length > 0 && Character.isAlphabetic(bytes(0))) { convertSpecialTimestamp(new String(bytes, StandardCharsets.UTF_8), zoneId) } else { None } } /** * Converts notational shorthands that are converted to ordinary dates. * @param input - a trimmed string * @param zoneId - zone identifier used to get the current date. * @return some of days since the epoch if the conversion completed successfully otherwise None. */ def convertSpecialDate(input: String, zoneId: ZoneId): Option[SQLDate] = { extractSpecialValue(input, zoneId).flatMap { case "epoch" => Some(0) case "now" | "today" => Some(currentDate(zoneId)) case "tomorrow" => Some(Math.addExact(currentDate(zoneId), 1)) case "yesterday" => Some(Math.subtractExact(currentDate(zoneId), 1)) case _ => None } } private def convertSpecialDate(bytes: Array[Byte], zoneId: ZoneId): Option[SQLDate] = { if (bytes.length > 0 && Character.isAlphabetic(bytes(0))) { convertSpecialDate(new String(bytes, StandardCharsets.UTF_8), zoneId) } else { None } } }
rezasafi/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala
Scala
apache-2.0
34,603
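A note on the SPARK-6785 fix in millisToDays above: plain long division truncates toward zero, so timestamps before 1970 would land on the wrong calendar day, while Math.floorDiv rounds toward negative infinity. A minimal standalone sketch of just that arithmetic (plain Scala, no Spark types; MillisPerDay is restated locally for the example):

object MillisToDaysSketch {
  val MillisPerDay: Long = 24L * 60 * 60 * 1000

  // floorDiv rounds toward negative infinity, which keeps pre-1970
  // instants on the correct calendar day.
  def millisToDaysUtc(millisUtc: Long): Int =
    Math.floorDiv(millisUtc, MillisPerDay).toInt

  def main(args: Array[String]): Unit = {
    println(millisToDaysUtc(0L))  // 0  -> 1970-01-01
    println(millisToDaysUtc(-1L)) // -1 -> 1969-12-31
    println(-1L / MillisPerDay)   // 0, the wrong answer naive division would give
  }
}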
package probability.random

import au.id.cxd.math.probability.analysis.AndersonDarlingTest
import au.id.cxd.math.probability.discrete.Binomial
import au.id.cxd.math.probability.random.{RApproxBinom, RBinom}
import org.scalatest.{FlatSpec, Matchers}

// TODO: add a goodness-of-fit test that checks the samples against the binomial distribution.
// As it stands, the results can be compared visually against the same distributions generated in R:
// the printed vectors can be cut and pasted into R and inspected.
class TestRBinom extends FlatSpec with Matchers {

  def makeCDF(n: Double, p: Double): Double => Double = {
    val binom = Binomial(n)(p)
    (x: Double) => binom.cdf(for (i <- 0 to x.toInt) yield i.toDouble)
  }

  "rbinom" should "Draw random samples from distribution" in {
    val binom = RBinom(n = 5, p = 0.3)
    val samples = binom.draw(100)
    val cdf = makeCDF(5, 0.3)
    val test = AndersonDarlingTest(samples, cdf)
    val result = test.test(0.05)
    println(result.toString)
    println(samples)
  }

  "rbinom" should "Draw random samples from distribution with high p" in {
    val binom = RBinom(n = 5, p = 0.8)
    val samples = binom.draw(100)
    println(samples)
    val cdf = makeCDF(5, 0.8)
    val test = AndersonDarlingTest(samples, cdf)
    val result = test.test(0.05)
    println(result.toString)
  }

  "rapproxbinom" should "Draw random samples from distribution" in {
    val binom = RApproxBinom(n = 5, p = 0.3)
    val samples = binom.draw(100)
    println(samples)
    val cdf = makeCDF(5, 0.3)
    val test = AndersonDarlingTest(samples, cdf)
    val result = test.test(0.05)
    println(result.toString)
  }

  "rbinom" should "Draw random samples from distribution with large n" in {
    val binom = RBinom(n = 25, p = 0.3)
    val samples = binom.draw(100)
    println(samples)
    val cdf = makeCDF(25, 0.3)
    val test = AndersonDarlingTest(samples, cdf)
    val result = test.test(0.05)
    println(result.toString)
  }

  "rbinom" should "Draw random samples from distribution with large n and high p" in {
    val binom = RBinom(n = 25, p = 0.8)
    val samples = binom.draw(100)
    println(samples)
    val cdf = makeCDF(25, 0.8)
    val test = AndersonDarlingTest(samples, cdf)
    val result = test.test(0.05)
    println(result.toString)
  }
}
cxd/scala-au.id.cxd.math
math/src/test/scala/probability/random/TestRBinom.scala
Scala
mit
2,282
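The TODO above asks for a proper goodness-of-fit check. Short of that, a cheap sanity check is to compare sample moments against the theoretical mean n*p and variance n*p*(1-p). A self-contained sketch using only scala.util.Random (it deliberately does not touch the project's RBinom or AndersonDarlingTest):

import scala.util.Random

// Draw Binomial(n, p) as a sum of Bernoulli trials and compare the sample
// mean/variance against the theoretical n*p and n*p*(1-p).
object BinomMomentCheck {
  def drawBinom(n: Int, p: Double, rng: Random): Int =
    (1 to n).count(_ => rng.nextDouble() < p)

  def main(args: Array[String]): Unit = {
    val (n, p, draws) = (5, 0.3, 10000)
    val rng = new Random(42)
    val xs = Vector.fill(draws)(drawBinom(n, p, rng).toDouble)
    val mean = xs.sum / draws
    val variance = xs.map(x => (x - mean) * (x - mean)).sum / draws
    println(f"sample mean $mean%.3f vs n*p = ${n * p}%.3f")
    println(f"sample var  $variance%.3f vs n*p*(1-p) = ${n * p * (1 - p)}%.3f")
  }
}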
/* ************************************************************************************* * Copyright 2011 Normation SAS ************************************************************************************* * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * In accordance with the terms of section 7 (7. Additional Terms.) of * the GNU Affero GPL v3, the copyright holders add the following * Additional permissions: * Notwithstanding to the terms of section 5 (5. Conveying Modified Source * Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3 * licence, when you create a Related Module, this Related Module is * not considered as a part of the work and may be distributed under the * license agreement of your choice. * A "Related Module" means a set of sources files including their * documentation that, without modification of the Source Code, enables * supplementary functions or services in addition to those offered by * the Software. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>. * ************************************************************************************* */ package com.normation.rudder.repository.jdbc import com.normation.inventory.domain.NodeId import com.normation.rudder.domain.policies.DirectiveId import com.normation.rudder.domain.policies.RuleId import com.normation.rudder.repository.ReportsRepository import scala.collection._ import org.joda.time._ import org.slf4j.{Logger,LoggerFactory} import com.normation.rudder.domain.reports.bean._ import com.normation.cfclerk.domain.{Cf3PolicyDraftId} import org.springframework.jdbc.core._ import java.sql.ResultSet import java.sql.Timestamp import scala.collection.JavaConversions._ import net.liftweb.common._ import net.liftweb.common.Box._ import java.sql.Types import org.springframework.dao.DataAccessException class ReportsJdbcRepository(jdbcTemplate : JdbcTemplate) extends ReportsRepository with Loggable { val baseQuery = "select executiondate, nodeid, ruleid, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from RudderSysEvents where 1=1 "; val baseArchivedQuery = "select executiondate, nodeid, ruleid, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from archivedruddersysevents where 1=1 "; val reportsTable = "ruddersysevents" val archiveTable = "archivedruddersysevents" val idQuery = "select id, executiondate, nodeid, ruleid, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from ruddersysevents where 1=1 "; // find the last full run per node // we are not looking for older request that 15 minutes for the moment val lastQuery = "select nodeid as Node, max(executiontimestamp) as Time from ruddersysevents where ruleId = 'hasPolicyServer-root' and component = 'common' and keyValue = 'EndRun' and executionTimeStamp > (now() - interval '15 minutes') group by nodeid" val lastQueryByNode = "select nodeid as Node, max(executiontimestamp) as Time from 
ruddersysevents where ruleId = 'hasPolicyServer-root' and component = 'common' and keyValue = 'EndRun' and nodeid = ? and executionTimeStamp > (now() - interval '15 minutes') group by nodeid" val joinQuery = "select executiondate, nodeid, ruleId, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from RudderSysEvents join (" + lastQuery +" ) as Ordering on Ordering.Node = nodeid and executionTimeStamp = Ordering.Time where 1=1"; val joinQueryByNode = "select executiondate, nodeid, ruleId, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from RudderSysEvents join (" + lastQueryByNode +" ) as Ordering on Ordering.Node = nodeid and executionTimeStamp = Ordering.Time where 1=1"; def findReportsByRule( ruleId : RuleId , serial : Option[Int] , beginDate: Option[DateTime] , endDate : Option[DateTime] ): Seq[Reports] = { var query = baseQuery + " and ruleId = ? " var array = mutable.Buffer[AnyRef](ruleId.value) serial match { case None => ; case Some(int) => query = query + " and serial = ?"; array += new java.lang.Integer(int) } beginDate match { case None => case Some(date) => query = query + " and executionTimeStamp > ?"; array += new Timestamp(date.getMillis) } endDate match { case None => case Some(date) => query = query + " and executionTimeStamp < ?"; array += new Timestamp(date.getMillis) } jdbcTemplate.query(query, array.toArray[AnyRef], ReportsMapper).toSeq; } def findReportsByNode( nodeId : NodeId , ruleId : Option[RuleId] , serial : Option[Int] , beginDate: Option[DateTime] , endDate : Option[DateTime] ) : Seq[Reports] = { var query = baseQuery + " and nodeId = ? " var array = mutable.Buffer[AnyRef](nodeId.value) ruleId match { case None => case Some(cr) => query = query + " and ruleId = ?"; array += cr.value // A serial makes sense only if the CR is set serial match { case None => ; case Some(int) => query = query + " and serial = ?"; array += new java.lang.Integer(int) } } beginDate match { case None => case Some(date) => query = query + " and executionDate > ?"; array += new Timestamp(date.getMillis) } endDate match { case None => case Some(date) => query = query + " and executionDate < ?"; array += new Timestamp(date.getMillis) } query = query + " ORDER BY id desc LIMIT 1000" jdbcTemplate.query(query, array.toArray[AnyRef], ReportsMapper).toSeq; } def findReportsByNode( nodeId : NodeId , ruleId : RuleId , serial : Int , beginDate: DateTime , endDate : Option[DateTime] ): Seq[Reports] = { var query = baseQuery + " and nodeId = ? and ruleId = ? and serial = ? and executionTimeStamp >= ?" var array = mutable.Buffer[AnyRef](nodeId.value, ruleId.value, new java.lang.Integer(serial), new Timestamp(beginDate.getMillis)) endDate match { case None => case Some(date) => query = query + " and executionTimeStamp < ?"; array += new Timestamp(date.getMillis) } query = query + " ORDER BY executionTimeStamp asc" jdbcTemplate.query(query, array.toArray[AnyRef], ReportsMapper).toSeq; } /** * Return the last (really the last, serial wise, with full execution) reports for a rule */ def findLastReportByRule( ruleId: RuleId , serial: Int , node : Option[NodeId] ) : Seq[Reports] = { var query = "" var array = mutable.Buffer[AnyRef]() node match { case None => query += joinQuery + " and ruleId = ? and serial = ? and executionTimeStamp > (now() - interval '15 minutes')" array ++= mutable.Buffer[AnyRef](ruleId.value, new java.lang.Integer(serial)) case Some(nodeId) => query += joinQueryByNode + " and ruleId = ? and serial = ? 
and executionTimeStamp > (now() - interval '15 minutes') and nodeId = ?" array ++= mutable.Buffer[AnyRef](nodeId.value, ruleId.value, new java.lang.Integer(serial), nodeId.value) } jdbcTemplate.query(query, array.toArray[AnyRef], ReportsMapper).toSeq; } def findExecutionTimeByNode( nodeId : NodeId , beginDate: DateTime , endDate : Option[DateTime] ) : Seq[DateTime] = { var query = "select distinct executiontimestamp from ruddersysevents where ruleId = 'hasPolicyServer-root' and component = 'common' and keyValue = 'EndRun' and nodeId = ? and executiontimestamp >= ?" var array = mutable.Buffer[AnyRef](nodeId.value, new Timestamp(beginDate.getMillis)) endDate match { case None => ; case Some(date) => query = query + " and executiontimestamp < ?"; array += new Timestamp(date.getMillis) } query = query + " order by executiontimestamp " jdbcTemplate.query(query, array.toArray[AnyRef], ExecutionTimeMapper).toSeq; } def getOldestReports() : Box[Option[Reports]] = { jdbcTemplate.query(baseQuery + " order by executionTimeStamp asc limit 1", ReportsMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answers for the oldest report in the database") case seq => Full(seq.headOption) } } def getOldestArchivedReports() : Box[Option[Reports]] = { jdbcTemplate.query(baseArchivedQuery + " order by executionTimeStamp asc limit 1", ReportsMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answers for the oldest report in the database") case seq => Full(seq.headOption) } } def getNewestReportOnNode(nodeId:NodeId) : Box[Option[Reports]] = { val array = Seq(nodeId.value) val query = baseQuery + s" and nodeid = ? order by executionTimeStamp desc limit 1" jdbcTemplate.query(query,array.toArray[AnyRef],ReportsMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answers for the newest report in the database") case seq => Full(seq.headOption) } } def getNewestReports() : Box[Option[Reports]] = { jdbcTemplate.query(baseQuery + " order by executionTimeStamp desc limit 1", ReportsMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answers for the newest report in the database") case seq => Full(seq.headOption) } } def getNewestArchivedReports() : Box[Option[Reports]] = { jdbcTemplate.query(baseArchivedQuery + " order by executionTimeStamp desc limit 1", ReportsMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answers for the newest report in the database") case seq => Full(seq.headOption) } } def getDatabaseSize(databaseName:String) : Box[Long] = { try { jdbcTemplate.query( """SELECT nspname || '.' || relname AS "relation", pg_relation_size(C.oid) AS "size" FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid= C.relnamespace) WHERE nspname NOT IN ('pg_catalog', 'information_schema') and relname = '%s' """.format(databaseName) , DatabaseSizeMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answers for the database size query") case seq => seq.headOption ?~!
"The query used to find database size did not return any tuple" } } catch { case e: DataAccessException => val msg ="Could not compute the size of the database, cause is " + e.getMessage() logger.error(msg) Failure(msg,Full(e),Empty) } } def archiveEntries(date : DateTime) : Box[Int] = { try{ val migrate = jdbcTemplate.execute(""" insert into %s (id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg) (select id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg from %s where executionTimeStamp < '%s') """.format(archiveTable,reportsTable,date.toString("yyyy-MM-dd") ) ) logger.debug("""Archiving report with SQL query: [[ | insert into %s (id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg) | (select id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg from %s | where executionTimeStamp < '%s') |]]""".stripMargin.format(archiveTable,reportsTable,date.toString("yyyy-MM-dd"))) val delete = jdbcTemplate.update(""" delete from %s where executionTimeStamp < '%s' """.format(reportsTable,date.toString("yyyy-MM-dd") ) ) jdbcTemplate.execute("vacuum %s".format(reportsTable)) Full(delete) } catch { case e: DataAccessException => val msg ="Could not archive entries in the database, cause is " + e.getMessage() logger.error(msg) Failure(msg,Full(e),Empty) } } def deleteEntries(date : DateTime) : Box[Int] = { logger.debug("""Deleting report with SQL query: [[ | delete from %s where executionTimeStamp < '%s' |]] and: [[ | delete from %s where executionTimeStamp < '%s' |]]""".stripMargin.format(reportsTable,date.toString("yyyy-MM-dd"),archiveTable,date.toString("yyyy-MM-dd"))) try{ val delete = jdbcTemplate.update(""" delete from %s where executionTimeStamp < '%s' """.format(reportsTable,date.toString("yyyy-MM-dd") ) ) + jdbcTemplate.update(""" delete from %s where executionTimeStamp < '%s' """.format(archiveTable,date.toString("yyyy-MM-dd") ) ) jdbcTemplate.execute("vacuum %s".format(reportsTable)) jdbcTemplate.execute("vacuum full %s".format(archiveTable)) Full(delete) } catch { case e: DataAccessException => val msg ="Could not delete entries in the database, cause is " + e.getMessage() logger.error(msg) Failure(msg,Full(e),Empty) } } def getHighestId : Box[Int] = { val query = "select id from RudderSysEvents order by id desc limit 1" try { jdbcTemplate.query(query,IdMapper).toSeq match { case seq if seq.size > 1 => Failure("Too many answer for the highest id in the database") case seq => seq.headOption ?~! "No report where found in database (and so, we can not get highest id)" } } catch { case e:DataAccessException => logger.error("Could not fetch highest id in the database. Reason is : %s".format(e.getMessage())) Failure(e.getMessage()) } } def getLastHundredErrorReports(kinds:List[String]) : Box[Seq[(Reports,Int)]] = { val query = "%s and (%s) order by executiondate desc limit 100".format(idQuery,kinds.map("eventtype='%s'".format(_)).mkString(" or ")) try { Full(jdbcTemplate.query(query,ReportsWithIdMapper).toSeq) } catch { case e:DataAccessException => logger.error("Could not fetch last hundred reports in the database. Reason is : %s".format(e.getMessage())) Failure("Could not fetch last hundred reports in the database. 
Reason is : %s".format(e.getMessage())) } } def getErrorReportsBeetween(lower : Int, upper:Int,kinds:List[String]) : Box[Seq[Reports]] = { if (lower>=upper) Empty else{ val query = "%s and id between '%d' and '%d' and (%s) order by executiondate asc".format(baseQuery,lower,upper,kinds.map("eventtype='%s'".format(_)).mkString(" or ")) try { Full(jdbcTemplate.query(query,ReportsMapper).toSeq) } catch { case e:DataAccessException => logger.error("Could not fetch reports between ids %d and %d in the database. Reason is : %s".format(lower,upper,e.getMessage())) Failure("Could not fetch reports between ids %d and %d in the database. Reason is : %s".format(lower,upper,e.getMessage())) } } } } object ReportsMapper extends RowMapper[Reports] { def mapRow(rs : ResultSet, rowNum: Int) : Reports = { Reports.factory(new DateTime(rs.getTimestamp("executionDate")), RuleId(rs.getString("ruleId")), DirectiveId(rs.getString("directiveId")), NodeId(rs.getString("nodeId")), rs.getInt("serial"), rs.getString("component"), rs.getString("keyValue"), new DateTime(rs.getTimestamp("executionTimeStamp")), rs.getString("eventType"), rs.getString("msg")) } } object ExecutionTimeMapper extends RowMapper[DateTime] { def mapRow(rs : ResultSet, rowNum: Int) : DateTime = { new DateTime(rs.getTimestamp("executiontimestamp")) } } object DatabaseSizeMapper extends RowMapper[Long] { def mapRow(rs : ResultSet, rowNum: Int) : Long = { rs.getLong("size") } } object IdMapper extends RowMapper[Int] { def mapRow(rs : ResultSet, rowNum: Int) : Int = { rs.getInt("id") } } object ReportsWithIdMapper extends RowMapper[(Reports,Int)] { def mapRow(rs : ResultSet, rowNum: Int) : (Reports,Int) = { (ReportsMapper.mapRow(rs, rowNum),IdMapper.mapRow(rs, rowNum)) } }
jooooooon/rudder
rudder-core/src/main/scala/com/normation/rudder/repository/jdbc/ReportsJdbcRepository.scala
Scala
agpl-3.0
17,171
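The repository above grows its SQL text and its bind-parameter buffer in lockstep as each Option filter is inspected. A standalone sketch of that optional-filter pattern without JDBC or the Rudder types (the table and column names here are illustrative only); note that, as in findReportsByNode, the serial filter only applies when a rule id is present:

object QueryBuilderSketch {
  // Builds the SQL text and the bind parameters together, one Option at a
  // time; serial is only considered when ruleId is defined.
  def buildQuery(nodeId: String, ruleId: Option[String], serial: Option[Int]): (String, Seq[Any]) = {
    val base = ("select * from reports where nodeId = ?", Seq[Any](nodeId))
    ruleId.fold(base) { r =>
      val withRule = (base._1 + " and ruleId = ?", base._2 :+ r)
      serial.fold(withRule)(s => (withRule._1 + " and serial = ?", withRule._2 :+ s))
    }
  }

  def main(args: Array[String]): Unit = {
    println(buildQuery("node1", Some("rule42"), Some(7)))
    println(buildQuery("node1", None, Some(7))) // serial ignored without a rule
  }
}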
package co.ledger.wallet.core.wallet.ripple.api import co.ledger.wallet.core.concurrent.AsyncCursor import co.ledger.wallet.core.net.{HttpClient, HttpException} import co.ledger.wallet.core.wallet.ripple._ import co.ledger.wallet.core.wallet.ripple.database.AccountRow import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import java.util.Date import java.text.DateFormat import java.text.SimpleDateFormat import java.util.Locale import exceptions.{FeesException, RippleException} import scala.collection.mutable.ArrayBuffer import scala.scalajs.js import scala.util.Try /** * Created by alix on 4/14/17. */ class ApiAccountRestClient(http: HttpClient, private val accountRow: AccountRow = null ) { def balance(): Future[XRP] = { val request = http.get(s"/accounts/${ accountRow.rippleAccount}/balances?currency=XRP") request.json map { case (json, response) => if (response.statusCode == 500) { XRP.Zero } else { var value = json.getJSONArray("balances").getJSONObject(0) .getString("value") new XRP((BigDecimal(value) * BigDecimal(10).pow(6)).toBigInt()) } } recover { case HttpException(json, response, _) => throw RippleException() case other: Throwable => throw other } } def transactions(init: String): Future[Array[JsonTransaction]] = { val start = new Date(init) val dateLiteral = new js.Date(start.getTime + 1000).toJSON() var transactionsBuffer = ArrayBuffer[JsonTransaction]() def iterate(marker: String = ""): Future[Array[JsonTransaction]] = { var url = s"/accounts/${accountRow.rippleAccount}/transactions?type=Payment" + s"&descending=false&result=tesSUCCESS&start=${dateLiteral}" if (marker != "") { url += s"&marker=$marker" } var request = http.get(url) request.json flatMap { case (json, _) => if (json.getInt("count") > 0) { val txs = json.getJSONArray("transactions") (0 until txs.length()) map { (index: Int) => transactionsBuffer.append(new JsonTransaction(txs.getJSONObject(index))) } } if (json.has("marker")) { iterate(json.getString("marker")) } else { Future.successful(transactionsBuffer.toArray) } } recover { case HttpException(json, response, _) => throw RippleException() case other: Throwable => throw other } } iterate() } def fees(): Future[XRP] = { val request = http.get(s"/network/fees?interval=ledger&limit=1&descending=true") request.json map { case (json, _) => val fees = json.getJSONArray("rows").getJSONObject(0).getDouble("avg") new XRP((BigDecimal(fees) * BigDecimal(10).pow(6)).toBigInt()) } recover { case all => XRP(10) } } def account(address: String): Future[Boolean] = { val request = http.get(s"/accounts/$address") request.json map { case (json, _) => json.getString("result") == "success" } recover { case HttpException(json, response, _) => if (response.statusCode == 404) { false } else { throw RippleException() } case other: Throwable => throw other } } def ledger(): Future[Double] = { val request = http.get(s"/ledgers") request.json map { case (json, _) => json.getJSONObject("ledger").getDouble("ledger_index") } } }
LedgerHQ/ledger-wallet-ripple
src/main/scala/co/ledger/wallet/core/wallet/ripple/api/ApiAccountRestClient.scala
Scala
mit
3,680
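balance() above converts the decimal XRP string returned by the API into an integral number of drops (1 XRP = 10^6 drops) via BigDecimal. That conversion in isolation, as a runnable sketch:

object XrpDrops {
  // Exact decimal-to-integer conversion: no floating point involved.
  def xrpToDrops(value: String): BigInt =
    (BigDecimal(value) * BigDecimal(10).pow(6)).toBigInt

  def main(args: Array[String]): Unit = {
    println(xrpToDrops("1"))        // 1000000
    println(xrpToDrops("0.000001")) // 1
    println(xrpToDrops("25.5"))     // 25500000
  }
}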
package hackerRank.algorithms

object CompareTriplets {

  def main(args: Array[String]): Unit = {
    val sc = new java.util.Scanner(System.in)

    val a0 = sc.nextInt()
    val a1 = sc.nextInt()
    val a2 = sc.nextInt()
    val b0 = sc.nextInt()
    val b1 = sc.nextInt()
    val b2 = sc.nextInt()

    def givePoint(val1: Int, val2: Int): (Int, Int) = {
      if (val1 > val2) (1, 0)
      else if (val1 < val2) (0, 1)
      else (0, 0)
    }

    val pairs = Seq(a0 -> b0, a1 -> b1, a2 -> b2)
    val points = pairs.map { case (i, j) => givePoint(i, j) }
    val summedPoints = points.foldLeft((0, 0)) { case ((accA, accB), (a, b)) => (accA + a, accB + b) }

    println(s"${summedPoints._1} ${summedPoints._2}")
  }
}
cricanr/AlgorithmsHackerRank
src/main/scala/hackerRank/algorithms/CompareTriplets.scala
Scala
mit
735
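The same compare-and-score logic generalizes from triplets to any two equal-length sequences by zipping and folding, which is essentially what the foldLeft above does. A sketch:

object CompareSeqs {
  // Score each aligned pair, then sum the points for both players.
  def compare(a: Seq[Int], b: Seq[Int]): (Int, Int) =
    a.zip(b).foldLeft((0, 0)) { case ((pa, pb), (x, y)) =>
      if (x > y) (pa + 1, pb)
      else if (x < y) (pa, pb + 1)
      else (pa, pb)
    }

  def main(args: Array[String]): Unit =
    println(compare(Seq(5, 6, 7), Seq(3, 6, 10))) // (1,1)
}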
package org.nikosoft.oanda.bot import akka.actor.{ActorSystem, Props} import akka.stream.ActorMaterializer import akka.stream.scaladsl.Sink import org.nikosoft.oanda.api.ApiModel.InstrumentModel.CandlestickGranularity import org.nikosoft.oanda.instruments.Model.{StochasticCandleIndicator, _} object Launcher extends App { implicit val actorSystem = ActorSystem("streamer") implicit val materializer = ActorMaterializer() val source = new Chart( indicators = Seq( new MACDCandleCloseIndicator(), new RSICandleCloseIndicator(14), new EMACandleCloseIndicator(50), new EMACandleCloseIndicator(100), new ATRCandleIndicator(14), new CMOCandleCloseIndicator(21), new StochasticCandleIndicator(5, Some(3), Some(3)) ) ).streamCsv("/Users/niko/projects/oanda-trader/data/EURUSD.csv", ";") val sink = Sink.foreach[CandleStick](_ => {}) source.runWith(sink) // val managerActor = actorSystem.actorOf(Props.create(classOf[ManagerActor], chart), "manager-actor") }
cnnickolay/forex-trader
trading-bot/src/main/scala/org/nikosoft/oanda/bot/Launcher.scala
Scala
mit
1,022
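The Chart above chains indicators such as EMACandleCloseIndicator over a candle stream. As a standalone illustration of the kind of computation such an indicator performs, here is an exponential moving average over closing prices (plain Scala, no oanda-trader or Akka types; the stream is just a Seq, the EMA is seeded with the first close, and the input is assumed non-empty):

object EmaSketch {
  // Standard EMA recurrence: ema(t) = close(t) * k + ema(t-1) * (1 - k),
  // with smoothing factor k = 2 / (period + 1).
  def ema(closes: Seq[Double], period: Int): Seq[Double] = {
    val k = 2.0 / (period + 1)
    closes.tail.scanLeft(closes.head)((prev, c) => c * k + prev * (1 - k))
  }

  def main(args: Array[String]): Unit =
    println(ema(Seq(1.0, 2.0, 3.0, 4.0, 5.0), 3).map(v => f"$v%.3f"))
}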
package zzz.akka.avionics import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSelection, Props } import akka.pattern._ import akka.util.Timeout import scala.concurrent.Await import scala.concurrent.duration._ import zzz.akka.{ IsolatedResumeSupervisor, IsolatedStopSupervisor, OneForOneSupervisor } class Plane extends Actor with ActorLogging { this: PilotProvider with AltimeterProvider with HeadingIndicatorProvider with LeadFlightAttendantProvider ⇒ import EventSource._ import Altimeter._ import ControlSurfaces._ import Plane._ import Pilot._ import Autopilot._ import zzz.akka.IsolatedLifeCycleSupervisor.{ Started, WaitForStart } lazy val configKey = "zzz.akka.avionics.flightcrew" lazy val config = context.system.settings.config lazy val pilotName = config.getString(s"$configKey.pilotName") lazy val copilotName = config.getString(s"$configKey.copilotName") implicit val askTimeout = Timeout(1.second) val plane = context.self def actorForControls(name: String) = context .actorSelection(s"Equipment/$name") def startEquipment = { val controls = context.actorOf( Props( new IsolatedResumeSupervisor with OneForOneSupervisor { def childStarter = { val alt = context.actorOf(Props(altimeter), "Altimeter") val heading = context.actorOf(Props(headingIndicator), "HeadingIndicator") context.actorOf(Props(autopilot(plane)), "Autopilot") context.actorOf(Props(ControlSurfaces(plane, alt, heading)), "ControlSurfaces") } }), "Equipment") Await.result(controls ? WaitForStart, 1.second) } def actorForPilots(name: String) = context.actorSelection(s"Pilots/$name") def startPeople = { val (plane, controls, autopilot, altimeter, heading) = ( self, actorForControls("ControlSurfaces"), actorForControls("Autopilot"), actorForControls("Altimeter"), actorForControls("HeadingIndicator")) val people = context.actorOf( Props( new IsolatedStopSupervisor with OneForOneSupervisor { def childStarter = { context .actorOf( Props(pilot( plane, autopilot, heading, altimeter)), pilotName) context .actorOf( Props(copilot( plane, autopilot, controls, altimeter)), copilotName) } }), "Pilots") context .actorOf( Props(leadFlightAttendant), config.getString(s"$configKey.leadAttendantName")) Await.result(people ? WaitForStart, 1.second) } def receive = { case AltitudeUpdate(altitudeInFeet) ⇒ log debug (s"Altitude is now: $altitudeInFeet") case GiveMeControl ⇒ log info ("Plane giving control.") sender ! Controls(actorForControls("ControlSurfaces")) case RequestCopilot ⇒ CopilotSelection(actorForPilots(s"$copilotName")) case CopilotIdentified ⇒ log debug s"Autopilot has identified the copilot" case PilotIdentified ⇒ log debug s"Copilot has identified the pilot" case LostControl ⇒ actorForControls("Autopilot") ! TakeControl } override def preStart() { startEquipment startPeople actorForControls("Altimeter") ! RegisterListener(self) actorForPilots(pilotName) :: actorForPilots(copilotName) :: Nil foreach { _ ! ReadyToGo } } } object Plane { case object GiveMeControl case object RequestCopilot case object CopilotIdentified case object PilotIdentified case object LostControl case class Controls(controls: ActorSelection) def apply() = new Plane with PilotProvider with AltimeterProvider with HeadingIndicatorProvider with LeadFlightAttendantProvider } trait PlaneProviderComponent { def plane: ActorRef }
jackcviers/learning-akka
src/main/scala/zzz/akka/avionics/Plane.scala
Scala
apache-2.0
3,947
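Plane.preStart above blocks on `controls ? WaitForStart` until the supervised children are up. A minimal classic-Akka sketch of that ask/Await handshake, assuming akka-actor is on the classpath; the message names mirror the WaitForStart/Started protocol, but the Child actor here is a stand-in, not the book's IsolatedLifeCycleSupervisor:

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

object AskSketch {
  case object WaitForStart
  case object Started

  class Child extends Actor {
    def receive = { case WaitForStart => sender() ! Started }
  }

  def main(args: Array[String]): Unit = {
    implicit val timeout: Timeout = Timeout(1.second)
    val system = ActorSystem("sketch")
    val child = system.actorOf(Props(new Child), "child")
    // Block until the child confirms it is ready, as preStart does above.
    println(Await.result(child ? WaitForStart, 1.second)) // Started
    system.terminate()
  }
}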
package spotlight.train

import akka.NotUsed

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import scalaz.{-\/, \/-}
import scalaz.concurrent.Task
import spotlight.model.timeseries.{TimeSeries, TimeSeriesBase, TimeSeriesCohort}
import spotlight.model.timeseries.TimeSeriesBase.Merging

/**
  * Created by rolfsd on 12/7/15.
  */
object TrainOutlierAnalysis {
  def taskToFuture[A](task: Task[A]): Future[A] = {
    val p = Promise[A]()
    task.unsafePerformAsync {
      case \/-(a) => p success a
      case -\/(ex) => p failure ex
    }
    p.future
  }

  def feedTrainingFlow[T <: TimeSeriesBase](
    interpreter: TrainingRepository.Interpreter,
    maxPoints: Int,
    batchingWindow: FiniteDuration
  )(
    implicit system: ActorSystem,
    merging: Merging[T]
  ): Flow[T, T, NotUsed] = {
    Flow[T]
    .groupedWithin(maxPoints, batchingWindow) // batch points before archiving
    .map {
      _.groupBy { _.topic }
      .map { case (topic, tss) =>
        tss.tail.foldLeft(tss.head) { (acc, e) =>
          merging.merge(acc, e) valueOr { exs => throw exs.head }
        }
      }
    }
    .mapConcat { identity }
    .map { ts =>
      val protocol = ts match {
        case s: TimeSeries => TrainingRepository putSeries s
        case c: TimeSeriesCohort => TrainingRepository putCohort c
      }
      (protocol, ts)
    }
    .map { case (p, ts) => interpreter(p).map { _ => ts }.unsafePerformSync } // avro's DataFileWriter is not thread safe
  }
}
dmrolfs/spotlight
sandbox/src/main/scala/spotlight/train/TrainOutlierAnalysis.scala
Scala
mit
1,573
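taskToFuture above bridges a callback-style effect into a Future by completing a Promise from the callback. The same pattern with only the standard library, where runCallback is a stand-in for Task.unsafePerformAsync:

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object PromiseBridge {
  // Registers a callback that completes the Promise exactly once, then
  // hands back the Promise's Future.
  def callbackToFuture[A](runCallback: (Try[A] => Unit) => Unit): Future[A] = {
    val p = Promise[A]()
    runCallback {
      case Success(a)  => p.success(a)
      case Failure(ex) => p.failure(ex)
    }
    p.future
  }

  def main(args: Array[String]): Unit = {
    val f = callbackToFuture[Int](cb => cb(Success(42)))
    println(Await.result(f, 1.second)) // 42
  }
}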
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.plan.nodes.exec import org.apache.flink.streaming.api.transformations.StreamTransformation import org.apache.flink.table.api.TableEnvironment import org.apache.flink.table.plan.nodes.physical.FlinkPhysicalRel import java.util /** * The representation of execution information for a [[FlinkPhysicalRel]]. * * @tparam E The TableEnvironment * @tparam T The type of the elements that result from this [[StreamTransformation]] */ trait ExecNode[E <: TableEnvironment, T] { /** * The [[StreamTransformation]] translated from this node. */ private var transformation: StreamTransformation[T] = _ /** * Translates this node into a Flink operator. * * <p>NOTE: returns same translate result if called multiple times. * * @param tableEnv The [[TableEnvironment]] of the translated Table. */ def translateToPlan(tableEnv: E): StreamTransformation[T] = { if (transformation == null) { transformation = translateToPlanInternal(tableEnv) } transformation } /** * Internal method, translates this node into a Flink operator. * * @param tableEnv The [[TableEnvironment]] of the translated Table. */ protected def translateToPlanInternal(tableEnv: E): StreamTransformation[T] /** * Returns an array of this node's inputs. If there are no inputs, * returns an empty list, not null. * * @return Array of this node's inputs */ def getInputNodes: util.List[ExecNode[E, _]] /** * Accepts a visit from a [[ExecNodeVisitor]]. * * @param visitor ExecNodeVisitor */ def accept(visitor: ExecNodeVisitor): Unit = { visitor.visit(this) } }
ueshin/apache-flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/exec/ExecNode.scala
Scala
apache-2.0
2,499
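translateToPlan above memoizes translateToPlanInternal so a node is translated once no matter how often it is asked. The caching skeleton in isolation (a sketch; like the trait above, it is not synchronized, so it assumes single-threaded planning):

trait TranslateOnce[Env, T] {
  private var cached: Option[T] = None

  protected def translateInternal(env: Env): T

  // Returns the same result on every call; translateInternal runs once.
  def translate(env: Env): T = {
    if (cached.isEmpty) cached = Some(translateInternal(env))
    cached.get
  }
}

object TranslateOnceDemo {
  def main(args: Array[String]): Unit = {
    val node = new TranslateOnce[String, Long] {
      protected def translateInternal(env: String): Long = {
        println(s"translating in $env") // printed only once
        42L
      }
    }
    println(node.translate("env"))
    println(node.translate("env")) // cached, no second "translating" line
  }
}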
/* * LabelExpandedImpl.scala * (LucreSwing) * * Copyright (c) 2014-2021 Hanns Holger Rutz. All rights reserved. * * This software is published under the GNU Affero General Public License v3+ * * * For further information, please contact Hanns Holger Rutz at * [email protected] */ package de.sciss.lucre.swing package graph package impl import de.sciss.lucre.expr.Context import de.sciss.lucre.expr.graph.Ex import de.sciss.lucre.swing.LucreSwing.deferTx import de.sciss.lucre.swing.graph.Label.{defaultHAlign, defaultVAlign, keyHAlign, keyVAlign} import de.sciss.lucre.swing.impl.ComponentHolder import de.sciss.lucre.{Disposable, Txn} import scala.swing.{Swing, Label => Peer} final class LabelExpandedImpl[T <: Txn[T]](protected val peer: Label) extends View[T] with ComponentHolder[Peer] with ComponentExpandedImpl[T] { type C = View.Component private[this] var obs: Disposable[T] = _ override def initComponent()(implicit tx: T, ctx: Context[T]): this.type = { val text = peer.text.expand[T] val text0 = text.value val hAlign = ctx.getProperty[Ex[Int]](peer, keyHAlign).fold(defaultHAlign)(_.expand[T].value) val vAlign = ctx.getProperty[Ex[Int]](peer, keyVAlign).fold(defaultVAlign)(_.expand[T].value) deferTx { val hAlignSwing = hAlign match { case Align.Left => scala.swing.Alignment.Left case Align.Center => scala.swing.Alignment.Center case Align.Right => scala.swing.Alignment.Right case Align.Trailing => scala.swing.Alignment.Trailing case _ => scala.swing.Alignment.Leading } // N.B. Scala Swing uses divergent default horizontal alignment of Center instead of Java Swing (CENTER) val c = new Peer(text0, Swing.EmptyIcon, hAlignSwing) if (vAlign != defaultVAlign) { c.verticalAlignment = vAlign match { case Align.Top => scala.swing.Alignment.Top case Align.Bottom => scala.swing.Alignment.Bottom case _ => scala.swing.Alignment.Center } } component = c } obs = text.changed.react { implicit tx => ch => deferTx { component.text = ch.now } } super.initComponent() } override def dispose()(implicit tx: T): Unit = { obs.dispose() super.dispose() } }
Sciss/LucreSwing
jvm/src/main/scala/de/sciss/lucre/swing/graph/impl/LabelExpandedImpl.scala
Scala
agpl-3.0
2,342
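The main move in initComponent above is mapping the graph-level Align constants onto scala.swing.Alignment with a catch-all default. The shape of that mapping in isolation, with local stand-ins for both enumerations (the real constants live in the lucre and scala.swing packages):

object AlignMappingSketch {
  sealed trait Align
  object Align {
    case object Left extends Align
    case object Center extends Align
    case object Right extends Align
    case object Leading extends Align
  }

  // Falls back to Leading for anything unrecognized, mirroring the
  // catch-all branch in initComponent.
  def toSwingAlignmentName(a: Align): String = a match {
    case Align.Left   => "Left"
    case Align.Center => "Center"
    case Align.Right  => "Right"
    case _            => "Leading"
  }

  def main(args: Array[String]): Unit =
    println(List[Align](Align.Right, Align.Leading).map(toSwingAlignmentName))
}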
// Copyright (C) 2016 MapRoulette contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.maproulette.provider

import play.api.libs.mailer._
import java.io.File
import org.apache.commons.mail.EmailAttachment
import javax.inject.{Inject, Singleton}
import org.maproulette.Config
import org.maproulette.models.{UserNotification, UserNotificationEmail}
import scala.concurrent.Future

/**
 * @author nrotstan
 *
 * TODO: internationalize these messages and move them out into templates
 */
@Singleton
class EmailProvider @Inject() (mailerClient: MailerClient, config: Config) {
  import scala.concurrent.ExecutionContext.Implicits.global

  def emailNotification(toAddress: String, notification: UserNotificationEmail) = {
    val notificationName = UserNotification.notificationTypeMap.get(notification.notificationType).get
    val emailSubject = s"New MapRoulette notification: ${notificationName}"
    val notificationDetails = notification.extra match {
      case Some(details) => s"\n${details}"
      case None          => ""
    }

    val emailBody = s"""
        |You have received a new MapRoulette notification:
        |
        |${notificationName}
        |${notificationDetails}
        |${this.notificationFooter}""".stripMargin

    val email = Email(emailSubject, config.getEmailFrom.get, Seq(toAddress), bodyText = Some(emailBody))
    mailerClient.send(email)
  }

  def emailNotificationDigest(toAddress: String, notifications: List[UserNotificationEmail]) = {
    val notificationNames = notifications.map(notification =>
      UserNotification.notificationTypeMap.get(notification.notificationType).get
    )
    val notificationNameCounts = notificationNames.groupBy(identity).view.mapValues(_.size)
    val notificationLines = notificationNameCounts.foldLeft("") { (s: String, pair: (String, Int)) =>
      s + pair._1 + " (" + pair._2 + ")\n"
    }
    val emailSubject = "MapRoulette Notifications Daily Digest"
    val emailBody = s"""
        |You have received new MapRoulette notifications over the past day:
        |
        |${notificationLines}${this.notificationFooter}""".stripMargin

    val email = Email(emailSubject, config.getEmailFrom.get, Seq(toAddress), bodyText = Some(emailBody))
    mailerClient.send(email)
  }

  private def notificationFooter: String = {
    val urlPrefix = config.getPublicOrigin.get
    s"""
       |You can view your notifications by visiting your MapRoulette Inbox at:
       |${urlPrefix}/inbox
       |
       |Happy mapping!
       |--The MapRoulette Team
       |
       |
       |P.S. You received this because you asked to be emailed when you
       |received this type of notification in MapRoulette. You can manage
       |your notification subscriptions and email preferences at:
       |${urlPrefix}/profile""".stripMargin
  }
}
Crashfreak/maproulette2
app/org/maproulette/provider/EmailProvider.scala
Scala
apache-2.0
2,878
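emailNotificationDigest above counts notifications per type with groupBy(identity) plus mapValues(_.size). That idiom on its own (Scala 2.13 collections assumed for .view.mapValues):

object CountByName {
  // groupBy(identity) buckets equal elements; mapValues turns each bucket
  // into its size.
  def counts(names: List[String]): Map[String, Int] =
    names.groupBy(identity).view.mapValues(_.size).toMap

  def main(args: Array[String]): Unit =
    println(counts(List("review", "mention", "review"))) // Map(review -> 2, mention -> 1)
}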
package scodec.msgpack
package codecs

import scodec.bits.ByteVector

class MessagePackCodecSpec extends TestSuite {

  implicit val codec: scodec.Codec[MessagePack] = MessagePackCodec

  "nil" should "be able to encode and decode" in {
    roundtrip[MessagePack](MNil)
  }

  "bool" should "be able to encode and decode" in {
    roundtrip[MessagePack](MTrue)
    roundtrip[MessagePack](MFalse)
  }

  "positive fix int" should "be able to encode and decode" in {
    roundtrip[MessagePack](MPositiveFixInt(0))
    roundtrip[MessagePack](MPositiveFixInt(127))
  }

  "uint8" should "be able to encode and decode" in {
    roundtrip[MessagePack](MUInt8(0))
    roundtrip[MessagePack](MUInt8(255))
  }

  "uint16" should "be able to encode and decode" in {
    roundtrip[MessagePack](MUInt16(0))
    roundtrip[MessagePack](MUInt16(65535))
  }

  "uint32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MUInt32(0))
    roundtrip[MessagePack](MUInt32(4294967295L))
  }

  "negative fix int" should "be able to encode and decode" in {
    roundtrip[MessagePack](MNegativeFixInt(-32))
    roundtrip[MessagePack](MNegativeFixInt(-1))
  }

  "int8" should "be able to encode and decode" in {
    roundtrip[MessagePack](MInt8(-128))
    roundtrip[MessagePack](MInt8(127))
  }

  "int16" should "be able to encode and decode" in {
    roundtrip[MessagePack](MInt16(-32768))
    roundtrip[MessagePack](MInt16(32767))
  }

  "int32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MInt32(Int.MinValue))
    roundtrip[MessagePack](MInt32(Int.MaxValue))
  }

  "int64" should "be able to encode and decode" in {
    roundtrip[MessagePack](MInt64(Long.MinValue))
    roundtrip[MessagePack](MInt64(Long.MaxValue))
  }

  "float32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MFloat32(Float.MinValue))
    roundtrip[MessagePack](MFloat32(Float.MaxValue))
  }

  "float64" should "be able to encode and decode" in {
    roundtrip[MessagePack](MFloat64(Double.MinValue))
    roundtrip[MessagePack](MFloat64(Double.MaxValue))
  }

  "fix str" should "be able to encode and decode" in {
    roundtrip[MessagePack](MFixString(""))
    roundtrip[MessagePack](MFixString("a" * 10))
  }

  "str8" should "be able to encode and decode" in {
    roundtrip[MessagePack](MString8(""))
    roundtrip[MessagePack](MString8("a" * 255))
  }

  "str16" should "be able to encode and decode" in {
    roundtrip[MessagePack](MString16(""))
    roundtrip[MessagePack](MString16("a" * 65535))
  }

  "str32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MString32(""))
    // Bug fix: the original used "a" * Long.MaxValue.toInt, but
    // Long.MaxValue.toInt == -1, so that expression produced the empty string
    // and merely repeated the assertion above. Use a length that actually
    // requires the str32 format (> 65535 bytes).
    roundtrip[MessagePack](MString32("a" * 65536))
  }

  "bin8" should "be able to encode and decode" in {
    roundtrip[MessagePack](MBinary8(ByteVector.empty))
    roundtrip[MessagePack](MBinary8(ByteVector(0xa0)))
  }

  "bin16" should "be able to encode and decode" in {
    roundtrip[MessagePack](MBinary16(ByteVector.empty))
    roundtrip[MessagePack](MBinary16(ByteVector(0xff)))
  }

  "bin32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MBinary32(ByteVector.empty))
    roundtrip[MessagePack](MBinary32(ByteVector(0x11)))
  }

  "fix array" should "be able to encode and decode" in {
    roundtrip[MessagePack](MFixArray(Vector.empty[MessagePack]))
    roundtrip[MessagePack](MFixArray(Vector(MInt8(127))))
  }

  "array16" should "be able to encode and decode" in {
    roundtrip[MessagePack](MArray16(Vector.empty[MessagePack]))
    roundtrip[MessagePack](MArray16(Vector(MInt8(127))))
  }

  "array32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MArray32(Vector.empty[MessagePack]))
    roundtrip[MessagePack](MArray32(Vector(MInt8(127))))
  }

  "fix map" should "be able to encode and decode" in {
    roundtrip[MessagePack](MFixMap(Map.empty))
    roundtrip[MessagePack](MFixMap(Map(MFixString("a") -> MInt8(1))))
  }

  "map16" should "be able to encode and decode" in {
    roundtrip[MessagePack](MMap16(Map.empty))
    roundtrip[MessagePack](MMap16(Map(MFixString("a") -> MInt8(1))))
  }

  "map32" should "be able to encode and decode" in {
    roundtrip[MessagePack](MMap32(Map.empty))
    roundtrip[MessagePack](MMap32(Map(MFixString("a") -> MInt8(1))))
  }
}
xuwei-k/scodec-msgpack
shared/src/test/scala/scodec/msgpack/codecs/MessagePackCodecSpec.scala
Scala
mit
4,274
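The spec above relies on a `roundtrip` helper inherited from `TestSuite`, which is not shown in this file. A minimal sketch of what such a helper could look like against the standard scodec `Codec` API; the helper body here is an assumption for illustration, not the repository's actual implementation:

import scodec.Codec
import scodec.bits.BitVector

// Encode a value, decode the bits back, and check that we recover the
// original value with no bits left over.
def roundtrip[A](a: A)(implicit codec: Codec[A]): Unit = {
  val bits: BitVector = codec.encode(a).require
  val result = codec.decode(bits).require
  assert(result.value == a)
  assert(result.remainder.isEmpty)
}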
sealed trait IntegralNumber
sealed trait FiniteNumber extends IntegralNumber

object IntegralNumber {
  sealed abstract class BaseNumber extends IntegralNumber
  sealed abstract class NonFinite extends BaseNumber
  object NaN extends NonFinite
  sealed abstract class FiniteNumberImpl[N](val value: N) extends BaseNumber with FiniteNumber
  sealed class IntNumber(value: Int) extends FiniteNumberImpl[Int](value)

  def test(t: IntNumber, o: IntegralNumber) = o match {
    case NaN => -1
    case o: IntNumber => t.value.compare(o.value)
  }
}
som-snytt/dotty
tests/patmat/t7437.scala
Scala
apache-2.0
546
/*
 * Copyright 2012 Twitter Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.twitter.zipkin.web

import com.twitter.finagle.tracing.SpanId
import com.twitter.zipkin.common.{Endpoint, Span, Trace}

case class SpanTimestamp(name: String, startTs: Long, endTs: Long) {
  def duration = endTs - startTs
}

object TraceSummary {

  /**
   * Return a summary of this trace or none if we
   * cannot construct a trace summary. Could be that we have no spans.
   */
  def apply(t: Trace): Option[TraceSummary] = {
    for (traceId <- t.id; startEnd <- t.getStartAndEndTimestamp)
      yield TraceSummary(
        SpanId(traceId).toString,
        startEnd.start,
        startEnd.end,
        (startEnd.end - startEnd.start).toInt,
        spanTimestamps(t.spans),
        t.endpoints.toList)
  }

  /**
   * Returns the (service name, start, end) timestamps of each span in the
   * trace, one SpanTimestamp per (span, service) pair.
   */
  def spanTimestamps(spans: Seq[Span]): List[SpanTimestamp] = {
    for {
      span <- spans.toList
      serviceName <- span.serviceNames
      first <- span.firstAnnotation
      last <- span.lastAnnotation
    } yield SpanTimestamp(serviceName, first.timestamp, last.timestamp)
  }
}

/**
 * json-friendly representation of a trace summary
 *
 * @param traceId id of this trace
 * @param startTs when did the trace start?
 * @param endTs when did the trace end?
 * @param durationMicro how long did the traced operation take?
 * @param endpoints endpoints involved in the traced operation
 */
case class TraceSummary(
  traceId: String,
  startTs: Long,
  endTs: Long,
  durationMicro: Int,
  spanTimestamps: List[SpanTimestamp],
  endpoints: List[Endpoint])
jfeltesse-mdsol/zipkin
zipkin-web/src/main/scala/com/twitter/zipkin/web/TraceSummary.scala
Scala
apache-2.0
2,151
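The original doc comment on `spanTimestamps` spoke of a map from services to durations; a short illustrative step (not part of the file above) showing how the flat `List[SpanTimestamp]` collapses into that shape:

// Group the flat span timestamps by service name and keep only the
// microsecond durations, giving service -> durations.
def durationsByService(timestamps: List[SpanTimestamp]): Map[String, List[Long]] =
  timestamps.groupBy(_.name).mapValues(_.map(_.duration)).toMap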
package mljoin

import java.util.concurrent.atomic.AtomicLong
import java.util.ArrayList
import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.rdd._

object Statistics {
  val serializeTime = new AtomicLong
  val deserializeTime = new AtomicLong
  val serialized_B_i = new AtomicLong
  val serialized_simulated_local_B_i = new AtomicLong
  val serialized_local_B_i = new AtomicLong
  val seeding = new AtomicLong
  val prepareParameters = new AtomicLong
  val doSparkSQLJoinNCoGroup = new AtomicLong
  val groupByKeyFlatMapApplication = new AtomicLong
  val modelProcessTime = new AtomicLong
  val dataProcessTime = new AtomicLong
  val numSerialization = new AtomicLong
  val numDeSerialization = new AtomicLong

  // Assumption no parallel joinNCoGroup
  @volatile var isFetched = false

  def get(): ArrayList[Long] = {
    val ret: ArrayList[Long] = new ArrayList[Long]
    Statistics.synchronized {
      if (!isFetched) {
        isFetched = true
        ret.add(serializeTime.get)
        ret.add(deserializeTime.get)
        ret.add(serialized_B_i.get)
        ret.add(serialized_simulated_local_B_i.get)
        ret.add(serialized_local_B_i.get)
        ret.add(seeding.get)
        ret.add(prepareParameters.get)
        ret.add(doSparkSQLJoinNCoGroup.get)
        ret.add(groupByKeyFlatMapApplication.get)
        ret.add(modelProcessTime.get)
        ret.add(dataProcessTime.get)
        ret.add(numSerialization.get)
        ret.add(numDeSerialization.get)
        serializeTime.set(0)
        deserializeTime.set(0)
        serialized_B_i.set(0)
        serialized_simulated_local_B_i.set(0)
        serialized_local_B_i.set(0)
        seeding.set(0)
        prepareParameters.set(0)
        doSparkSQLJoinNCoGroup.set(0)
        groupByKeyFlatMapApplication.set(0)
        modelProcessTime.set(0)
        dataProcessTime.set(0)
        numSerialization.set(0)
        numDeSerialization.set(0)
      }
    }
    ret
  }

  def printStatistics(X: RDD[Data2]): Unit = {
    val stats = X.map(x => Statistics.get()).filter(_.size() > 0).reduce((x, y) => {
      val ret11 = new ArrayList[Long]
      for (i <- 0 until y.size()) {
        ret11.add(y.get(i) + x.get(i))
      }
      ret11
    })
    // ---------------------------------------------------------------------------------------------------
    System.out.println("The statistics for this run are:")
    System.out.print("Serialization time: " + stats.get(0) * (1e-9) + " sec.\n")
    System.out.print("Deserialization time: " + stats.get(1) * (1e-9) + " sec.\n")
    System.out.print("serialized_B_i time: " + stats.get(2) * (1e-9) + " sec.\n")
    System.out.print("serialized_simulated_local_B_i time: " + stats.get(3) * (1e-9) + " sec.\n")
    System.out.print("serialized_local_B_i time: " + stats.get(4) * (1e-9) + " sec.\n")
    System.out.print("seeding time: " + stats.get(5) * (1e-9) + " sec.\n")
    System.out.print("prepareParameters time: " + stats.get(6) * (1e-9) + " sec.\n")
    System.out.print("doSparkSQLJoinNCoGroup time: " + stats.get(7) * (1e-9) + " sec.\n")
    System.out.print("groupByKeyFlatMapApplication time: " + stats.get(8) * (1e-9) + " sec.\n")
    System.out.print("Model processing time: " + stats.get(9) * (1e-9) + " sec.\n")
    System.out.print("Data processing time: " + stats.get(10) * (1e-9) + " sec.\n")
    System.out.print("Serialization count: " + stats.get(11) + " \n")
    System.out.print("Deserialization count: " + stats.get(12) + " \n")
    // ---------------------------------------------------------------------------------------------------
  }
}
niketanpansare/mljoin
src/main/scala/mljoin/Statistics.scala
Scala
apache-2.0
3,642
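A sketch of how call sites might feed these counters: time a block with `System.nanoTime` and charge the elapsed nanoseconds to one of the `AtomicLong`s above. The `timed` helper and the `serialize(obj)` call in the usage comment are hypothetical, added only to illustrate the accumulation pattern:

import java.util.concurrent.atomic.AtomicLong

// Run a block, add its wall-clock nanoseconds to the given counter,
// and return the block's result.
def timed[T](counter: AtomicLong)(block: => T): T = {
  val start = System.nanoTime()
  try block
  finally counter.addAndGet(System.nanoTime() - start)
}

// e.g. val bytes = timed(Statistics.serializeTime) { serialize(obj) }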
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
// If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package ducttape.exec

import ducttape.util.BashException
import ducttape.util.Shell
import ducttape.util.Files
import ducttape.syntax.Namespace
import ducttape.syntax.AbstractSyntaxTree.PackageDef
import ducttape.syntax.AbstractSyntaxTree.LiteralSpec
import grizzled.slf4j.Logging

object PackageBuilder {
  def isBuildSuccessful(buildEnv: BuildEnvironment): Boolean =
    CompletionChecker.isExitCodeZero(buildEnv.buildExitCodeFile)
}

/**
 * If we determine that a package is out of date (the requested version is not
 * already built within ducttape), then we use this PackageBuilder to build
 * a newly checked-out copy.
 */
class PackageBuilder(dirs: DirectoryArchitect, packageVersions: PackageVersioner) extends Logging {

  def build(packages: Iterable[PackageDef]) {
    for (myPackage: PackageDef <- packages) {
      val packageNamespace: Namespace = myPackage.name // may contain slash delimited namespace
      val packageName: String = packageNamespace.toString
      val version: String = packageVersions(packageNamespace)
      val buildEnv = new BuildEnvironment(dirs, version, packageNamespace)

      // TODO: XXX: Can build ever interfere with another running workflow?
      if (buildEnv.buildDir.exists) {
        System.err.println(s"Removing incomplete package build: ${buildEnv.buildDir.toString}")
        Files.deleteDir(buildEnv.buildDir)
      }

      System.err.println(s"Checking out tool ${packageName} into ${buildEnv.buildDir}")
      packageVersions.checkout(myPackage, buildEnv.buildDir)

      // TODO: Check when the build code changes
      System.err.println(s"Building tool ${packageName} in ${buildEnv.buildDir}")
      val buildCmds = Seq(myPackage.commands.toString)

      // package params have already been checked to be literal
      val env: Seq[(String, String)] =
        myPackage.params.filter(!_.dotVariable).map(_.asInstanceOf[LiteralSpec]).map {
          spec => (spec.name, spec.rval.value)
        }
      val stdPrefix = "build " + packageName
      val exitCode = Shell.run(buildCmds, stdPrefix, buildEnv.buildDir, env,
        buildEnv.buildStdoutFile, buildEnv.buildStderrFile)
      Files.write(s"${exitCode}", buildEnv.buildExitCodeFile)
      if (exitCode != 0) {
        // just bail out, this workflow is doomed without its tools
        throw new BashException(s"Build task ${packageName} returned ${exitCode}")
      }
      packageVersions.writeHeadVersion(myPackage, version)
    }
  }
}
jhclark/ducttape
src/main/scala/ducttape/exec/PackageBuilder.scala
Scala
mpl-2.0
2,707
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging

object CachedPartitionedFilePreferredLocs extends Logging {

  private var externalDBClient: ExternalDBClient = null

  def getPreferredLocsByCache(files: Array[PartitionedFile]): Seq[String] = {
    if (null == externalDBClient) {
      externalDBClient = ExternalDBClientFactory.getDBClientInstance(SparkEnv.get)
    }
    var preferredLocs = new ArrayBuffer[String]
    val hostToCachedBytes = mutable.HashMap.empty[String, Long]
    files.foreach { file =>
      val cacheMetaInfoValueArr = externalDBClient.get(file.filePath, file.start, file.length)
      if (cacheMetaInfoValueArr.size > 0) {
        // host <-> cachedBytes
        cacheMetaInfoValueArr.foreach(x => {
          hostToCachedBytes.put(x.host, hostToCachedBytes.getOrElse(x.host, 0L) + x.length)
        })
      }
    }
    // TODO if cachedBytes <<< hdfsPreferLocBytes
    hostToCachedBytes.toSeq.sortWith(_._2 > _._2).take(3).foreach(x => preferredLocs.+=(x._1))
    preferredLocs.take(3)
  }
}
Intel-bigdata/OAP
oap-cache/oap/src/main/scala/org/apache/spark/sql/execution/datasources/CachedPartitionedFilePreferredLocs.scala
Scala
apache-2.0
1,980
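The ranking step above reduces to "sum cached bytes per host, keep the top three". Pulled out on its own, assuming the host-to-bytes map has already been built (the function name is illustrative):

// Hosts holding the most cached bytes, largest first, capped at n.
def topHosts(hostToCachedBytes: Map[String, Long], n: Int = 3): Seq[String] =
  hostToCachedBytes.toSeq.sortWith(_._2 > _._2).take(n).map(_._1)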
package io.github.mandar2812.dynaml.optimization

import breeze.linalg.{DenseMatrix, DenseVector, norm}
import org.scalatest.{FlatSpec, Matchers}

/**
  * Created by mandar on 5/7/16.
  */
class ConjugateGradientSpec extends FlatSpec with Matchers {

  "Conjugate Gradient " should "be able to solve linear systems " +
    "of the form A.x = b, where A is symmetric positive definite. " in {

    val A = DenseMatrix((1.0, 0.0, 0.0), (0.0, 2.0, 0.0), (0.0, 0.0, 4.0))
    val b = DenseVector(2.0, 4.0, 8.0)
    val x = DenseVector(2.0, 2.0, 2.0)

    val epsilon = 1E-6

    val xnew = ConjugateGradient.runCG(A, b, DenseVector(1.0, 1.0, 1.0), epsilon, MAX_ITERATIONS = 3)

    assert(norm(xnew - x) <= epsilon)
  }
}
transcendent-ai-labs/DynaML
dynaml-core/src/test/scala/io/github/mandar2812/dynaml/optimization/ConjugateGradientSpec.scala
Scala
apache-2.0
717
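For context on what `ConjugateGradient.runCG` is being tested against: a textbook conjugate-gradient loop for a symmetric positive-definite system, written with Breeze. This is a reference sketch, not DynaML's implementation:

import breeze.linalg.{DenseMatrix, DenseVector}

// Solve A.x = b for symmetric positive definite A, starting from x0,
// stopping when the residual norm drops below eps or maxIter is reached.
def cg(A: DenseMatrix[Double], b: DenseVector[Double],
       x0: DenseVector[Double], eps: Double, maxIter: Int): DenseVector[Double] = {
  var x = x0.copy
  var r = b - A * x              // residual
  var p = r.copy                 // search direction
  var rsOld = r dot r
  var i = 0
  while (i < maxIter && math.sqrt(rsOld) > eps) {
    val Ap = A * p
    val alpha = rsOld / (p dot Ap)
    x = x + p * alpha
    r = r - Ap * alpha
    val rsNew = r dot r
    p = r + p * (rsNew / rsOld)  // new direction, conjugate to the previous ones
    rsOld = rsNew
    i += 1
  }
  x
}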
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.metric

import org.apache.spark.{Accumulable, AccumulableParam, SparkContext}

/**
 * Create a layer for specialized metric. We cannot add `@specialized` to
 * `Accumulable/AccumulableParam` because it will break Java source compatibility.
 *
 * An implementation of SQLMetric should override `+=` and `add` to avoid boxing.
 */
private[sql] abstract class SQLMetric[R <: SQLMetricValue[T], T](
    name: String, val param: SQLMetricParam[R, T])
  extends Accumulable[R, T](param.zero, param, Some(name), true)

/**
 * Create a layer for specialized metric. We cannot add `@specialized` to
 * `Accumulable/AccumulableParam` because it will break Java source compatibility.
 */
private[sql] trait SQLMetricParam[R <: SQLMetricValue[T], T] extends AccumulableParam[R, T] {
  def zero: R
}

/**
 * Create a layer for specialized metric. We cannot add `@specialized` to
 * `Accumulable/AccumulableParam` because it will break Java source compatibility.
 */
private[sql] trait SQLMetricValue[T] extends Serializable {
  def value: T

  override def toString: String = value.toString
}

/**
 * A wrapper of Long to avoid boxing and unboxing when using Accumulator
 */
private[sql] class LongSQLMetricValue(private var _value: Long) extends SQLMetricValue[Long] {

  def add(incr: Long): LongSQLMetricValue = {
    _value += incr
    this
  }

  // Although there is a boxing here, it's fine because it's only called in SQLListener
  override def value: Long = _value
}

/**
 * A wrapper of Int to avoid boxing and unboxing when using Accumulator
 */
private[sql] class IntSQLMetricValue(private var _value: Int) extends SQLMetricValue[Int] {

  def add(term: Int): IntSQLMetricValue = {
    _value += term
    this
  }

  // Although there is a boxing here, it's fine because it's only called in SQLListener
  override def value: Int = _value
}

/**
 * A specialized long Accumulable to avoid boxing and unboxing when using Accumulator's
 * `+=` and `add`.
 */
private[sql] class LongSQLMetric private[metric](name: String)
  extends SQLMetric[LongSQLMetricValue, Long](name, LongSQLMetricParam) {

  override def +=(term: Long): Unit = {
    localValue.add(term)
  }

  override def add(term: Long): Unit = {
    localValue.add(term)
  }
}

/**
 * A specialized int Accumulable to avoid boxing and unboxing when using Accumulator's
 * `+=` and `add`.
 */
private[sql] class IntSQLMetric private[metric](name: String)
  extends SQLMetric[IntSQLMetricValue, Int](name, IntSQLMetricParam) {

  override def +=(term: Int): Unit = {
    localValue.add(term)
  }

  override def add(term: Int): Unit = {
    localValue.add(term)
  }
}

private object LongSQLMetricParam extends SQLMetricParam[LongSQLMetricValue, Long] {

  override def addAccumulator(r: LongSQLMetricValue, t: Long): LongSQLMetricValue = r.add(t)

  override def addInPlace(r1: LongSQLMetricValue, r2: LongSQLMetricValue): LongSQLMetricValue =
    r1.add(r2.value)

  override def zero(initialValue: LongSQLMetricValue): LongSQLMetricValue = zero

  override def zero: LongSQLMetricValue = new LongSQLMetricValue(0L)
}

private object IntSQLMetricParam extends SQLMetricParam[IntSQLMetricValue, Int] {

  override def addAccumulator(r: IntSQLMetricValue, t: Int): IntSQLMetricValue = r.add(t)

  override def addInPlace(r1: IntSQLMetricValue, r2: IntSQLMetricValue): IntSQLMetricValue =
    r1.add(r2.value)

  override def zero(initialValue: IntSQLMetricValue): IntSQLMetricValue = zero

  override def zero: IntSQLMetricValue = new IntSQLMetricValue(0)
}

private[sql] object SQLMetrics {

  def createIntMetric(sc: SparkContext, name: String): IntSQLMetric = {
    val acc = new IntSQLMetric(name)
    sc.cleaner.foreach(_.registerAccumulatorForCleanup(acc))
    acc
  }

  def createLongMetric(sc: SparkContext, name: String): LongSQLMetric = {
    val acc = new LongSQLMetric(name)
    sc.cleaner.foreach(_.registerAccumulatorForCleanup(acc))
    acc
  }

  /**
   * A metric whose value will be ignored. Use this one when we need a metric parameter but don't
   * care about the value.
   */
  val nullLongMetric = new LongSQLMetric("null")
}
andrewor14/iolap
sql/core/src/main/scala/org/apache/spark/sql/metric/SQLMetrics.scala
Scala
apache-2.0
4,928
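Usage of the factory, assuming a live `SparkContext` named `sc` (the metric name is illustrative). The specialized `+=` takes a primitive `Long`, which is what lets hot paths avoid boxing:

// Executors bump the metric; the driver reads the accumulated total.
val bytesRead = SQLMetrics.createLongMetric(sc, "bytes read")
bytesRead += 1024L
val total: Long = bytesRead.value.value  // Accumulable value, then the wrapped Long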
package com.equalinformation.scala.programs.list

/**
  * Created by bpupadhyaya on 6/21/16.
  */
object SimpleListApp_AI_1 {
  def main(args: Array[String]): Unit = {
    val paragraph = scala.io.StdIn.readLine("Please enter three word English sentences: ")
    val words = paragraph.split(" ")
    // Build each sentence with constant-time prepend (::), reversing once per
    // sentence; the original left these as immutable vals with the updates
    // commented out, so it never accumulated anything.
    var sentences: List[List[String]] = Nil
    var currentSentence: List[String] = Nil
    for (word <- words) {
      if (word.endsWith(".")) {
        currentSentence = word.stripSuffix(".") :: currentSentence
        sentences = currentSentence.reverse :: sentences
        currentSentence = Nil
      } else {
        currentSentence = word :: currentSentence
      }
    }
    val result = sentences.reverse
    //TODO now each sentence is a list of words, apply grammar rules for analysis
    result.foreach(println)
  }
}
bpupadhyaya/scala-programs-collection
scala-programs-collection/src/main/scala/com/equalinformation/scala/programs/list/SimpleListApp_AI_1.scala
Scala
apache-2.0
845
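The same sentence-splitting pass can be written without mutable state, folding over the words while keeping the constant-time prepends the file's TODO asks for; a sketch only:

// Accumulate (finished sentences, words of the sentence in progress),
// reversing each sentence once when its terminating "." is seen.
def splitSentences(words: Seq[String]): List[List[String]] = {
  val (done, pending) = words.foldLeft((List.empty[List[String]], List.empty[String])) {
    case ((sentences, current), word) if word.endsWith(".") =>
      ((word.stripSuffix(".") :: current).reverse :: sentences, Nil)
    case ((sentences, current), word) =>
      (sentences, word :: current)
  }
  (if (pending.isEmpty) done else pending.reverse :: done).reverse
}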
package animal

/**
 * @author vitchyr
 */
import vearth.World
import vearth.animal.Ant

/** Eye creates a 1D image of the world. */
class Eye(protected val world: World, protected val ant: Ant) extends Sensor {
  def getInput = {
  }
}
vpong/vearth
src/main/scala/animal/Eye.scala
Scala
mit
240
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.admin

import joptsimple.OptionParser
import kafka.utils._
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import kafka.common.{TopicAndPartition, AdminCommandFailedException}

object ReassignPartitionsCommand extends Logging {

  def main(args: Array[String]): Unit = {
    val parser = new OptionParser
    val jsonFileOpt = parser.accepts("path-to-json-file",
      "REQUIRED: The JSON file with the list of partitions and the " +
      "new replicas they should be reassigned to in the following format - \n" +
      "{\"partitions\":\n\t[{\"topic\": \"foo\",\n\t \"partition\": 1,\n\t \"replicas\": [1,2,3] }]\n}")
      .withRequiredArg
      .describedAs("partition reassignment json file path")
      .ofType(classOf[String])
    val zkConnectOpt = parser.accepts("zookeeper",
      "REQUIRED: The connection string for the zookeeper connection in the " +
      "form host:port. Multiple URLS can be given to allow fail-over.")
      .withRequiredArg
      .describedAs("urls")
      .ofType(classOf[String])

    val options = parser.parse(args: _*)

    for (arg <- List(jsonFileOpt, zkConnectOpt)) {
      if (!options.has(arg)) {
        System.err.println("Missing required argument \"" + arg + "\"")
        parser.printHelpOn(System.err)
        System.exit(1)
      }
    }

    val jsonFile = options.valueOf(jsonFileOpt)
    val zkConnect = options.valueOf(zkConnectOpt)
    val jsonString = Utils.readFileAsString(jsonFile)
    var zkClient: ZkClient = null

    try {
      // read the json file into a string
      val partitionsToBeReassigned = ZkUtils.parsePartitionReassignmentData(jsonString)
      if (partitionsToBeReassigned.isEmpty)
        throw new AdminCommandFailedException("Partition reassignment data file %s is empty".format(jsonFile))

      zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer)
      val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, partitionsToBeReassigned)

      if (reassignPartitionsCommand.reassignPartitions())
        println("Successfully started reassignment of partitions %s".format(partitionsToBeReassigned))
      else
        println("Failed to reassign partitions %s".format(partitionsToBeReassigned))
    } catch {
      case e =>
        println("Partitions reassignment failed due to " + e.getMessage)
        println(Utils.stackTrace(e))
    } finally {
      if (zkClient != null)
        zkClient.close()
    }
  }
}

class ReassignPartitionsCommand(zkClient: ZkClient,
                                partitions: collection.Map[TopicAndPartition, collection.Seq[Int]])
  extends Logging {

  def reassignPartitions(): Boolean = {
    try {
      val validPartitions = partitions.filter(p => validatePartition(zkClient, p._1.topic, p._1.partition))
      val jsonReassignmentData = ZkUtils.getPartitionReassignmentZkData(validPartitions)
      ZkUtils.createPersistentPath(zkClient, ZkUtils.ReassignPartitionsPath, jsonReassignmentData)
      true
    } catch {
      case ze: ZkNodeExistsException =>
        val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient)
        throw new AdminCommandFailedException("Partition reassignment currently in " +
          "progress for %s. Aborting operation".format(partitionsBeingReassigned))
      case e =>
        error("Admin command failed", e)
        false
    }
  }

  def validatePartition(zkClient: ZkClient, topic: String, partition: Int): Boolean = {
    // check if partition exists
    val partitionsOpt = ZkUtils.getPartitionsForTopics(zkClient, List(topic)).get(topic)
    partitionsOpt match {
      case Some(partitions) =>
        if (partitions.contains(partition)) {
          true
        } else {
          error("Skipping reassignment of partition [%s,%d] ".format(topic, partition) +
            "since it doesn't exist")
          false
        }
      case None =>
        error("Skipping reassignment of partition " +
          "[%s,%d] since topic %s doesn't exist".format(topic, partition, topic))
        false
    }
  }
}

sealed trait ReassignmentStatus { def status: Int }

case object ReassignmentCompleted extends ReassignmentStatus {
  val status = 1
}

case object ReassignmentInProgress extends ReassignmentStatus {
  val status = 0
}

case object ReassignmentFailed extends ReassignmentStatus {
  val status = -1
}
kavink92/kafka-0.8.0-beta1-src
core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
Scala
apache-2.0
5,116
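For reference, the file layout the --path-to-json-file option expects, reconstructed from the help text embedded in the command above; "foo", the partition number, and the replica ids are the help text's own placeholders:

{"partitions":
  [{"topic": "foo",
    "partition": 1,
    "replicas": [1,2,3] }]
}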
package iomonad

import language.higherKinds
import language.implicitConversions

/**
  * Created by ariwaranosai on 16/8/27.
  *
  */

trait Functor[F[_]] { self =>
  def map[A, B](a: F[A])(f: A => B): F[B]
}

trait Applicative[F[_]] extends Functor[F] { self =>
  def unit[A](a: => A): F[A]
  def apply[A, B](f: F[A => B])(fa: F[A]): F[B]

  override def map[A, B](a: F[A])(f: (A) => B): F[B] =
    apply(unit[A => B](f))(a)
}

trait Monad[F[_]] extends Applicative[F] {
  def flatMap[A, B](a: F[A])(f: A => F[B]): F[B]

  override def apply[A, B](f: F[(A) => B])(fa: F[A]): F[B] =
    flatMap(fa)(x => map(f)(y => y(x)))

  override def map[A, B](a: F[A])(f: (A) => B): F[B] =
    flatMap(a)(x => unit(f(x)))

  def map2[A, B, C](a: F[A], b: F[B])(f: (A, B) => C): F[C] =
    flatMap(a)(x => map(b)(y => f(x, y)))

  implicit def toMonadic[A](a: F[A]): Monadic[F, A] = new Monadic[F, A] {
    val F = Monad.this
    def get = a
  }

  def sequence_[A](fs: Stream[F[A]]): F[Unit] = foreachM(fs)(skip)
  def sequence_[A](fs: F[A]*): F[Unit] = sequence_(fs.toStream)

  def replicateM[A](n: Int)(f: F[A]): F[List[A]] =
    Stream.fill(n)(f).foldRight(unit(List[A]()))(map2(_, _)(_ :: _))

  def replicateM_[A](n: Int)(f: F[A]): F[Unit] =
    foreachM(Stream.fill(n)(f))(skip)

  def as[A, B](a: F[A])(b: B): F[B] = map(a)(_ => b)

  def skip[A](a: F[A]): F[Unit] = as(a)(())

  def when[A](b: Boolean)(fa: => F[A]): F[Boolean] =
    if (b) as(fa)(true) else unit(false)

  def forever[A, B](a: F[A]): F[B] = {
    lazy val t: F[B] = a flatMap (_ => t)
    t
  }

  def while_(a: F[Boolean])(b: F[Unit]): F[Unit] = {
    lazy val t: F[Unit] = while_(a)(b)
    a flatMap (c => skip(when(c)(t)))
  }

  def doWhile[A](a: F[A])(cond: A => F[Boolean]): F[Unit] = for {
    a1 <- a
    ok <- cond(a1)
    _ <- if (ok) doWhile(a)(cond) else unit(())
  } yield ()

  def foldM[A, B](l: Stream[A])(z: B)(f: (B, A) => F[B]): F[B] =
    l match {
      case h #:: t => f(z, h) flatMap (z2 => foldM(t)(z2)(f))
      case _ => unit(z)
    }

  def foldM_[A, B](l: Stream[A])(z: B)(f: (B, A) => F[B]): F[Unit] =
    skip { foldM(l)(z)(f) }

  def foreachM[A](l: Stream[A])(f: A => F[Unit]): F[Unit] =
    foldM_(l)(())((u, a) => skip(f(a)))

  def seq[A, B, C](f: A => F[B])(g: B => F[C]): A => F[C] =
    f andThen (fb => flatMap(fb)(g))
}

trait Monadic[F[_], A] {
  val F: Monad[F]
  def get: F[A]

  private val a = get

  def map[B](f: A => B): F[B] = F.map(a)(f)
  def flatMap[B](f: A => F[B]): F[B] = F.flatMap(a)(f)
  def **[B](b: F[B]) = F.map2(a, b)((_, _))
  def *>[B](b: F[B]) = F.map2(a, b)((_, b) => b)
  def map2[B, C](b: F[B])(f: (A, B) => C): F[C] = F.map2(a, b)(f)
  def as[B](b: B): F[B] = F.as(a)(b)
  def skip: F[Unit] = F.skip(a)
  def replicateM(n: Int) = F.replicateM(n)(a)
  def replicateM_(n: Int) = F.replicateM_(n)(a)
}
ariwaranosai/FPinScala
src/main/scala/iomonad/monad.scala
Scala
mit
2,866
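A concrete instance to exercise the abstractions above: Monad for Option, plus replicateM, which sequences n copies of an effect into one effect producing a list. The instance is illustrative and not part of the file:

val optionMonad: Monad[Option] = new Monad[Option] {
  def unit[A](a: => A): Option[A] = Some(a)
  def flatMap[A, B](a: Option[A])(f: A => Option[B]): Option[B] = a flatMap f
}

// Three successful effects collapse into one Some of all the results;
// a single None collapses the whole computation.
assert(optionMonad.replicateM(3)(Option(1)) == Some(List(1, 1, 1)))
assert(optionMonad.replicateM(3)(None: Option[Int]) == None)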
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.ct600.v2

import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger}

case class B10(value: Int) extends CtBoxIdentifier(name = "Income from which income tax has been deducted") with CtInteger
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600/v2/B10.scala
Scala
apache-2.0
817
package opennlp.scalabha.tag.support

import org.junit.Assert._
import org.junit.Test

import opennlp.scalabha.tag._
import opennlp.scalabha.util.CollectionUtils._
import opennlp.scalabha.util.LogNum
import opennlp.scalabha.test.TestUtils._
import opennlp.scalabha.util.CollectionUtil._

class TagDictFactoryTests {

  @Test
  def test_SimpleWeightedTagDictFactory_passthroughTransformer() {
    val td: WeightedTagDict[String, Symbol] =
      new SimpleWeightedTagDictFactory[String, Symbol](
        PassthroughCondCountsTransformer())
        .make(Seq(Vector(
          ("the", 'D), ("the", 'D), ("the", 'D), ("the", 'D), ("the", 'D),
          ("dog", 'N), ("dog", 'N), ("dog", 'N),
          ("walks", 'V), ("walks", 'V), ("walks", 'V), ("walks", 'N))))

    // TagDict methods

    assertEquals(Set('D, 'N, 'V), td.defaultSet)

    assertEquals(Some(Set('D)), td.doGetSet("the"))
    assertEquals(Some(Set('N)), td.doGetSet("dog"))
    assertEquals(Some(Set('V, 'N)), td.doGetSet("walks"))
    assertEquals(None, td.doGetSet("aardvark"))

    val setIterator: Iterator[(String, Set[Symbol])] = td.setIterator
    val setIteratorSorted = setIterator.toVector.sortBy(_._1)
    assertEquals(Vector("dog" -> Set('N), "the" -> Set('D), "walks" -> Set('V, 'N)), setIteratorSorted)

    assertEquals(Set('D), td.set("the"))
    assertEquals(Set('N), td.set("dog"))
    assertEquals(Set('V, 'N), td.set("walks"))
    assertEquals(Set('D, 'N, 'V), td.set("aardvark"))

    assertEquals(true, td.contains("the"))
    assertEquals(true, td.contains("dog"))
    assertEquals(true, td.contains("walks"))
    assertEquals(false, td.contains("aardvark"))

    assertEquals(true, td.contains("the", 'D))
    assertEquals(false, td.contains("the", 'N))
    assertEquals(false, td.contains("the", 'V))
    assertEquals(false, td.contains("the", 'Z))
    assertEquals(false, td.contains("dog", 'D))
    assertEquals(true, td.contains("dog", 'N))
    assertEquals(false, td.contains("dog", 'V))
    assertEquals(false, td.contains("dog", 'Z))
    assertEquals(false, td.contains("walks", 'D))
    assertEquals(true, td.contains("walks", 'N))
    assertEquals(true, td.contains("walks", 'V))
    assertEquals(false, td.contains("walks", 'Z))
    assertEquals(false, td.contains("aardvark", 'D))
    assertEquals(false, td.contains("aardvark", 'N))
    assertEquals(false, td.contains("aardvark", 'V))
    assertEquals(false, td.contains("aardvark", 'Z))

    assertEquals(Set("the", "dog", "walks"), td.symbols)
    assertEquals(Set('D, 'N, 'V), td.allTags)

    // WeightedTagDict methods

    assertEqualsSmart(Map('D -> LogNum(5 / 12.), 'N -> LogNum(4 / 12.), 'V -> LogNum(3 / 12.)), td.default)

    assertEqualsSmart(Some(Map('D -> LogNum(5 / 5.))), td.doGetMap("the"))
    assertEqualsSmart(Some(Map('N -> LogNum(3 / 3.))), td.doGetMap("dog"))
    assertEqualsSmart(Some(Map('V -> LogNum(3 / 4.), 'N -> LogNum(1 / 4.))), td.doGetMap("walks"))
    assertEqualsSmart(None, td.doGetMap("aardvark"))

    val iterator: Iterator[(String, Map[Symbol, LogNum])] = td.iterator
    val iteratorSorted = iterator.toVector.sortBy(_._1)
    assertEqualsSmart(Vector(
      "dog" -> Map('N -> LogNum(3 / 3.)),
      "the" -> Map('D -> LogNum(5 / 5.)),
      "walks" -> Map('V -> LogNum(3 / 4.), 'N -> LogNum(1 / 4.))), iteratorSorted)

    assertEqualsSmart(Map('D -> LogNum(5 / 5.)), td.weights("the"))
    assertEqualsSmart(Map('N -> LogNum(3 / 3.)), td.weights("dog"))
    assertEqualsSmart(Map('V -> LogNum(3 / 4.), 'N -> LogNum(1 / 4.)), td.weights("walks"))
    assertEqualsSmart(Map('D -> LogNum(5 / 12.), 'N -> LogNum(4 / 12.), 'V -> LogNum(3 / 12.)), td.weights("aardvark"))
  }

  @Test
  def test_SimpleWeightedTagDictFactory_addOneSmoothingTransformer() {
    val td: WeightedTagDict[String, Symbol] =
      new SimpleWeightedTagDictFactory[String, Symbol](
        AddLambdaSmoothingCondCountsTransformer(1))
        .make(Seq(Vector(
          ("the", 'D), ("the", 'D), ("the", 'D), ("the", 'D), ("the", 'D),
          ("dog", 'N), ("dog", 'N), ("dog", 'N),
          ("walks", 'V), ("walks", 'V), ("walks", 'V), ("walks", 'N))))

    // TagDict methods

    assertEquals(Set('D, 'N, 'V), td.defaultSet)

    assertEquals(Some(Set('D, 'N, 'V)), td.doGetSet("the"))
    assertEquals(Some(Set('D, 'N, 'V)), td.doGetSet("dog"))
    assertEquals(Some(Set('D, 'N, 'V)), td.doGetSet("walks"))
    assertEquals(None, td.doGetSet("aardvark"))

    val setIterator: Iterator[(String, Set[Symbol])] = td.setIterator
    val setIteratorSorted = setIterator.toVector.sortBy(_._1)
    assertEquals(Vector("dog" -> Set('D, 'N, 'V), "the" -> Set('D, 'N, 'V), "walks" -> Set('D, 'N, 'V)), setIteratorSorted)

    assertEquals(Set('D, 'N, 'V), td.set("the"))
    assertEquals(Set('D, 'N, 'V), td.set("dog"))
    assertEquals(Set('D, 'N, 'V), td.set("walks"))
    assertEquals(Set('D, 'N, 'V), td.set("aardvark"))

    assertEquals(true, td.contains("the"))
    assertEquals(true, td.contains("dog"))
    assertEquals(true, td.contains("walks"))
    assertEquals(false, td.contains("aardvark"))

    assertEquals(true, td.contains("the", 'D))
    assertEquals(true, td.contains("the", 'N))
    assertEquals(true, td.contains("the", 'V))
    assertEquals(false, td.contains("the", 'Z))
    assertEquals(true, td.contains("dog", 'D))
    assertEquals(true, td.contains("dog", 'N))
    assertEquals(true, td.contains("dog", 'V))
    assertEquals(false, td.contains("dog", 'Z))
    assertEquals(true, td.contains("walks", 'D))
    assertEquals(true, td.contains("walks", 'N))
    assertEquals(true, td.contains("walks", 'V))
    assertEquals(false, td.contains("walks", 'Z))
    assertEquals(false, td.contains("aardvark", 'D))
    assertEquals(false, td.contains("aardvark", 'N))
    assertEquals(false, td.contains("aardvark", 'V))
    assertEquals(false, td.contains("aardvark", 'Z))

    assertEquals(Set("the", "dog", "walks"), td.symbols)
    assertEquals(Set('D, 'N, 'V), td.allTags)

    // WeightedTagDict methods

    assertEqualsSmart(Map('D -> LogNum(8 / 24.), 'N -> LogNum(7 / 24.), 'V -> LogNum(6 / 24.)), td.default)

    assertEqualsSmart(Some(Map('D -> LogNum(6 / 9.), 'N -> LogNum(1 / 9.), 'V -> LogNum(1 / 9.))), td.doGetMap("the"))
    assertEqualsSmart(Some(Map('D -> LogNum(1 / 7.), 'N -> LogNum(4 / 7.), 'V -> LogNum(1 / 7.))), td.doGetMap("dog"))
    assertEqualsSmart(Some(Map('D -> LogNum(1 / 8.), 'N -> LogNum(2 / 8.), 'V -> LogNum(4 / 8.))), td.doGetMap("walks"))
    assertEqualsSmart(None, td.doGetMap("aardvark"))

    val iterator: Iterator[(String, Map[Symbol, LogNum])] = td.iterator
    val iteratorSorted = iterator.toVector.sortBy(_._1)
    assertEqualsSmart(Vector(
      "dog" -> Map('D -> LogNum(1 / 7.), 'N -> LogNum(4 / 7.), 'V -> LogNum(1 / 7.)),
      "the" -> Map('D -> LogNum(6 / 9.), 'N -> LogNum(1 / 9.), 'V -> LogNum(1 / 9.)),
      "walks" -> Map('D -> LogNum(1 / 8.), 'N -> LogNum(2 / 8.), 'V -> LogNum(4 / 8.))), iteratorSorted)

    assertEqualsSmart(Map('D -> LogNum(6 / 9.), 'N -> LogNum(1 / 9.), 'V -> LogNum(1 / 9.)), td.weights("the"))
    assertEqualsSmart(Map('D -> LogNum(1 / 7.), 'N -> LogNum(4 / 7.), 'V -> LogNum(1 / 7.)), td.weights("dog"))
    assertEqualsSmart(Map('D -> LogNum(1 / 8.), 'N -> LogNum(2 / 8.), 'V -> LogNum(4 / 8.)), td.weights("walks"))
    assertEqualsSmart(Map('D -> LogNum(8 / 24.), 'N -> LogNum(7 / 24.), 'V -> LogNum(6 / 24.)), td.weights("aardvark"))
  }

  def assertEqualsSmart[A](expected: Map[A, LogNum], actual: Map[A, LogNum]) {
    assertEquals(expected.keys.toSet, actual.keys.toSet)
    for (k <- expected.keys)
      assertEqualsProb(expected(k), actual(k))
  }

  def assertEqualsSmart[A](expected: Option[Map[A, LogNum]], actual: Option[Map[A, LogNum]]) {
    assertEquals(expected.isDefined, actual.isDefined)
    if (expected.isDefined)
      assertEqualsSmart(expected.get, actual.get)
  }

  def assertEqualsSmart[A, B](expected: Vector[(A, Map[B, LogNum])], actual: Vector[(A, Map[B, LogNum])]) {
    assertEquals(expected.size, actual.size)
    for (((eA, eB), (aA, aB)) <- expected zip actual) {
      assertEquals(eA, aA)
      assertEqualsSmart(eB, aB)
    }
  }
}
eponvert/Scalabha
src/test/scala/opennlp/scalabha/tag/support/TagDictFactoryTests.scala
Scala
apache-2.0
8,123
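Where the smoothed fractions in the second test come from, with lambda = 1 over the tag set {D, N, V}. The denominators (9, 7, 8, 24) match raw total + lambda * (3 tags + 1), which suggests the transformer reserves one extra lambda of probability mass for unseen tags; that reading is inferred from the asserted numbers, not from the transformer's source:

// "the": raw counts D=5, N=0, V=0, total 5
//   P(D | "the")   = (5 + 1) / (5 + 4) = 6/9
//   P(N | "the")   = (0 + 1) / 9       = 1/9
//   P(V | "the")   = (0 + 1) / 9       = 1/9   (1/9 left for unseen tags)
// "walks": raw counts V=3, N=1, total 4
//   P(V | "walks") = (3 + 1) / (4 + 4) = 4/8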
package dbtarzan.gui.config

import dbtarzan.config.connections.ConnectionData
import dbtarzan.config.password.Password
import dbtarzan.db.{IdentifierDelimiters, Schema}
import dbtarzan.gui.config.connections.ConnectionDataValidation
import org.scalatest.flatspec.AnyFlatSpec

class ConnectionDataValidationTest extends AnyFlatSpec {

  "correct connection data" should "give no error" in {
    val errors = ConnectionDataValidation.validate(
      ConnectionData(
        "/testdbs/sqllite/sqlite-jdbc-3.8.11.2.jar",
        "chinook",
        "org.sqlite.JDBC",
        "jdbc:sqlite:/home/andrea/prj/dbtarzan/testdbs/sqllite/Chinook_Sqlite.sqlite",
        Some(Schema("chinook")),
        "root",
        Password("pwd"),
        Some(false),
        None,
        Some(IdentifierDelimiters('[', ']')),
        Some(300),
        Some(20),
        Some(1000),
        None
      ))
    assert(errors.length === 0)
  }

  "connection data with empty fields" should "give error" in {
    val errors = ConnectionDataValidation.validate(
      ConnectionData(
        "",
        "",
        "",
        "",
        None,
        "",
        Password(""),
        Some(false),
        None,
        None,
        None,
        None,
        None,
        None
      ))
    assert(List("Empty name", "Name must be an identifier", "Empty url", "Url must be in URL form", "Empty driver", "Empty jar") === errors)
  }

  "connection data with fields with spaces" should "give error" in {
    val errors = ConnectionDataValidation.validate(
      ConnectionData(
        "with spaces",
        "point.in.the middle",
        "org.sqlite.JDBC",
        "url with spaces",
        None,
        "",
        Password(""),
        Some(false),
        None,
        None,
        None,
        None,
        Some(100),
        None
      ))
    assert(List("Name must be an identifier", "Url cannot contain spaces", "Url must be in URL form", "Jar cannot contain spaces", "Max field size should be over 200") === errors)
  }
}
aferrandi/dbtarzan
src/test/scala/dbtarzan/gui/config/ConnectionDataValidationTest.scala
Scala
apache-2.0
2,034
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

import Retries._
import prop.TableDrivenPropertyChecks._
import org.scalatest.tagobjects.Retryable
import scala.annotation.tailrec
import SharedHelpers.EventRecordingReporter
import collection.mutable.ListBuffer
// SKIP-SCALATESTJS,NATIVE-START
import org.scalatest.refspec.RefSpec
// SKIP-SCALATESTJS,NATIVE-END
import org.scalatest.{ featurespec, flatspec, freespec, funspec, funsuite, propspec, wordspec }
import org.scalatest.featurespec.AnyFeatureSpec
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.propspec.AnyPropSpec
import org.scalatest.wordspec.AnyWordSpec

class RandomTestOrderSpec extends AnyFunSpec {

  // SKIP-SCALATESTJS,NATIVE-START
  class ExampleSpec(listBuffer: ListBuffer[Int]) extends RefSpec with RandomTestOrder {
    def `test 1`: Unit = { listBuffer += 0 }
    def `test 2`: Unit = { listBuffer += 1 }
    def `test 3`: Unit = { listBuffer += 2 }
    override def newInstance = new ExampleSpec(listBuffer)
  }
  // SKIP-SCALATESTJS,NATIVE-END

  class ExampleFunSuite(listBuffer: ListBuffer[Int]) extends AnyFunSuite with RandomTestOrder {
    test("test 1") { listBuffer += 0 }
    test("test 2") { listBuffer += 1 }
    test("test 3") { listBuffer += 2 }
    override def newInstance = new ExampleFunSuite(listBuffer)
  }

  class ExampleFixtureFunSuite(listBuffer: ListBuffer[Int]) extends funsuite.FixtureAnyFunSuite with StringFixture with RandomTestOrder {
    test("test 1") { fixture => listBuffer += 0 }
    test("test 2") { fixture => listBuffer += 1 }
    test("test 3") { fixture => listBuffer += 2 }
    override def newInstance = new ExampleFixtureFunSuite(listBuffer)
  }

  class ExampleFunSpec(listBuffer: ListBuffer[Int]) extends AnyFunSpec with RandomTestOrder {
    it("test 1") { listBuffer += 0 }
    it("test 2") { listBuffer += 1 }
    it("test 3") { listBuffer += 2 }
    override def newInstance = new ExampleFunSpec(listBuffer)
  }

  class ExampleFixtureFunSpec(listBuffer: ListBuffer[Int]) extends funspec.FixtureAnyFunSpec with StringFixture with RandomTestOrder {
    it("test 1") { fixture => listBuffer += 0 }
    it("test 2") { fixture => listBuffer += 1 }
    it("test 3") { fixture => listBuffer += 2 }
    override def newInstance = new ExampleFixtureFunSpec(listBuffer)
  }

  class ExampleFeatureSpec(listBuffer: ListBuffer[Int]) extends AnyFeatureSpec with RandomTestOrder {
    Feature("Scope 1") {
      Scenario("test 1") { listBuffer += 0 }
      Scenario("test 2") { listBuffer += 1 }
      Scenario("test 3") { listBuffer += 2 }
    }
    override def newInstance = new ExampleFeatureSpec(listBuffer)
  }

  class ExampleFixtureFeatureSpec(listBuffer: ListBuffer[Int]) extends featurespec.FixtureAnyFeatureSpec with StringFixture with RandomTestOrder {
    Feature("Scope 1") {
      Scenario("test 1") { fixture => listBuffer += 0 }
      Scenario("test 2") { fixture => listBuffer += 1 }
      Scenario("test 3") { fixture => listBuffer += 2 }
    }
    override def newInstance = new ExampleFixtureFeatureSpec(listBuffer)
  }

  class ExampleFlatSpec(listBuffer: ListBuffer[Int]) extends AnyFlatSpec with RandomTestOrder {
    behavior of "Scope 1"
    it should "test 1" in { listBuffer += 0 }
    it should "test 2" in { listBuffer += 1 }
    it should "test 3" in { listBuffer += 2 }
    override def newInstance = new ExampleFlatSpec(listBuffer)
  }

  class ExampleFixtureFlatSpec(listBuffer: ListBuffer[Int]) extends flatspec.FixtureAnyFlatSpec with StringFixture with RandomTestOrder {
    behavior of "Scope 1"
    it should "test 1" in { fixture => listBuffer += 0 }
    it should "test 2" in { fixture => listBuffer += 1 }
    it should "test 3" in { fixture => listBuffer += 2 }
    override def newInstance = new ExampleFixtureFlatSpec(listBuffer)
  }

  class ExampleFreeSpec(listBuffer: ListBuffer[Int]) extends AnyFreeSpec with RandomTestOrder {
    "Scope 1" - {
      "test 1" in { listBuffer += 0 }
      "test 2" in { listBuffer += 1 }
      "test 3" in { listBuffer += 2 }
    }
    override def newInstance = new ExampleFreeSpec(listBuffer)
  }

  class ExampleFixtureFreeSpec(listBuffer: ListBuffer[Int]) extends freespec.FixtureAnyFreeSpec with StringFixture with RandomTestOrder {
    "Scope 1" - {
      "test 1" in { fixture => listBuffer += 0 }
      "test 2" in { fixture => listBuffer += 1 }
      "test 3" in { fixture => listBuffer += 2 }
    }
    override def newInstance = new ExampleFixtureFreeSpec(listBuffer)
  }

  class ExamplePropSpec(listBuffer: ListBuffer[Int]) extends AnyPropSpec with RandomTestOrder {
    property("test 1") { listBuffer += 0 }
    property("test 2") { listBuffer += 1 }
    property("test 3") { listBuffer += 2 }
    override def newInstance = new ExamplePropSpec(listBuffer)
  }

  class ExampleFixturePropSpec(listBuffer: ListBuffer[Int]) extends propspec.FixtureAnyPropSpec with StringFixture with RandomTestOrder {
    property("test 1") { fixture => listBuffer += 0 }
    property("test 2") { fixture => listBuffer += 1 }
    property("test 3") { fixture => listBuffer += 2 }
    override def newInstance = new ExampleFixturePropSpec(listBuffer)
  }

  class ExampleWordSpec(listBuffer: ListBuffer[Int]) extends AnyWordSpec with RandomTestOrder {
    "Scope 1" should {
      "test 1" in { listBuffer += 0 }
      "test 2" in { listBuffer += 1 }
      "test 3" in { listBuffer += 2 }
    }
    override def newInstance = new ExampleWordSpec(listBuffer)
  }

  class ExampleFixtureWordSpec(listBuffer: ListBuffer[Int]) extends wordspec.FixtureAnyWordSpec with StringFixture with RandomTestOrder {
    "Scope 1" should {
      "test 1" in { fixture => listBuffer += 0 }
      "test 2" in { fixture => listBuffer += 1 }
      "test 3" in { fixture => listBuffer += 2 }
    }
    override def newInstance = new ExampleFixtureWordSpec(listBuffer)
  }

  def examples = Table(
    ("suite", "test1Name", "test2Name", "test3Name"),
    // SKIP-SCALATESTJS,NATIVE-START
    ((buffer: ListBuffer[Int]) => new ExampleSpec(buffer), "test 1", "test 2", "test 3"),
    // SKIP-SCALATESTJS,NATIVE-END
    ((buffer: ListBuffer[Int]) => new ExampleFunSuite(buffer), "test 1", "test 2", "test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixtureFunSuite(buffer), "test 1", "test 2", "test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFunSpec(buffer), "test 1", "test 2", "test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixtureFunSpec(buffer), "test 1", "test 2", "test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFeatureSpec(buffer), "Feature: Scope 1 Scenario: test 1", "Feature: Scope 1 Scenario: test 2", "Feature: Scope 1 Scenario: test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixtureFeatureSpec(buffer), "Feature: Scope 1 Scenario: test 1", "Feature: Scope 1 Scenario: test 2", "Feature: Scope 1 Scenario: test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFlatSpec(buffer), "Scope 1 should test 1", "Scope 1 should test 2", "Scope 1 should test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixtureFlatSpec(buffer), "Scope 1 should test 1", "Scope 1 should test 2", "Scope 1 should test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFreeSpec(buffer), "Scope 1 test 1", "Scope 1 test 2", "Scope 1 test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixtureFreeSpec(buffer), "Scope 1 test 1", "Scope 1 test 2", "Scope 1 test 3"),
    ((buffer: ListBuffer[Int]) => new ExamplePropSpec(buffer), "test 1", "test 2", "test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixturePropSpec(buffer), "test 1", "test 2", "test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleWordSpec(buffer), "Scope 1 should test 1", "Scope 1 should test 2", "Scope 1 should test 3"),
    ((buffer: ListBuffer[Int]) => new ExampleFixtureWordSpec(buffer), "Scope 1 should test 1", "Scope 1 should test 2", "Scope 1 should test 3")
  )

  override def withFixture(test: NoArgTest) = {
    if (isRetryable(test))
      withRetry { super.withFixture(test) }
    else
      super.withFixture(test)
  }

  describe("RandomTestOrder ") {

    it("execute tests in random order, but fire events in original order", Retryable) {
      forAll(examples) { case (specFun, test1Name, test2Name, test3Name) =>
        @tailrec
        def doUntilOutOfOrder(count: Int = 0): EventRecordingReporter = {
          val buffer = new ListBuffer[Int]
          val spec = specFun(buffer)
          val rep = new EventRecordingReporter
          spec.run(None, Args(reporter = rep))
          if (buffer(0) != 0 || buffer(1) != 1 || buffer(2) != 2)
            rep
          else {
            if (count < 100)
              doUntilOutOfOrder(count + 1)
            else
              fail("Tried 100 times but the order is still not shuffled, it probably never will.")
          }
        }

        val rep = doUntilOutOfOrder()

        val testStartingList = rep.testStartingEventsReceived
        assert(testStartingList.size == 3)
        assert(testStartingList(0).testName == test1Name)
        assert(testStartingList(1).testName == test2Name)
        assert(testStartingList(2).testName == test3Name)

        val testSucceededList = rep.testSucceededEventsReceived
        assert(testSucceededList.size == 3)
        assert(testSucceededList(0).testName == test1Name)
        assert(testSucceededList(1).testName == test2Name)
        assert(testSucceededList(2).testName == test3Name)
      }
    }
  }
}
scalatest/scalatest
jvm/scalatest-test/src/test/scala/org/scalatest/RandomTestOrderSpec.scala
Scala
apache-2.0
10,994
/*
 * Copyright (c) 2014-2015 by its authors. Some rights reserved.
 * See the project homepage at: http://www.monifu.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package monifu.reactive.internals.operators

import monifu.concurrent.extensions._
import monifu.reactive.Ack.Continue
import monifu.reactive.subjects.PublishSubject
import monifu.reactive.{Observer, Observable}
import concurrent.duration._
import scala.concurrent.Future

object SampleRepeatedSuite extends BaseOperatorSuite {
  def waitNext = 500.millis
  def waitFirst = 500.millis

  def createObservable(sourceCount: Int) = Some {
    val o = Observable.create[Long](_.onNext(1L))
      .sampleRepeated(500.millis)
      .take(sourceCount)
      .scan(0L)((acc, _) => acc + 1)
    Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
  }

  def sum(sourceCount: Int) = {
    sourceCount * (sourceCount + 1) / 2
  }

  def count(sourceCount: Int) = {
    sourceCount
  }

  def observableInError(sourceCount: Int, ex: Throwable) = Some {
    val source = Observable.intervalAtFixedRate(1.second)
      .take(sourceCount)
    val o = createObservableEndingInError(source, ex).sampleRepeated(500.millis)
    Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
  }

  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None

  test("specified period should be respected if consumer is responsive") { implicit s =>
    val sub = PublishSubject[Long]()
    val obs = sub.sampleRepeated(500.millis)
    var onNextCount = 0
    var received = 0
    var wasCompleted = false

    obs.onSubscribe(new Observer[Long] {
      def onNext(elem: Long) = {
        onNextCount += 1
        Future.delayedResult(100.millis) {
          received += 1
          Continue
        }
      }

      def onError(ex: Throwable) = ()
      def onComplete() = wasCompleted = true
    })

    sub.onNext(1)
    s.tick()
    assertEquals(onNextCount, 0)
    assertEquals(received, 0)

    s.tick(500.millis)
    assertEquals(onNextCount, 1)
    assertEquals(received, 0)

    s.tick(100.millis)
    assertEquals(onNextCount, 1)
    assertEquals(received, 1)

    sub.onComplete()
    s.tick()
    assert(!wasCompleted)

    s.tick(500.millis)
    assert(wasCompleted)
    assertEquals(onNextCount, 1)
    assertEquals(received, 1)
  }

  test("specified period should not be respected if consumer is not responsive") { implicit s =>
    val sub = PublishSubject[Long]()
    val obs = sub.sampleRepeated(500.millis)
    var onNextCalls = 0
    var received = 0
    var wasCompleted = false

    obs.onSubscribe(new Observer[Long] {
      def onNext(elem: Long) = {
        onNextCalls += 1
        Future.delayedResult(1000.millis) {
          received += 1
          Continue
        }
      }

      def onError(ex: Throwable) = ()
      def onComplete() = {
        wasCompleted = true
      }
    })

    sub.onNext(1)
    s.tick()
    assertEquals(onNextCalls, 0)
    assertEquals(received, 0)

    s.tick(500.millis)
    assertEquals(onNextCalls, 1)
    assertEquals(received, 0)

    s.tick(500.millis)
    assertEquals(onNextCalls, 1)
    assertEquals(received, 0)

    s.tick(500.millis)
    assertEquals(onNextCalls, 2)
    assertEquals(received, 1)

    sub.onComplete()
    s.tick()
    assert(!wasCompleted)

    s.tick(1.second)
    assertEquals(onNextCalls, 2)
    assertEquals(received, 2)
    assert(wasCompleted)
  }
}
monifu/shared/src/test/scala/monifu/reactive/internals/operators/SampleRepeatedSuite.scala
Scala
apache-2.0
3,964
/*
Copyright (c) 2013 Anthony Mulcahy

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package com.dragongate_technologies.glfuncplot

import _root_.android.app.Activity
import _root_.android.graphics.{Bitmap, BitmapFactory, Color}
import _root_.android.content.Context
import _root_.android.opengl.{GLES20, GLSurfaceView, GLUtils, Matrix}
import _root_.android.os.{Bundle, SystemClock}
import _root_.android.util.{AttributeSet, Log}
import _root_.android.view.MotionEvent
import _root_.android.widget.LinearLayout

import java.io.IOException
import java.io.InputStream
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.nio.FloatBuffer
import java.nio.ShortBuffer

import javax.microedition.khronos.egl.EGLConfig
import javax.microedition.khronos.opengles.GL10

import scala.math._

class glFuncPlotRenderer extends GLSurfaceView.Renderer {
  val TAG: String = "glFuncPlotRenderer"

  var funcAry: Array[Array[Double]] = Array.fill(10, 10)(0d) //todo
  var dMax = 0.0

  var plotVertBuffer: FloatBuffer = _
  var plotColourBuffer: FloatBuffer = _
  var plotIndexBuffer: ShortBuffer = _
  var indexLength: Int = _

  var mProgram: Int = _
  var maPositionHandle: Int = _
  var maColourHandle: Int = _
  var muMVPMatrixHandle: Int = _

  val mMVPMatrix: Array[Float] = new Array[Float](16)
  val mMMatrix: Array[Float] = new Array[Float](16)
  val mVMatrix: Array[Float] = new Array[Float](16)
  val mProjMatrix: Array[Float] = new Array[Float](16)

  val vertexShaderCode: String =
    "uniform mat4 uMVPMatrix;   \n" +
    "attribute vec4 vPosition;  \n" +
    "attribute vec4 vColour;    \n" +
    "varying vec4 fColour;      \n" +
    "void main(){               \n" +
    "  fColour = vColour;       \n" +
    "  gl_Position = uMVPMatrix * vPosition;\n" +
    "}                          \n"

  val fragmentShaderCode: String =
    "precision mediump float;   \n" +
    "varying vec4 fColour;      \n" +
    "void main(){               \n" +
    "  gl_FragColor = fColour;  \n" +
    //"  gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); \n" +
    "}                          \n"

  private def loadShader(iType: Int, shaderCode: String): Int = {
    val shader: Int = GLES20.glCreateShader(iType)
    GLES20.glShaderSource(shader, shaderCode)
    GLES20.glCompileShader(shader)
    shader
  }

  def makeFloatBuffer(aryFloat: Array[Float]): FloatBuffer = {
    val floatBuffer: FloatBuffer =
      ByteBuffer.allocateDirect(aryFloat.length * 4).order(ByteOrder.nativeOrder()).asFloatBuffer()
    floatBuffer.put(aryFloat).position(0)
    floatBuffer
  }

  def makeShortBuffer(aryShort: Array[Short]): ShortBuffer = {
    val shortBuffer: ShortBuffer =
      ByteBuffer.allocateDirect(aryShort.length * 4).order(ByteOrder.nativeOrder()).asShortBuffer()
    shortBuffer.put(aryShort).position(0)
    shortBuffer
  }

  def normalizeCoord(coord: Int, max: Int, scalingFactor: Float = 2f): Float = {
    (coord.toFloat * 2f / max.toFloat - 1f) * scalingFactor
  }

  def normalizeMatrix(matrix: Array[Array[Double]]): Array[Array[Float]] = {
    Log.e(TAG, "normalizeMatrix")
    val iSize = matrix.length
    val jSize = matrix(0).length
    var i = 0
    dMax = 0.0d
    while (i < iSize) {
      var j = 0
      while (j < jSize) {
        dMax = max(dMax, matrix(i)(j))
        j += 1
      }
      i += 1
    }
    val normMatrix = Array.ofDim[Float](iSize, jSize)
    i = 0
    while (i < iSize) {
      var j = 0
      while (j < jSize) {
        normMatrix(i)(j) = ((matrix(i)(j)) / (dMax)).toFloat * 4.0f - 2.0f
        j += 1
      }
      i += 1
    }
    normMatrix
  }

  def makeCoordsArray(matrix: Array[Array[Float]]): Array[Float] = {
    Log.e(TAG, "makeCoordsArray")
    val iSize = matrix.length
    val jSize = matrix(0).length
    val plotCoords: Array[Float] = new Array[Float](iSize * jSize * 3)
    var i = 0
    while (i < iSize) {
      var j = 0
      while (j < jSize) {
        val index = (i * jSize + j) * 3
        //plotCoords(index) = normalizeCoord(i, iSize-1)
        plotCoords(index) = normalizeCoord(0, iSize - 1)
        plotCoords(index + 1) = matrix(i)(j)
        plotCoords(index + 2) = normalizeCoord(j, jSize - 1)
        j += 1
      }
      i += 1
    }
    plotCoords
  }

  def makeColorArray(matrix: Array[Array[Float]]): Array[Float] = {
    Log.e(TAG, "makeColorArray")
    val iSize = matrix.length
    val jSize = matrix(0).length
    val plotColours: Array[Float] = new Array[Float](iSize * jSize * 4)
    var i = 0
    while (i < iSize) {
      var j = 0
      while (j < jSize) {
        val indexCol = (i * jSize + j) * 4
        val color = Color.HSVToColor(Array[Float]((((((matrix(i)(j)) + 2.0) / 4.0) * 359.0) % 359.0).toFloat, 1.0f, 1.0f))
        val red = Color.red(color).toFloat / 255f
        val green = Color.green(color).toFloat / 255f
        val blue = Color.blue(color).toFloat / 255f
        plotColours(indexCol) = red
        plotColours(indexCol + 1) = green
        plotColours(indexCol + 2) = blue
        plotColours(indexCol + 3) = 1f
        j += 1
      }
      i += 1
    }
    plotColours
  }

  def makeLineIndexArray(iSize: Int, jSize: Int): Array[Short] = {
    Log.e(TAG, "makeLineIndexArray: " + iSize + " , " + jSize)
    val plotIndices: Array[Short] = new Array[Short]((iSize) * (jSize) * 2)
    var index2 = 0
    var i = 0
    while (i < iSize) {
      var j = 0
      while (j < jSize - 1) {
        val index = (i * (jSize) + j) * 2
        plotIndices(index) = index2.toShort
        plotIndices(index + 1) = (index2 + 1).toShort
        index2 += 1
        j += 1
      }
      index2 += 1
      i += 1
    }
    plotIndices
  }

  def init4(_funcAry: Array[Array[Double]]) {
    Log.e(TAG, "init4 Start")
    funcAry = _funcAry
    Matrix.setIdentityM(mMMatrix, 0)
    Matrix.rotateM(mMMatrix, 0, -90, 0, 1, 0)
    val iSize: Int = funcAry.length
    val jSize: Int = funcAry(0).length
    val normMatrix: Array[Array[Float]] = normalizeMatrix(funcAry)
    val plotCoords: Array[Float] = makeCoordsArray(normMatrix)
    val plotColours: Array[Float] = makeColorArray(normMatrix)
    val plotIndices: Array[Short] = makeLineIndexArray(iSize, jSize)
    plotVertBuffer = makeFloatBuffer(plotCoords)
    plotColourBuffer = makeFloatBuffer(plotColours)
    plotIndexBuffer = makeShortBuffer(plotIndices)
    indexLength = plotIndices.length
    Log.e(TAG, "init4 End")
  }

  def initView() {
    Log.e(TAG, "initView")
    Matrix.setIdentityM(mMMatrix, 0)
    Matrix.rotateM(mMMatrix, 0, -90, 0, 1, 0)
  }

  override def onSurfaceCreated(glUnused: GL10, config: EGLConfig) {
    Log.e(TAG, "onSurfaceCreated")
    GLES20.glClearColor(0.0f, 0.0f, 0.0f, 1)
    GLES20.glEnable(GLES20.GL_DEPTH_TEST)
    GLES20.glDepthFunc(GLES20.GL_LEQUAL)

    //Log.e(TAG, "initShapes3 Start")
    val iSize: Int = funcAry.length
    val jSize: Int = funcAry(0).length
    val normMatrix: Array[Array[Float]] = normalizeMatrix(funcAry)
    val plotCoords: Array[Float] = makeCoordsArray(normMatrix)
    val plotColours: Array[Float] = makeColorArray(normMatrix)
    val plotIndices: Array[Short] = makeLineIndexArray(iSize, jSize)
    plotVertBuffer = makeFloatBuffer(plotCoords)
    plotColourBuffer = makeFloatBuffer(plotColours)
    plotIndexBuffer = makeShortBuffer(plotIndices)
    indexLength = plotIndices.length
    //Log.e(TAG, "initShapes3 End")

    val vertexShader = loadShader(GLES20.GL_VERTEX_SHADER, vertexShaderCode)
    val fragmentShader = loadShader(GLES20.GL_FRAGMENT_SHADER, fragmentShaderCode)
    mProgram = GLES20.glCreateProgram()
    GLES20.glAttachShader(mProgram, vertexShader)
    GLES20.glAttachShader(mProgram, fragmentShader)
    GLES20.glLinkProgram(mProgram)
    maPositionHandle = GLES20.glGetAttribLocation(mProgram, "vPosition")
    maColourHandle = GLES20.glGetAttribLocation(mProgram, "vColour")
    muMVPMatrixHandle = GLES20.glGetUniformLocation(mProgram, "uMVPMatrix")
    Matrix.setIdentityM(mMMatrix, 0)
    Matrix.rotateM(mMMatrix, 0, -45, 1, 0, -1)
    initView()
  }

  override def onSurfaceChanged(glUnused: GL10, width: Int, height: Int) {
    Log.e(TAG, "onSurfaceChanged")
    GLES20.glViewport(0, 0, width, height)
    val ratio: Float = width.toFloat / height.toFloat
    //Matrix.frustumM(mProjMatrix, 0, -ratio, ratio, -1, 1, 1, 10)
    Matrix.orthoM(mProjMatrix, 0, -2.1f, 2.1f, -2.1f, 2.1f, 1, 10)
    //Matrix.setLookAtM(mVMatrix, 0, 0.0f, 0.0f, -5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f)
    Matrix.setLookAtM(mVMatrix, 0, 0.0f, 0.0f, -5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f)
  }

  override def onDrawFrame(gl: GL10) {
    //Log.e(TAG, "onDrawFrame")
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT)
    GLES20.glUseProgram(mProgram)
    GLES20.glVertexAttribPointer(maPositionHandle, 3, GLES20.GL_FLOAT, false, 0, plotVertBuffer)
    GLES20.glEnableVertexAttribArray(maPositionHandle)
    GLES20.glVertexAttribPointer(maColourHandle, 4, GLES20.GL_FLOAT, false, 0, plotColourBuffer)
    GLES20.glEnableVertexAttribArray(maColourHandle)

    // Set up MVP
    Matrix.setIdentityM(mMVPMatrix, 0)
    Matrix.multiplyMM(mMVPMatrix, 0, mMMatrix, 0, mMVPMatrix, 0)
    Matrix.multiplyMM(mMVPMatrix, 0, mVMatrix, 0, mMVPMatrix, 0)
    Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mMVPMatrix, 0)
    GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix, 0)
    GLES20.glDrawElements(GLES20.GL_LINES, indexLength, GLES20.GL_UNSIGNED_SHORT, plotIndexBuffer)
  }
}
amulcahy/mcOptCal
src/main/scala/glFuncPlot.scala
Scala
mit
10,259
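A quick check of what normalizeCoord does with its defaults: it maps a grid index in 0..max linearly onto [-scalingFactor, +scalingFactor], so a 10x10 grid spans -2.0f to 2.0f on each plotted axis:

// normalizeCoord(0, 9) == (0*2f/9f - 1f) * 2f == -2.0f
// normalizeCoord(9, 9) == (9*2f/9f - 1f) * 2f ==  2.0f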
package gg.uhc.hosts.endpoints.alerts

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives.{complete, handleRejections}
import akka.http.scaladsl.server.Route
import gg.uhc.hosts.database.Database
import gg.uhc.hosts.endpoints.{CustomDirectives, EndpointRejectionHandler}

class DeleteAlertRule(customDirectives: CustomDirectives, database: Database) {
  import customDirectives._

  def apply(id: Long): Route =
    handleRejections(EndpointRejectionHandler()) {
      requireAuthentication { session =>
        requirePermission("hosting advisor", session.username) {
          requireSucessfulQuery(database.deleteAlertRule(id)) {
            case 0 => complete(StatusCodes.NotFound) // None updated
            case _ => complete(StatusCodes.NoContent)
          }
        }
      }
    }
}
Eluinhost/hosts.uhc.gg
src/main/scala/gg/uhc/hosts/endpoints/alerts/DeleteAlertRule.scala
Scala
mit
831