Dataset fields:
  code        stringlengths (5 to 1M)
  repo_name   stringlengths (5 to 109)
  path        stringlengths (6 to 208)
  language    stringclasses (1 value)
  license     stringclasses (15 values)
  size        int64 (5 to 1M)
/*
 * Copyright (C) 2017 Michael Dippery <[email protected]>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.mipadi.jupiter.io

import java.io.File
import java.nio.file.{FileSystems, Path}


/** Useful classes for easily working with files and paths.
  *
  * Provides an implicit conversion from $file to $richfile, which adds
  * some useful methods to `File`. It also provides an implicit conversion
  * from $path to $richpath, which provides a more convenient way to work
  * with paths in Scala. Finally, it provides a `p` prefix for strings, so
  * paths can be created directly from a string:
  *
  * @define file
  *   `java.io.File`
  * @define path
  *   `java.nio.file.Path`
  * @define richfile
  *   `[[com.mipadi.jupiter.io.files.RichFile RichFile]]`
  * @define richpath
  *   `[[com.mipadi.jupiter.io.files.RichPath RichPath]]`
  *
  * {{{
  * import com.mipadi.jupiter.io.files._
  * val path = p"src/main/scala"
  * }}}
  */
package object files {

  /** Implicitly converts file-like objects and adds some extension
    * methods. Also allows file-like objects to be ordered based on their
    * paths.
    *
    * @tparam T
    *   The `[[com.mipadi.jupiter.io.files.Locatable Locatable]]` type
    * @param f
    *   The wrapped file
    * @param ev
    *   An implicit delegate for handling file-like operations
    */
  implicit class RichFile[T](f: T)(implicit ev: Locatable[T]) extends Ordered[T] {

    /** The wrapped file's path */
    val path = ev.getPath(f)

    /** The wrapped file's absolute path */
    val absolutePath = ev.getAbsolutePath(f)

    /** `true` if the file is a directory */
    def isDirectory = ev.isDirectory(f)

    /** Compares the receiver's path to `that`'s path.
      *
      * Determines a logical sorting for `File` objects.
      *
      * @param that
      *   The file whose path the receiver's should be compared to.
      * @return
      *   - '''< 0''' if `this` comes before `that`
      *   - '''> 0''' if `this` comes after `that`
      *   - '''0''' if `this` is equal to `that`
      */
    override def compare(that: T): Int = ev.getPath(f) compare ev.getPath(that)

    /** A listing of all files rooted under this path.
      *
      * @return
      *   A listing of all files under the path represented by the wrapped
      *   file
      */
    def subtree: FileTree[T] = new FileTree(f)
  }

  /** Allows paths to be built using the `/` operator.
    *
    * @param path
    *   The wrapped path
    * @param ev
    *   The path locator delegate
    */
  implicit class RichPath[T](path: T)(implicit ev: Locatable[T]) {

    /** Create a new path consisting of `that` appended to `path`.
      *
      * This allows callers to create new paths like this:
      *
      * {{{
      * import com.mipadi.jupiter.io.files._
      * val start = new File("src").toPath
      * val path = start / "main" / "scala" / "com" / "mipadi"
      * }}}
      *
      * @param that
      *   The path component to append to `path`
      * @return
      *   A new path consisting of `that` appended to `path`
      */
    def / (that: String): Path = ev.join(path, that)
  }

  /** Allows paths to be built from strings using the `p` prefix:
    *
    * {{{
    * import com.mipadi.jupiter.io.files._
    * val path = p"path/to/file.txt"
    * }}}
    *
    * @param ctx
    *   The wrapped string context
    */
  implicit class PathStringContext(ctx: StringContext) {

    /** Build a path using the `p` prefix:
      *
      * {{{
      * import com.mipadi.jupiter.io.files._
      * val path = p"path/to/file.txt"
      * }}}
      *
      * @param args
      *   String context arguments
      * @return
      *   A path represented by the given string context
      */
    def p(args: Any*): Path = FileSystems.getDefault.getPath(ctx.parts(0))
  }
}
mdippery/jupiter
src/main/scala/com/mipadi/jupiter/io/files/package.scala
Scala
apache-2.0
4,331
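A minimal usage sketch for the `RichFile`/`RichPath`/`p` extensions defined in the package object above. It assumes the jupiter library is on the classpath and that it ships `Locatable` instances for `java.io.File` and `java.nio.file.Path` (implied by the implicit `ev` parameters above); the file names are made up.

import java.io.File
import com.mipadi.jupiter.io.files._

object FilesExample extends App {
  // The `p` interpolator builds a java.nio.file.Path from a plain string
  val root = p"src/main/scala"

  // The `/` operator appends components, returning a new Path
  val pkg = root / "com" / "mipadi"

  // RichFile adds `path`, `isDirectory` and path-based ordering to File
  val src = new File("src")
  println(src.path)
  println(src.isDirectory)
  println(new File("a.txt") < new File("b.txt"))   // compared by path
}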
package com.socrata.datacoordinator.util.collection import scala.collection.JavaConverters._ import scala.annotation.unchecked.uncheckedVariance import com.socrata.datacoordinator.id.UserColumnId object UserColumnIdMap { def apply[V](kvs: (UserColumnId, V)*): UserColumnIdMap[V] = { val tmp = new MutableUserColumnIdMap[V] tmp ++= kvs tmp.freeze() } def apply[V](orig: Map[UserColumnId, V]): UserColumnIdMap[V] = MutableUserColumnIdMap(orig).freeze() private val EMPTY = UserColumnIdMap[Nothing]() def empty[V]: UserColumnIdMap[V] = EMPTY } class UserColumnIdMap[+V] private[collection](val unsafeUnderlying: java.util.HashMap[String, V @uncheckedVariance]) { @inline def size: Int = unsafeUnderlying.size @inline def isEmpty: Boolean = unsafeUnderlying.isEmpty @inline def nonEmpty: Boolean = !isEmpty @inline def contains(t: UserColumnId): Boolean = unsafeUnderlying.containsKey(t.underlying) @inline def get(t: UserColumnId): Option[V] = { val x = unsafeUnderlying.get(t.underlying) if(x.asInstanceOf[AnyRef] eq null) None else Some(x) } @inline def apply(t: UserColumnId): V = { val x = unsafeUnderlying.get(t.underlying) if(x.asInstanceOf[AnyRef] eq null) throw new NoSuchElementException("No key " + t) x } def iterator = new UserColumnIdMapIterator[V](unsafeUnderlying.entrySet.iterator) def ++[V2 >: V](that: UserColumnIdMap[V2]) = { val tmp = new java.util.HashMap[String, V2](this.unsafeUnderlying) tmp.putAll(that.unsafeUnderlying) new UserColumnIdMap[V2](tmp) } def ++[V2 >: V](that: MutableUserColumnIdMap[V2]) = { val tmp = new java.util.HashMap[String, V2](this.unsafeUnderlying) tmp.putAll(that.underlying) new UserColumnIdMap[V2](tmp) } def ++[V2 >: V](that: Iterable[(UserColumnId, V2)]) = { val tmp = new java.util.HashMap[String, V2](this.unsafeUnderlying) for((k, v) <- that) { tmp.put(k.underlying, v) } new UserColumnIdMap[V2](tmp) } def +[V2 >: V](kv: (UserColumnId, V2)) = { val tmp = new java.util.HashMap[String, V2](this.unsafeUnderlying) tmp.put(kv._1.underlying, kv._2) new UserColumnIdMap[V2](tmp) } def -(k: UserColumnId) = { val tmp = new java.util.HashMap[String, V](this.unsafeUnderlying) tmp.remove(k.underlying) new UserColumnIdMap[V](tmp) } @inline def getOrElse[B >: V](k: UserColumnId, v: => B): B = { val result = unsafeUnderlying.get(k.underlying) if(result == null) v else result } @inline def getOrElseStrict[B >: V](k: UserColumnId, v: B): B = { val result = unsafeUnderlying.get(k.underlying) if(result == null) v else result } def keys: Iterator[UserColumnId] = iterator.map(_._1) def values: Iterable[V] = unsafeUnderlying.values.asScala def keySet = new UserColumnIdSet(unsafeUnderlying.keySet) def mapValuesStrict[V2](f: V => V2) = { val x = new java.util.HashMap[String, V2] val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() x.put(ent.getKey, f(ent.getValue)) } new UserColumnIdMap[V2](x) } def transform[V2](f: (UserColumnId, V) => V2) = { val x = new java.util.HashMap[String, V2] val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() x.put(ent.getKey, f(new UserColumnId(ent.getKey), ent.getValue)) } new UserColumnIdMap[V2](x) } def foldLeft[S](init: S)(f: (S, (UserColumnId, V)) => S): S = { var seed = init val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() seed = f(seed, (new UserColumnId(ent.getKey), ent.getValue)) } seed } override def toString = unsafeUnderlying.toString def toSeq: Seq[(UserColumnId, V)] = { val arr = new Array[(UserColumnId, V)](unsafeUnderlying.size) val it = 
unsafeUnderlying.entrySet.iterator var i = 0 while(it.hasNext) { val ent = it.next() arr(i) = (new UserColumnId(ent.getKey), ent.getValue) i += 1 } arr } def foreach[U](f: ((UserColumnId, V)) => U) { val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() f((new UserColumnId(ent.getKey), ent.getValue)) } } def foreach[U](f: (UserColumnId, V) => U) { val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() f(new UserColumnId(ent.getKey), ent.getValue) } } def filter(f: (UserColumnId, V) => Boolean) = { val x = new java.util.HashMap[String, V] val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() if(f(new UserColumnId(ent.getKey), ent.getValue)) x.put(ent.getKey, ent.getValue) } new UserColumnIdMap[V](x) } def filterNot(f: (UserColumnId, V) => Boolean) = { val x = new java.util.HashMap[String, V] val it = unsafeUnderlying.entrySet.iterator while(it.hasNext) { val ent = it.next() if(!f(new UserColumnId(ent.getKey), ent.getValue)) x.put(ent.getKey, ent.getValue) } new UserColumnIdMap[V](x) } override def hashCode = unsafeUnderlying.hashCode override def equals(o: Any) = o match { case that: UserColumnIdMap[_] => this.unsafeUnderlying == that.unsafeUnderlying case _ => false } }
socrata-platform/data-coordinator
coordinatorlib/src/main/scala/com/socrata/datacoordinator/util/collection/UserColumnIdMap.scala
Scala
apache-2.0
5,345
package com.acework.js.components.bootstrap

import japgolly.scalajs.react._

/**
 * Created by weiyin on 10/03/15.
 */
object Accordion {

  val Accordion = ReactComponentB[Unit]("Accordion")
    .stateless
    .render { (P, C, _) =>
      // TODO spread props
      PanelGroup(PanelGroup.PanelGroup(accordion = true), C)
    }.buildU

  def apply(children: ReactNode*) = Accordion(children)
}
weiyinteo/scalajs-react-bootstrap
core/src/main/scala/com/acework/js/components/bootstrap/Accordion.scala
Scala
mit
389
/******************************************************************************* * Copyright 2017 Capital One Services, LLC and Bitwise, Inc. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ package hydrograph.engine.spark.components import java.util import java.util.Properties import hydrograph.engine.core.component.entity.CumulateEntity import hydrograph.engine.core.component.entity.elements.{SchemaField, _} import hydrograph.engine.spark.components.platform.BaseComponentParams import hydrograph.engine.testing.wrapper.{Bucket, DataBuilder, Fields} import org.apache.spark.sql.Row import org.hamcrest.CoreMatchers._ import org.junit.Assert._ import org.junit.Test /** * The Class CumulateComponentTest. * * @author Bitwise * */ class CumulateComponentTest { @Test def CumulateCountOfResultsAndMapFields(): Unit = { val df1 = new DataBuilder(Fields(List("col1", "col2", "col3", "col4")).applyTypes(List(classOf[String], classOf[String], classOf[String], classOf[String]))) .addData(List("C1R1", "C2R1", "C3Rx", "C4R1")) .addData(List("C1R1", "C2R2", "C3Rx", "C4R2")) .addData(List("C1R1", "C2R3", "C3Rx", "C4R3")) .build() val inSocketList = new util.ArrayList[InSocket] val inSocket = new InSocket("id", "name", "in0") inSocket.setFromSocketType("out") inSocket.setInSocketType("in") inSocketList.add(inSocket) val cumulateEntity: CumulateEntity = new CumulateEntity cumulateEntity.setComponentId("CumulateTest") cumulateEntity.setInSocketList(inSocketList) val keyField: KeyField = new KeyField keyField.setName("col1") keyField.setSortOrder("asc") cumulateEntity.setKeyFields(Array[KeyField](keyField)) val operationList: util.ArrayList[Operation] = new util.ArrayList[Operation] val operation: Operation = new Operation operation.setOperationId("operation1") operation.setOperationInputFields(Array[String]("col2")) operation.setOperationOutputFields(Array[String]("count")) operation.setOperationClass("hydrograph.engine.transformation.userfunctions.cumulate.Count") operation.setOperationProperties(new Properties) operationList.add(operation) cumulateEntity.setOperationsList(operationList) cumulateEntity.setNumOperations(1) cumulateEntity.setOperationPresent(true) // create outSocket val outSocket1: OutSocket = new OutSocket("out0") // set map fields val mapFieldsList: util.List[MapField] = new util.ArrayList[MapField] mapFieldsList.add(new MapField("col4", "col4_new", "in0")) outSocket1.setMapFieldsList(mapFieldsList) // set pass through fields val passThroughFieldsList1: util.List[PassThroughField] = new util.ArrayList[PassThroughField] passThroughFieldsList1.add(new PassThroughField("col1", "in0")) passThroughFieldsList1.add(new PassThroughField("col3", "in0")) outSocket1.setPassThroughFieldsList(passThroughFieldsList1) // set Operation Field val operationFieldsList: util.List[OperationField] = new util.ArrayList[OperationField] val operationField: OperationField = new OperationField("count", "operation1") operationFieldsList.add(operationField) 
outSocket1.setOperationFieldList(operationFieldsList) // add outSocket in list val outSocketList: util.List[OutSocket] = new util.ArrayList[OutSocket] outSocketList.add(outSocket1) cumulateEntity.setOutSocketList(outSocketList) val cp = new BaseComponentParams cp.addinputDataFrame(df1) cp.addSchemaFields(Array(new SchemaField("col1", "java.lang.String"), new SchemaField("count", "java.lang.Long"), new SchemaField("col4_new", "java.lang.String"), new SchemaField("col3", "java.lang.String"))) val cumulateDF = new CumulateComponent(cumulateEntity, cp).createComponent() val rows = Bucket(Fields(List("col1", "count", "col4_new", "col3")), cumulateDF.get("out0").get).result() assertThat(rows.size, is(3)) assertThat(rows(0), is(Row("C1R1", ("1").toLong, "C4R3", "C3Rx"))) assertThat(rows(1), is(Row("C1R1", ("2").toLong, "C4R2", "C3Rx"))) assertThat(rows(2), is(Row("C1R1", ("3").toLong, "C4R1", "C3Rx"))) } @Test def itShouldCumulateAndDoCountAndMapFieldsWithWildCardPassthroughFields(): Unit = { val df1 = new DataBuilder(Fields(List("col1", "col2", "col3", "col4")).applyTypes(List(classOf[String], classOf[String], classOf[String], classOf[String]))) .addData(List("C1R1", "C2R1", "C3Rx", "C4R1")) .addData(List("C1R1", "C2R2", "C3Rx", "C4R2")) .addData(List("C1R1", "C2R3", "C3Rx", "C4R3")) .build() val cumulateEntity: CumulateEntity = new CumulateEntity cumulateEntity.setComponentId("CumulateTest") val keyField: KeyField = new KeyField keyField.setName("col1") keyField.setSortOrder("asc") cumulateEntity.setKeyFields(Array[KeyField](keyField)) val inSocketList = new util.ArrayList[InSocket] val inSocket = new InSocket("id", "name", "in0") inSocket.setFromSocketType("out") inSocket.setInSocketType("in") inSocketList.add(inSocket) val operationList: util.ArrayList[Operation] = new util.ArrayList[Operation] val operation: Operation = new Operation operation.setOperationId("operation1") operation.setOperationInputFields(Array[String]("col2")) operation.setOperationOutputFields(Array[String]("count")) operation.setOperationClass("hydrograph.engine.transformation.userfunctions.cumulate.Count") operation.setOperationProperties(new Properties) operationList.add(operation) cumulateEntity.setOperationsList(operationList) cumulateEntity.setInSocketList(inSocketList) cumulateEntity.setNumOperations(1) cumulateEntity.setOperationPresent(true) // create outSocket val outSocket1: OutSocket = new OutSocket("out0") // set map fields val mapFieldsList: util.List[MapField] = new util.ArrayList[MapField] mapFieldsList.add(new MapField("col4", "col4_new", "in0")) outSocket1.setMapFieldsList(mapFieldsList) // set pass through fields val passThroughFieldsList1: util.List[PassThroughField] = new util.ArrayList[PassThroughField] passThroughFieldsList1.add(new PassThroughField("*", "in0")) outSocket1.setPassThroughFieldsList(passThroughFieldsList1) // set Operation Field val operationFieldsList: util.List[OperationField] = new util.ArrayList[OperationField] val operationField: OperationField = new OperationField("count", "operation1") operationFieldsList.add(operationField) outSocket1.setOperationFieldList(operationFieldsList) // add outSocket in list val outSocketList: util.List[OutSocket] = new util.ArrayList[OutSocket] outSocketList.add(outSocket1) cumulateEntity.setOutSocketList(outSocketList) val cp = new BaseComponentParams cp.addinputDataFrame(df1) cp.addSchemaFields(Array(new SchemaField("col1", "java.lang.String"), new SchemaField("col2", "java.lang.String"), new SchemaField("col3", "java.lang.String"), new 
SchemaField("col4", "java.lang.String"), new SchemaField("col4_new", "java.lang.String"), new SchemaField("count", "java.lang.Long"))) val cumulateDF = new CumulateComponent(cumulateEntity, cp).createComponent() val rows = Bucket(Fields(List("col1", "col2", "col3", "col4", "col4_new", "count")), cumulateDF.get("out0").get).result() assertThat(rows.size, is(3)) assertThat(rows(0), is(Row("C1R1", "C2R3", "C3Rx", "C4R3", "C4R3", ("1").toLong))) assertThat(rows(1), is(Row("C1R1", "C2R2", "C3Rx", "C4R2", "C4R2", ("2").toLong))) assertThat(rows(2), is(Row("C1R1", "C2R1", "C3Rx", "C4R1", "C4R1", ("3").toLong))) } @Test def itShouldCumulateAndCountWithWildCardPassthroughFieldsWithPriority(): Unit = { val df1 = new DataBuilder(Fields(List("col1", "col2", "col3")).applyTypes(List(classOf[String], classOf[String], classOf[String]))) .addData(List("C1R1", "C2R1", "C3Rx")) .addData(List("C1R1", "C2R2", "C3Rx")) .addData(List("C1R1", "C2R3", "C3Rx")) .build() val cumulateEntity: CumulateEntity = new CumulateEntity cumulateEntity.setComponentId("CumulateTest") val keyField: KeyField = new KeyField keyField.setName("col1") keyField.setSortOrder("asc") cumulateEntity.setKeyFields(Array[KeyField](keyField)) val inSocketList = new util.ArrayList[InSocket] val inSocket = new InSocket("id", "name", "in0") inSocket.setFromSocketType("out") inSocket.setInSocketType("in") inSocketList.add(inSocket) val operationList: util.ArrayList[Operation] = new util.ArrayList[Operation] val operation: Operation = new Operation operation.setOperationId("operation1") operation.setOperationInputFields(Array[String]("col2")) operation.setOperationOutputFields(Array[String]("count")) operation.setOperationClass("hydrograph.engine.transformation.userfunctions.cumulate.Count") operation.setOperationProperties(new Properties) operationList.add(operation) cumulateEntity.setOperationsList(operationList) cumulateEntity.setInSocketList(inSocketList) cumulateEntity.setNumOperations(1) cumulateEntity.setOperationPresent(true) // create outSocket val outSocket1: OutSocket = new OutSocket("out0") // set map fields val mapFieldsList: util.List[MapField] = new util.ArrayList[MapField] mapFieldsList.add(new MapField("col3", "col3_new", "in0")) outSocket1.setMapFieldsList(mapFieldsList) // set pass through fields val passThroughFieldsList1: util.List[PassThroughField] = new util.ArrayList[PassThroughField] passThroughFieldsList1.add(new PassThroughField("*", "in0")) outSocket1.setPassThroughFieldsList(passThroughFieldsList1) // set Operation Field val operationFieldsList: util.List[OperationField] = new util.ArrayList[OperationField] val operationField: OperationField = new OperationField("count", "operation1") operationFieldsList.add(operationField) outSocket1.setOperationFieldList(operationFieldsList) // add outSocket in list val outSocketList: util.List[OutSocket] = new util.ArrayList[OutSocket] outSocketList.add(outSocket1) cumulateEntity.setOutSocketList(outSocketList) val cp = new BaseComponentParams cp.addinputDataFrame(df1) cp.addSchemaFields(Array( new SchemaField("count", "java.lang.Long"),new SchemaField("col3_new", "java.lang.String"),new SchemaField("col1", "java.lang.String"), new SchemaField("col2", "java.lang.String"), new SchemaField("col3", "java.lang.String"))) val cumulateDF = new CumulateComponent(cumulateEntity, cp).createComponent() val rows = Bucket(Fields(List( "count","col3_new","col1", "col2", "col3")), cumulateDF.get("out0").get).result() assertThat(rows.size, is(3)) assertThat(rows(0), is(Row(("1").toLong, "C3Rx", 
"C1R1", "C2R3", "C3Rx"))) assertThat(rows(2), is(Row(("3").toLong, "C3Rx", "C1R1", "C2R1", "C3Rx"))) } @Test def itShouldRunWithoutOperationInFields(): Unit = { val df1 = new DataBuilder(Fields(List("col1", "col2", "col3")).applyTypes(List(classOf[String], classOf[String], classOf[String]))) .addData(List("C1R1", "C2R1", "C3Rx")) .addData(List("C1R1", "C2R2", "C3Rx")) .addData(List("C1R1", "C2R3", "C3Rx")) .build() val cumulateEntity: CumulateEntity = new CumulateEntity cumulateEntity.setComponentId("CumulateTest") val keyField: KeyField = new KeyField keyField.setName("col1") keyField.setSortOrder("asc") cumulateEntity.setKeyFields(Array[KeyField](keyField)) val inSocketList = new util.ArrayList[InSocket] val inSocket = new InSocket("id", "name", "in0") inSocket.setFromSocketType("out") inSocket.setInSocketType("in") inSocketList.add(inSocket) val operationList: util.ArrayList[Operation] = new util.ArrayList[Operation] val operation: Operation = new Operation operation.setOperationId("operation1") operation.setOperationOutputFields(Array[String]("count")) operation.setOperationClass("hydrograph.engine.transformation.userfunctions.cumulate.CumulateWithoutOperationInFields") operation.setOperationProperties(new Properties) operationList.add(operation) cumulateEntity.setOperationsList(operationList) cumulateEntity.setInSocketList(inSocketList) cumulateEntity.setNumOperations(1) cumulateEntity.setOperationPresent(true) // create outSocket val outSocket1: OutSocket = new OutSocket("out0") // set map fields val mapFieldsList: util.List[MapField] = new util.ArrayList[MapField] mapFieldsList.add(new MapField("col3", "col3_new", "in0")) outSocket1.setMapFieldsList(mapFieldsList) // set pass through fields val passThroughFieldsList1: util.List[PassThroughField] = new util.ArrayList[PassThroughField] passThroughFieldsList1.add(new PassThroughField("*", "in0")) outSocket1.setPassThroughFieldsList(passThroughFieldsList1) // set Operation Field val operationFieldsList: util.List[OperationField] = new util.ArrayList[OperationField] val operationField: OperationField = new OperationField("count", "operation1") operationFieldsList.add(operationField) outSocket1.setOperationFieldList(operationFieldsList) // add outSocket in list val outSocketList: util.List[OutSocket] = new util.ArrayList[OutSocket] outSocketList.add(outSocket1) cumulateEntity.setOutSocketList(outSocketList) val cp = new BaseComponentParams cp.addinputDataFrame(df1) cp.addSchemaFields(Array(new SchemaField("count", "java.lang.Long"),new SchemaField("col3_new", "java.lang.String"),new SchemaField("col1", "java.lang.String"), new SchemaField("col2", "java.lang.String"), new SchemaField("col3", "java.lang.String"))) val cumulateDF = new CumulateComponent(cumulateEntity, cp).createComponent() val rows = Bucket(Fields(List("count", "col3_new","col1", "col2", "col3")), cumulateDF.get("out0").get).result() assertThat(rows.size, is(3)) assertThat(rows(0), is(Row(("0").toLong,"C3Rx", "C1R1", "C2R3", "C3Rx"))) assertThat(rows(2), is(Row(("0").toLong, "C3Rx", "C1R1", "C2R1", "C3Rx"))) } }
capitalone/Hydrograph
hydrograph.engine/hydrograph.engine.spark/src/test/scala/hydrograph/engine/spark/components/CumulateComponentTest.scala
Scala
apache-2.0
14,786
/*
 * Copyright (c) 2019 Georgios Andreadakis
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.tap.framework.persistence.elastic

import java.util

import org.elasticsearch.ElasticsearchStatusException
import org.elasticsearch.client.RestHighLevelClient

import org.tap.domain.Document
import org.tap.framework.persistence.elastic.mapping.DocumentIndexRequestMapper

/**
  * Reading a single document.
  *
  * @author Georgios Andreadakis ([email protected])
  */
class ReadSingleDocOperation(docId: String) extends PersistenceOperation {

  private var result: Document = _

  def getResult: Either[Exception,Document] = Right(result)

  override def run(client: RestHighLevelClient): Unit = {
    val mapper = new DocumentIndexRequestMapper(client)
    try {
      val getResponse = mapper.readDoc(docId)
      result = convert(getResponse.getSourceAsMap, mapper)
    } catch {
      case e: ElasticsearchStatusException => throw e
    }
  }

  private def convert(sourceMap: util.Map[String,AnyRef], mapper: DocumentIndexRequestMapper): Document = {
    mapper.convert(sourceMap, docId)
  }
}
GeorgiosAndreadakis/TextAnalyzerPlatform
framework/src/main/scala/org/tap/framework/persistence/elastic/ReadSingleDocOperation.scala
Scala
apache-2.0
1,635
/* ___ _ ___ _ _ *\\ ** / __| |/ (_) | | The SKilL Generator ** ** \\__ \\ ' <| | | |__ (c) 2013 University of Stuttgart ** ** |___/_|\\_\\_|_|____| see LICENSE ** \\* */ package de.ust.skill.generator.ada import scala.collection.JavaConversions._ trait PackageBodyMaker extends GeneralOutputMaker { abstract override def make { super.make val out = open(s"""${packagePrefix}.adb""") out.write(s""" package body ${packagePrefix.capitalize} is function Hash (Element : Short_Short_Integer) return Ada.Containers.Hash_Type is (Ada.Containers.Hash_Type'Mod (Element)); function Hash (Element : Short) return Ada.Containers.Hash_Type is (Ada.Containers.Hash_Type'Mod (Element)); function Hash (Element : Integer) return Ada.Containers.Hash_Type is (Ada.Containers.Hash_Type'Mod (Element)); function Hash (Element : Long) return Ada.Containers.Hash_Type is (Ada.Containers.Hash_Type'Mod (Element)); function Hash (Element : String_Access) return Ada.Containers.Hash_Type is (Ada.Strings.Hash (Element.all)); function "=" (Left, Right : String_Access) return Boolean is (Left.all = Right.all); function Hash (Element : Skill_Type_Access) return Ada.Containers.Hash_Type is function Convert is new Ada.Unchecked_Conversion (Skill_Type_Access, i64); begin return Ada.Containers.Hash_Type'Mod (Convert (Element)); end; ${ var output = ""; /** * Provides the hash function of every type. */ for (d ← IR) { output += s""" function Hash (Element : ${escaped(d.getName)}_Type_Access) return Ada.Containers.Hash_Type is\\r\\n (Hash (Skill_Type_Access (Element)));\\r\\n\\r\\n""" } /** * Provides the accessor functions to the fields of every type. */ for (d ← IR) { d.getAllFields.filter { f ⇒ !f.isIgnored }.foreach({ f ⇒ if (f.isConstant) { output += s""" function Get_${f.getName.capitalize} (Object : ${escaped(d.getName)}_Type) return ${mapType(f.getType, d, f)} is\\r\\n (${f.constantValue});\\r\\n\\r\\n""" } else { output += s""" function Get_${f.getName.capitalize} (Object : ${escaped(d.getName)}_Type) return ${mapType(f.getType, d, f)} is\\r\\n (Object.${f.getSkillName});\\r\\n\\r\\n""" output += s""" procedure Set_${f.getName.capitalize} ( Object : in out ${escaped(d.getName)}_Type; Value : ${mapType(f.getType, d, f)} ) is begin Object.${f.getSkillName} := Value; end Set_${f.getName.capitalize};\\r\\n\\r\\n""" } }) } output.stripLineEnd.stripLineEnd } end ${packagePrefix.capitalize}; """) out.close() } }
XyzNobody/skill
src/main/scala/de/ust/skill/generator/ada/PackageBodyMaker.scala
Scala
bsd-3-clause
2,887
package com.twitter.finagle.memcached.unit

import com.twitter.finagle.memcached.MockClient
import com.twitter.finagle.memcached.protocol.ClientError
import com.twitter.util.Await
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class MockClientTest extends FunSuite {

  test("correctly perform the GET command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(Await.result(memcache.get("key")) === Some("value"))
    assert(Await.result(memcache.get("unknown")) === None)
  }

  test("correctly perform the SET command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(Await.result(memcache.set("key", "new value")) === (()))
    assert(Await.result(memcache.get("key")) === Some("new value"))

    assert(Await.result(memcache.set("key2", "value2")) === (()))
    assert(Await.result(memcache.get("key2")) === Some("value2"))

    assert(Await.result(memcache.set("key2", "value3")) === (()))
    assert(Await.result(memcache.get("key2")) === Some("value3"))
  }

  test("correctly perform the ADD command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(!Await.result(memcache.add("key", "new value")))
    assert(Await.result(memcache.get("key")) === Some("value"))

    assert(Await.result(memcache.add("key2", "value2")))
    assert(Await.result(memcache.get("key2")) === Some("value2"))

    assert(!Await.result(memcache.add("key2", "value3")))
    assert(Await.result(memcache.get("key2")) === Some("value2"))
  }

  test("correctly perform the APPEND command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(Await.result(memcache.append("key", "More")))
    assert(Await.result(memcache.get("key")) === Some("valueMore"))

    assert(!Await.result(memcache.append("unknown", "value")))
    assert(Await.result(memcache.get("unknown")) === None)
  }

  test("correctly perform the PREPEND command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(Await.result(memcache.prepend("key", "More")))
    assert(Await.result(memcache.get("key")) === Some("Morevalue"))

    assert(!Await.result(memcache.prepend("unknown", "value")))
    assert(Await.result(memcache.get("unknown")) === None)
  }

  test("correctly perform the REPLACE command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(Await.result(memcache.replace("key", "new value")))
    assert(Await.result(memcache.get("key")) === Some("new value"))

    assert(!Await.result(memcache.replace("unknown", "value")))
    assert(Await.result(memcache.get("unknown")) === None)
  }

  test("correctly perform the DELETE command") {
    val memcache = new MockClient(Map("key" -> "value")).withStrings

    assert(Await.result(memcache.delete("key")))
    assert(Await.result(memcache.get("key")) === None)

    assert(!Await.result(memcache.delete("unknown")))
    assert(Await.result(memcache.get("unknown")) === None)
  }

  test("correctly perform the INCR command") {
    val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings

    intercept[ClientError] { Await.result(memcache.incr("key")) }
    assert(Await.result(memcache.get("key")) === Some("value"))

    assert(Await.result(memcache.incr("count")) === Some(2))
    assert(Await.result(memcache.get("count")) === Some("2"))

    assert(Await.result(memcache.incr("unknown")) === None)
    assert(Await.result(memcache.get("unknown")) === None)
  }

  test("correctly perform the DECR command") {
    val memcache = new MockClient(Map("key" -> "value", "count" -> "1")).withStrings

    intercept[ClientError] { Await.result(memcache.decr("key")) }
    assert(Await.result(memcache.get("key")) === Some("value"))

    assert(Await.result(memcache.decr("count")) === Some(0))
    assert(Await.result(memcache.get("count")) === Some("0"))

    assert(Await.result(memcache.decr("count")) === Some(0))
    assert(Await.result(memcache.get("count")) === Some("0"))

    assert(Await.result(memcache.decr("unknown")) === None)
    assert(Await.result(memcache.get("unknown")) === None)
  }
}
Krasnyanskiy/finagle
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/MockClientTest.scala
Scala
apache-2.0
4,234
package com.twitter.scalding.parquet.scrooge

import cascading.scheme.Scheme
import com.twitter.scalding._
import com.twitter.scalding.parquet.thrift.ParquetThriftBase
import com.twitter.scalding.source.{ DailySuffixSource, HourlySuffixSource }
import com.twitter.scrooge.ThriftStruct

trait ParquetScrooge[T <: ThriftStruct] extends ParquetThriftBase[T] {

  override def hdfsScheme = {
    // See docs in Parquet346ScroogeScheme
    val scheme = new Parquet346ScroogeScheme[T](this.config)
    HadoopSchemeInstance(scheme.asInstanceOf[Scheme[_, _, _, _, _]])
  }
}

class DailySuffixParquetScrooge[T <: ThriftStruct](
  path: String,
  dateRange: DateRange)(implicit override val mf: Manifest[T])
  extends DailySuffixSource(path, dateRange) with ParquetScrooge[T]

class HourlySuffixParquetScrooge[T <: ThriftStruct](
  path: String,
  dateRange: DateRange)(implicit override val mf: Manifest[T])
  extends HourlySuffixSource(path, dateRange) with ParquetScrooge[T]

class FixedPathParquetScrooge[T <: ThriftStruct](paths: String*)(implicit override val mf: Manifest[T])
  extends FixedPathSource(paths: _*) with ParquetScrooge[T]
tglstory/scalding
scalding-parquet-scrooge/src/main/scala/com/twitter/scalding/parquet/scrooge/ParquetScrooge.scala
Scala
apache-2.0
1,135
/*
 * Licensed to Cloudera, Inc. under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  Cloudera, Inc. licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.cloudera.hue.livy.server.interactive

import com.cloudera.hue.livy.server.SessionFactory
import com.cloudera.hue.livy.sessions.SessionKindSerializer
import org.json4s.{DefaultFormats, Formats, JValue}

import scala.concurrent.Future

trait InteractiveSessionFactory extends SessionFactory[InteractiveSession] {

  override protected implicit def jsonFormats: Formats = DefaultFormats ++ List(SessionKindSerializer)

  override def create(id: Int, createRequest: JValue) =
    create(id, createRequest.extract[CreateInteractiveRequest])

  def create(id: Int, createRequest: CreateInteractiveRequest): Future[InteractiveSession]
}
epssy/hue
apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/interactive/InteractiveSessionFactory.scala
Scala
apache-2.0
1,434
package org.jetbrains.plugins.scala.debugger.smartStepInto import java.util.{Collections, List => JList} import com.intellij.debugger.SourcePosition import com.intellij.debugger.actions.{JvmSmartStepIntoHandler, MethodSmartStepTarget, SmartStepTarget} import com.intellij.openapi.fileEditor.FileDocumentManager import com.intellij.openapi.util.TextRange import com.intellij.psi._ import com.intellij.psi.util.PsiTreeUtil import com.intellij.util.Range import com.intellij.util.text.CharArrayUtil import org.jetbrains.plugins.scala.codeInspection.collections.{MethodRepr, stripped} import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._ import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScParameterizedTypeElement, ScSimpleTypeElement, ScTypeElement} import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScMethodLike} import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunctionDefinition import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass import org.jetbrains.plugins.scala.lang.psi.api.{ScalaFile, ScalaRecursiveElementVisitor} import scala.annotation.tailrec import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer /** * User: Alexander Podkhalyuzin * Date: 26.01.12 */ class ScalaSmartStepIntoHandler extends JvmSmartStepIntoHandler { override def findSmartStepTargets(position: SourcePosition): JList[SmartStepTarget] = { val line: Int = position.getLine if (line < 0) { return Collections.emptyList[SmartStepTarget] } val (element, doc) = (for { sf @ (_sf: ScalaFile) <- position.getFile.toOption if !sf.isCompiled vFile <- sf.getVirtualFile.toOption doc <- FileDocumentManager.getInstance().getDocument(vFile).toOption if doc.getLineCount > line } yield { val startOffset: Int = doc.getLineStartOffset(line) val offset: Int = CharArrayUtil.shiftForward(doc.getCharsSequence, startOffset, " \\t{") val element: PsiElement = sf.findElementAt(offset) (element, doc) }) match { case Some((null, _)) => return Collections.emptyList[SmartStepTarget] case Some((e, d)) => (e, d) case _ => return Collections.emptyList[SmartStepTarget] } val lineStart = doc.getLineStartOffset(line) val lineRange = new TextRange(lineStart, doc.getLineEndOffset(line)) val maxElement = maxElementOnLine(element, lineStart) val lineToSkip = new Range[Integer](line, line) def intersectsWithLineRange(elem: PsiElement) = { lineRange.intersects(elem.getTextRange) } val collector = new TargetCollector(lineToSkip, intersectsWithLineRange) maxElement.accept(collector) maxElement.nextSiblings .takeWhile(intersectsWithLineRange) .foreach(_.accept(collector)) collector.result.sortBy(_.getHighlightElement.getTextOffset).asJava } def isAvailable(position: SourcePosition): Boolean = { val file: PsiFile = position.getFile file.isInstanceOf[ScalaFile] } override def createMethodFilter(stepTarget: SmartStepTarget) = { stepTarget match { case methodTarget: MethodSmartStepTarget => val scalaFilter = methodTarget.getMethod match { case f @ (_: ScMethodLike | _: FakeAnonymousClassConstructor) if stepTarget.needsBreakpointRequest() => ScalaBreakpointMethodFilter.from(f, stepTarget.getCallingExpressionLines) case fun: ScMethodLike => Some(new ScalaMethodFilter(fun, stepTarget.getCallingExpressionLines)) case _ => None } 
scalaFilter.getOrElse(super.createMethodFilter(stepTarget)) case ScalaFunExprSmartStepTarget(fExpr, stmts) => ScalaBreakpointMethodFilter.from(None, stmts, stepTarget.getCallingExpressionLines) .getOrElse(super.createMethodFilter(stepTarget)) case _ => super.createMethodFilter(stepTarget) } } @tailrec private def maxElementOnLine(startElem: PsiElement, lineStart: Int): PsiElement = { val parent = startElem.getParent parent match { case _: ScBlock | null => startElem case p if p.getTextRange.getStartOffset >= lineStart => maxElementOnLine(parent, lineStart) case _ => startElem } } private class TargetCollector(noStopAtLines: Range[Integer], elementFilter: PsiElement => Boolean) extends ScalaRecursiveElementVisitor { val result = ArrayBuffer[SmartStepTarget]() override def visitNewTemplateDefinition(templ: ScNewTemplateDefinition): Unit = { if (!elementFilter(templ)) return val extBl = templ.extendsBlock var label = "" def findConstructor(typeElem: ScTypeElement): Option[ScConstructor] = typeElem match { case p: ScParameterizedTypeElement => p.findConstructor case s: ScSimpleTypeElement => s.findConstructor case _ => None } def addConstructor(): Unit = { for { tp <- extBl.templateParents typeElem <- tp.typeElements.headOption constr <- findConstructor(typeElem) ref <- constr.reference } yield { label = constr.simpleTypeElement.fold("")(ste => s"new ${ste.getText}.") val generateAnonClass = DebuggerUtil.generatesAnonClass(templ) val method = ref.resolve() match { case m: PsiMethod if !generateAnonClass => m case _ => new FakeAnonymousClassConstructor(templ, ref.refName) } result += new MethodSmartStepTarget(method, "new ", constr, /*needBreakpointRequest = */ generateAnonClass, noStopAtLines) } } def addMethodsIfInArgument(): Unit = { PsiTreeUtil.getParentOfType(templ, classOf[MethodInvocation]) match { case MethodRepr(_, _, _, args) if args.map(stripped).contains(templ) => extBl.templateBody match { case Some(tb) => for { fun @ (_f: ScFunctionDefinition) <- tb.functions body <- fun.body } { result += new MethodSmartStepTarget(fun, label, body, true, noStopAtLines) } case _ => } case _ => } } addConstructor() addMethodsIfInArgument() } override def visitExpression(expr: ScExpression) { if (!elementFilter(expr)) return val implicits = expr.getImplicitConversions()._2 implicits match { case Some(f: PsiMethod) if f.isPhysical => //synthetic conversions are created for implicit classes result += new MethodSmartStepTarget(f, "implicit ", expr, false, noStopAtLines) case _ => } expr match { case ScalaPsiUtil.MethodValue(m) => result += new MethodSmartStepTarget(m, null, expr, true, noStopAtLines) return case FunExpressionTarget(stmts, presentation) => result += new ScalaFunExprSmartStepTarget(expr, stmts, presentation, noStopAtLines) return //stop at function expression case ref: ScReferenceExpression => ref.resolve() match { case fun: ScFunctionDefinition if fun.name == "apply" && ref.refName != "apply" => val prefix = s"${ref.refName}." 
result += new MethodSmartStepTarget(fun, prefix, ref.nameId, false, noStopAtLines) case Both(f: ScFunctionDefinition, ContainingClass(cl: ScClass)) if cl.getModifierList.hasModifierProperty("implicit") => val isActuallyImplicit = ref.qualifier.exists(_.getImplicitConversions()._2.nonEmpty) val prefix = if (isActuallyImplicit) "implicit " else null result += new MethodSmartStepTarget(f, prefix, ref.nameId, false, noStopAtLines) case fun: PsiMethod => result += new MethodSmartStepTarget(fun, null, ref.nameId, false, noStopAtLines) case _ => } case _ => } super.visitExpression(expr) } override def visitPattern(pat: ScPattern): Unit = { if (!elementFilter(pat)) return val ref = pat match { case cp: ScConstructorPattern => Some(cp.ref) case ip: ScInfixPattern => Some(ip.reference) case _ => None } ref match { case Some(r @ ResolvesTo(f: ScFunctionDefinition)) => val prefix = s"${r.refName}." result += new MethodSmartStepTarget(f, prefix, r.nameId, false, noStopAtLines) case _ => } super.visitPattern(pat) } } }
whorbowicz/intellij-scala
src/org/jetbrains/plugins/scala/debugger/smartStepInto/ScalaSmartStepIntoHandler.scala
Scala
apache-2.0
8,792
package com.lambtors.poker_api.module.poker.application.win

import cats.implicits._

import com.lambtors.poker_api.module.poker.domain.{PlayerRepository, PokerGameRepository}
import com.lambtors.poker_api.module.poker.domain.error.{GameCannotEndWhenRiverIsNotDealt, PokerGameNotFound}
import com.lambtors.poker_api.module.poker.domain.model._
import com.lambtors.poker_api.module.shared.domain.types.ThrowableTypeClasses.MonadErrorThrowable

final class GameWinnersFinder[P[_]: MonadErrorThrowable](repository: PokerGameRepository[P],
                                                         playerRepository: PlayerRepository[P]) {

  def findWinners(gameId: GameId): P[List[Player]] =
    repository
      .search(gameId)
      .fold[P[List[Player]]](MonadErrorThrowable[P].raiseError(PokerGameNotFound(gameId)))(game =>
        cardsAtTableNumberIsLowerThanFive(game.tableCards).ifM(
          MonadErrorThrowable[P].raiseError(GameCannotEndWhenRiverIsNotDealt(gameId)),
          playerRepository.search(gameId).map(findPlayersWithBestCombination(_, game.tableCards))
        ))
      .flatten

  private def findPlayersWithBestCombination(players: List[Player], tableCards: List[Card]): List[Player] = {
    val bestCombinations = players.map(findBestCombinationOf(_, tableCards))

    val highestCardValueOutOfAllCombinations = bestCombinations
      .map(_.combination match {
        case HighCard(card) => card.cardValue
      })
      .sorted(CardValueOrdering.highestValueToLowest)
      .head

    val playerBestCombinationsWithHighestCardValue = bestCombinations.filter(_.combination match {
      case HighCard(card) => card.cardValue == highestCardValueOutOfAllCombinations
    })

    playerBestCombinationsWithHighestCardValue.map(_.player)
  }

  private def findBestCombinationOf(player: Player, tableCards: List[Card]): PlayerBestCombination = {
    // TODO add complex logic here to accept pairs, trios, pokers, flushes, etc as combinations
    val bestCard =
      (List(player.firstCard, player.secondCard) ++ tableCards).sorted(CardOrdering.highestValueToLowest).head

    PlayerBestCombination(player, HighCard(bestCard))
  }

  private def cardsAtTableNumberIsLowerThanFive(tableCards: List[Card]): P[Boolean] =
    (tableCards.length < 5).pure[P]
}

case class PlayerBestCombination(player: Player, combination: CardCombination)

sealed abstract class CardCombination
final case class HighCard(card: Card) extends CardCombination
lambtors/poker-api
src/main/scala/com/lambtors/poker_api/module/poker/application/win/GameWinnersFinder.scala
Scala
mit
2,447
/**
 * Copyright 2016 Martin Snyder
 *
 * Apache License
 * Version 2.0, January 2004
 * http://www.apache.org/licenses/
 */
package com.martinsnyder.logger

case class SimpleWriter[T](log: List[String], value: T) {
  def map[B](f: T => B): SimpleWriter[B] =
    SimpleWriter(log, f(value))

  def flatMap[B](f: T => SimpleWriter[B]): SimpleWriter[B] = {
    val nextWriter = f(value)
    SimpleWriter(log ::: nextWriter.log, nextWriter.value)
  }
}
MartinSnyder/monadic-logging
src/main/scala/com/martinsnyder/logger/SimpleWriter.scala
Scala
apache-2.0
456
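A small, hypothetical usage sketch for the SimpleWriter above: because it defines map and flatMap, it composes in a for-comprehension, and flatMap concatenates the logs of each step.

import com.martinsnyder.logger.SimpleWriter

object SimpleWriterExample extends App {
  def parse(s: String): SimpleWriter[Int] =
    SimpleWriter(List(s"parsed '$s'"), s.toInt)

  def double(n: Int): SimpleWriter[Int] =
    SimpleWriter(List(s"doubled $n"), n * 2)

  val result = for {
    n <- parse("21")
    d <- double(n)
  } yield d

  println(result.value)  // 42
  println(result.log)    // List(parsed '21', doubled 21)
}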
package com.github.aselab.activerecord.experimental

import com.github.aselab.activerecord._
import com.github.aselab.activerecord.dsl._

class VersionsSpec extends DatabaseSpecification {
  "Versions" should {
    val modelName = "com.github.aselab.activerecord.models.VersionModel"

    // Japanese spec name; roughly: "doUpdate should store the changes as Version records"
    "doUpdateでVersionに保存されること" in {
      val model = models.VersionModel("str", true, 10, Some("aaa"))
      model.save()

      val m1 = model.map("string" -> "bbb", "boolean" -> true)
      m1.save()

      val m2 = m1.map("string" -> "bbb", "boolean" -> false)
      m2.save()

      Version.all.toList must equalTo(List(
        Version(modelName, 1, "string", "str", "bbb"),
        Version(modelName, 1, "boolean", "true", "false")
      ))
    }
  }
}
aselab/scala-activerecord
activerecord/src/test/scala/experimemtal/VersionsSpec.scala
Scala
mit
760
package io.coral.api

import akka.actor.ActorSystem
import akka.actor.Props
import akka.io.IO
import akka.util.Timeout
import io.coral.actors.RuntimeActor
import spray.can.Http
import scala.concurrent.duration._
import akka.pattern.ask
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global

object Boot extends App {
  implicit val system = ActorSystem()

  // create the coral actor
  val coral = system.actorOf(Props(classOf[RuntimeActor],
    new DefaultModule(system.settings.config)), "coral")

  // create and start our service actor
  val service = system.actorOf(Props[ApiServiceActor], "api")

  // fetch configuration from resources/application.config
  val interface = system.settings.config getString "service.interface"
  val port = system.settings.config getInt "service.port"

  // start a new HTTP server with our service actor as the handler
  implicit val timeout = Timeout(5.seconds)

  val future = (IO(Http) ? Http.Bind(service, interface, port)).map {
    case _: Http.Bound => true
    case _ => false
  }

  val bounded = Await.result(future, timeout.duration)

  if (!bounded) {
    System.exit(-1)
  }
}
daishichao/coral
runtime-api/src/main/scala/io/coral/api/Boot.scala
Scala
apache-2.0
1,159
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.sogou.spark

import com.typesafe.config.{Config, ConfigFactory, ConfigValue}

/**
 * Created by Tao Li on 9/30/15.
 */
class Settings(config: Config) extends Serializable {
  config.checkValid(ConfigFactory.defaultReference(), "app")

  val SPARK_APP_NAME = config.getString("app.spark.appName")

  import scala.collection.JavaConversions._

  val sparkConfigMap = if (config.hasPath("app.spark.passthrough")) {
    config.getConfig("app.spark.passthrough").root map {
      case (key: String, cv: ConfigValue) => (key, cv.atPath(key).getString(key))
    }
  } else {
    Map.empty[String, String]
  }
}
litao-buptsse/spark-startup
src/main/scala/com/sogou/spark/Settings.scala
Scala
apache-2.0
1,424
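A brief usage sketch for the Settings wrapper above. It assumes the application's classpath provides a reference.conf that defines the app path (checkValid requires it) and an application.conf with app.spark.appName; the printed values are illustrative only.

import com.typesafe.config.ConfigFactory
import com.sogou.spark.Settings

object SettingsExample extends App {
  // ConfigFactory.load() reads application.conf / reference.conf from the classpath
  val settings = new Settings(ConfigFactory.load())

  println(settings.SPARK_APP_NAME)   // whatever app.spark.appName is set to
  println(settings.sparkConfigMap)   // any app.spark.passthrough.* entries, as a Map
}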
package afronski.playground.akka.actors

import akka.actor.Props
import akka.actor.Actor
import akka.actor.ActorSystem

case class DoWork(factor: Int)

class RestartException(val message: String) extends Exception(message)

class OneTimeWorker extends Actor {

  override def postRestart(reason: Throwable) = {
    System.out.println(s"OneTimeWorker restarted because of: '$reason'.");
  }

  def receive = {
    case DoWork(x) => {
      sender ! x * 2

      // Difference between PoisonPill and context.stop(self):
      //
      // PoisonPill will come after the rest of the messages gathered in the mailbox.
      // So if your one-time actor receives more than one request for 'do work' like below:
      //    worker ! DoWork(2)
      //    worker ! DoWork(3)
      //
      // the second request will be processed before the PoisonPill, because the pill
      // is only sent after the first request has been processed.

      System.out.println(s"OneTimeWorker '$self' received '$x' from '$sender' - quiting...")
      context.stop(self)
    }

    case _ => {
      throw new RestartException("Restarting OneTimeWorker because of unsupported message!")
    }
  }
}

object OneTimeWorker {
  def spawn(system: ActorSystem, uniqueness: Int) = {
    system.actorOf(Props[OneTimeWorker], name = s"hashed-id-$uniqueness")
  }
}
afronski/playground-jvm
scala/akka/actor-patterns/src/main/scala/actors/OneTimeWorker.scala
Scala
mit
1,246
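A minimal driver for the OneTimeWorker above (classic Akka actors); the surrounding object and system name are illustrative only.

import akka.actor.ActorSystem
import afronski.playground.akka.actors.{DoWork, OneTimeWorker}

object OneTimeWorkerExample extends App {
  val system = ActorSystem("one-time-worker-example")
  val worker = OneTimeWorker.spawn(system, uniqueness = 1)

  // The first message is doubled and echoed back to the sender (dead letters here,
  // since we are outside an actor); the worker then stops itself with context.stop(self),
  // so the second message ends up in dead letters.
  worker ! DoWork(2)
  worker ! DoWork(3)
}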
package com.lucaswilkins.newtonfractals

/**
 * Trait that allows conversion between pixels and values in [0,1]
 */
trait PixelValueMapping {
  def xSize: Int
  def ySize: Int

  /**
   * Converts a pixel coordinate to [0,1]
   *
   * @param i pixel x
   * @param j pixel y
   * @return a tuple of doubles in [0,1]x[0,1]
   */
  def indexValue(i: Int, j: Int): Tuple2[Double, Double] =
    (i.toDouble / xSize, j.toDouble / ySize)

  /**
   * Converts a value in [0,1] to a pixel
   *
   * @param a x value
   * @param b y value
   * @return a pixel index
   */
  def pixelValue(a: Double, b: Double) = {
    val i = (a * xSize).toInt
    val j = (b * ySize).toInt

    (if (i < 0) 0 else if (i >= xSize) xSize - 1 else i,
     if (j < 0) 0 else if (j >= ySize) ySize - 1 else j)
  }
}
drlucaswilkins/newtonfractal
NewtonFractal/src/main/scala/com/lucaswilkins/newtonfractals/PixelValueMapping.scala
Scala
gpl-2.0
775
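A small sketch showing how the PixelValueMapping trait above can be mixed into a concrete canvas size and used for the pixel/unit-square round trip; the sizes and coordinates are arbitrary.

import com.lucaswilkins.newtonfractals.PixelValueMapping

object PixelValueMappingExample extends App {
  // Concrete 512x512 canvas
  val mapping = new PixelValueMapping {
    val xSize = 512
    val ySize = 512
  }

  println(mapping.indexValue(256, 128))    // (0.5, 0.25)
  println(mapping.pixelValue(0.5, 0.25))   // (256, 128)
  println(mapping.pixelValue(-0.1, 1.5))   // out-of-range values clamp to (0, 511)
}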
/*
 * This file is part of CoAnSys project.
 * Copyright (c) 2012-2015 ICM-UW
 *
 * CoAnSys is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CoAnSys is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with CoAnSys. If not, see <http://www.gnu.org/licenses/>.
 */

package pl.edu.icm.coansys.citations.util

/**
 * Helper functions and classes for comparing string similarity.
 *
 * @author Mateusz Fedoryszak ([email protected])
 */
object ngrams {

  case class NgramStatistics(counter: Map[String, Int], overall: Int) {
    def similarityTo(other: NgramStatistics): Double = {
      val c1 = counter
      val c2 = other.counter
      val all = overall + other.overall
      val common = (c1.keySet & c2.keySet).toIterator.map(k => c1(k) min c2(k)).sum
      if (all > 0)
        2 * common.toDouble / all
      else
        0.0
    }
  }

  object NgramStatistics {
    def fromString(s: String, n: Int): NgramStatistics = {
      val counter = s.sliding(n).toIterable.groupBy(identity).mapValues(_.size)
      val overall = counter.values.sum
      NgramStatistics(counter, overall)
    }
  }

  def trigramSimilarity(s1: String, s2: String): Double = {
    val n = 3
    NgramStatistics.fromString(s1, n) similarityTo NgramStatistics.fromString(s2, n)
  }
}
acz-icm/coansys
citation-matching/citation-matching-core-code/src/main/scala/pl/edu/icm/coansys/citations/util/ngrams.scala
Scala
agpl-3.0
1,742
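A quick, hypothetical check of the trigram similarity helper above: identical strings score 1.0, strings with no shared trigrams score 0.0, and near-duplicates land in between.

import pl.edu.icm.coansys.citations.util.ngrams

object NgramsExample extends App {
  println(ngrams.trigramSimilarity("citation matching", "citation matching"))  // 1.0
  println(ngrams.trigramSimilarity("citation", "xyzxyzxyz"))                   // 0.0

  // Only the trigrams touching the capital letters differ, so the score stays high
  println(ngrams.trigramSimilarity("Data Coordinator", "data coordinator") > 0.5)  // true
}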
package sp.labkit {

  import sp.domain._
  import sp.domain.Logic._

  object APILabkit {
    val service = "Labkit"
    val topicRequest = "labkitRequests"
    val topicResponse = "labkitResponse"

    sealed trait Request
    sealed trait Response
    sealed trait API

    case class OPEvent(name: String, time: String, id: String, resource: String, product: Option[String]) extends API
    case class OP(start: OPEvent, end: Option[OPEvent], attributes: SPAttributes = SPAttributes()) extends API
    case class Positions(positions: Map[String, String], time: String) extends API

    case class OperationStarted(name: String, resource: String, product: String, operationType: String, time: String) extends Response
    case class OperationFinished(name: String, resource: String, product: String, operationType: String, time: String) extends Response

    case class ResourcePies(data: Map[String, Map[String, Int]]) extends Response
    case class ProductPies(data: List[(String, List[(String, Int)])]) extends Response
    case class ProdStat(name: String, leadtime: Int, processingTime: Int, waitingTime: Int, noOfOperations: Int, noOfPositions: Int) extends Response
    case class ProductStats(data: List[ProdStat]) extends Response

    object Formats {
      import play.api.libs.json._

      implicit lazy val fOPEvent: JSFormat[OPEvent] = Json.format[OPEvent]
      implicit lazy val fOP: JSFormat[OP] = Json.format[OP]
      implicit lazy val fPositions: JSFormat[Positions] = Json.format[Positions]
      implicit lazy val fOperationStarted: JSFormat[OperationStarted] = Json.format[OperationStarted]
      implicit lazy val fOperationFinished: JSFormat[OperationFinished] = Json.format[OperationFinished]
      implicit lazy val fResourcePies: JSFormat[ResourcePies] = Json.format[ResourcePies]
      implicit lazy val fProductPies = Json.format[ProductPies]
      implicit lazy val fProdStat = Json.format[ProdStat]
      implicit lazy val fProductStats = Json.format[ProductStats]

      def fLabkitRequest: JSFormat[Request] = Json.format[Request]
      def fLabkitResponse: JSFormat[Response] = Json.format[Response]
      def fLabkitAPI: JSFormat[API] = Json.format[API]
    }

    object Request {
      implicit lazy val fLabkitRequest: JSFormat[Request] = Formats.fLabkitRequest
    }
    object Response {
      implicit lazy val fLabkitResponse: JSFormat[Response] = Formats.fLabkitResponse
    }
    object API {
      implicit lazy val fLabkitAPI: JSFormat[API] = Formats.fLabkitAPI
    }
  }
}
kristoferB/SP
spcontrol/api/src/main/scala/sp/APILabkit.scala
Scala
mit
2,528
/*
 * Copyright 2018 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package support.steps

import cucumber.api.scala.{EN, ScalaDsl}
import org.scalatest.{OptionValues, Matchers}
import org.scalatest.concurrent.Eventually
import org.openqa.selenium.WebDriver
import org.scalatest.selenium.WebBrowser

trait BaseSteps extends ScalaDsl with EN with Matchers with Eventually with OptionValues
hmrc/worldpay-downloader
test/support/steps/BaseSteps.scala
Scala
apache-2.0
927
package com.mindcandy.data.jobs.trends

import argonaut._
import com.datastax.spark.connector._
import com.datastax.spark.connector.types.TypeConverter
import com.mindcandy.data.cassandra.converters._
import com.mindcandy.data.jobs.BaseJob
import com.mindcandy.data.jobs.trends.model.EventForTrends
import com.mindcandy.data.model.Tag
import com.twitter.algebird.SpaceSaver
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.streaming.dstream.DStream
import org.joda.time.DateTime
import scala.concurrent.duration._

trait TrendsJob { self: BaseJob =>
  def Bucket: FiniteDuration
  def Capacity: Int
  def CF: String
  def Columns: Seq[SelectableColumnRef]

  // Needed TypeConverter to create an implicit RowReaderFactory
  //
  implicit val DateTimeConverter: TypeConverter[DateTime] = AnyToDateTimeConverter
  implicit val SpaceSaverConverter: TypeConverter[SpaceSaver[String]] = AnyToSpaceSaverStringConverter
  //
  override val Converters: Seq[TypeConverter[_]] = Seq(
    DateTimeConverter,
    SpaceSaverConverter,
    DateTimeToDateConverter,
    DateTimeToLongConverter,
    SpaceSaverToArrayByteConverter,
    SpaceSaverToByteBufferConverter
  )

  def KS: String

  def extract(input: DStream[String]): DStream[EventForTrends] =
    input.flatMap(Parse.decodeOption[EventForTrends](_))

  def mergeAndStore(data: DStream[(DateTime, SpaceSaver[String])]): Unit =
    data.foreachRDD { rdd =>
      rdd.cache()

      val loaded: RDD[(DateTime, SpaceSaver[String])] =
        rdd.joinWithCassandraTable[(DateTime, SpaceSaver[String])](KS, CF).select(Columns: _*).map {
          case (_, (time, previous)) => (time, previous)
        }

      val output: RDD[(DateTime, SpaceSaver[String])] =
        rdd.leftOuterJoin(loaded).map {
          case (time, (current, previous)) => (time, previous.fold(current)(_ ++ current))
        }

      output.saveToCassandra(KS, CF, SomeColumns(Columns: _*))

      rdd.unpersist(blocking = false)
    }

  def process(data: DStream[String]): DStream[(DateTime, SpaceSaver[String])] = {
    val extracted: DStream[EventForTrends] = extract(data)

    val withBuckets: DStream[(DateTime, List[Tag])] =
      buckets[EventForTrends, List[Tag]](_.time, _.tags, Bucket)(extracted)

    val keyValue: DStream[(DateTime, SpaceSaver[String])] =
      withBuckets.flatMap { case (time, tags) => tags.map(tag => (time, SpaceSaver(Capacity, tag.value))) }

    val reduced: DStream[(DateTime, SpaceSaver[String])] =
      keyValue.reduceByKey(_ ++ _)

    reduced
  }
}
lvicentesanchez/fast-gt-perfect
src/main/scala/com/mindcandy/data/jobs/trends/TrendsJob.scala
Scala
mit
2,597
import java.io.File

class FileMatcher {
  private def filesHere = new File(".").listFiles

  def filesEnding(query: String) =
    filesMatching(_.endsWith(query))

  def filesContaining(query: String) =
    filesMatching(_.contains(query))

  def filesRegex(query: String) =
    filesMatching(_.matches(query))

  def filesMatching(matcher: (String) => Boolean) =
    for (file <- filesHere if matcher(file.getName))
      yield file
}
mhotchen/programming-in-scala
src/FileMatcher.scala
Scala
apache-2.0
417
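A minimal usage sketch for the FileMatcher class above; it lists the current working directory, and the query strings and object name are arbitrary examples.

object FileMatcherDemo extends App {
  val matcher = new FileMatcher

  // Files in the current working directory whose names end in ".scala"
  matcher.filesEnding(".scala").foreach(f => println(f.getName))

  // Files whose names contain "Spec"
  matcher.filesContaining("Spec").foreach(f => println(f.getName))

  // Files whose names match a regular expression
  matcher.filesRegex(""".*\.sc(ala)?""").foreach(f => println(f.getName))
}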
/**
 * Copyright 2016, deepsense.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.deepsense.commons.resources

import scala.language.reflectiveCalls

object ManagedResource {
  def apply[T, Q](c: T {def close(): Unit})(f: (T) => Q): Q = {
    try {
      f(c)
    } finally {
      c.close()
    }
  }
}
deepsense-io/seahorse-workflow-executor
commons/src/main/scala/io/deepsense/commons/resources/ManagedResource.scala
Scala
apache-2.0
832
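A small usage sketch of the ManagedResource loan pattern above; the file name and object name are hypothetical, and any value with a close(): Unit method would work equally well.

import java.io.{BufferedReader, FileReader}
import io.deepsense.commons.resources.ManagedResource

object ManagedResourceDemo extends App {
  // The reader is closed even if the body throws; the body's result is returned.
  val firstLine: Option[String] =
    ManagedResource(new BufferedReader(new FileReader("example.txt"))) { reader =>
      Option(reader.readLine())
    }

  println(firstLine)
}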
package jp.scaleout.dw.category.models

import java.sql.Timestamp
import scala.slick.driver.H2Driver.simple._

case class Page(
  clFrom: Int,
  clTo: String,
  clSortkey: String,
  clTimestamp: Timestamp,
  clSortkeyPrefix: String,
  clCollation: String,
  clType: String)

// Slick 1.x lifted-embedding mapping for the "page" table, projected onto Page.
object Pages extends Table[Page]("page") {
  def clFrom = column[Int]("cl_from")
  def clTo = column[String]("cl_to")
  def clSortkey = column[String]("cl_sortkey")
  def clTimestamp = column[Timestamp]("cl_timestamp")
  def clSortkeyPrefix = column[String]("cl_sortkey_prefix")
  def clCollation = column[String]("cl_collation")
  def clType = column[String]("cl_type")

  def * = clFrom ~ clTo ~ clSortkey ~ clTimestamp ~ clSortkeyPrefix ~ clCollation ~ clType <> (Page, Page.unapply _)
}
mwsoft/wikipedia_categorizer
src/main/scala/jp/mwsoft/wikipedia/categorizer/models/Pages.scala
Scala
mit
794
package org.bitcoins.script.control import org.bitcoins.marshallers.script.ScriptParser import org.bitcoins.script.result.{ScriptErrorInvalidStackOperation, ScriptErrorOpReturn} import org.bitcoins.script.{ScriptProgram} import org.bitcoins.script.arithmetic.OP_ADD import org.bitcoins.script.bitwise.OP_EQUAL import org.bitcoins.script.constant._ import org.bitcoins.script.reserved.{OP_VER, OP_RESERVED} import org.bitcoins.util._ import org.scalatest.{MustMatchers, FlatSpec} /** * Created by chris on 1/6/16. */ class ControlOperationsInterpreterTest extends FlatSpec with MustMatchers with ControlOperationsInterpreter { "ControlOperationsInterpreter" must "have OP_VERIFY evaluate to true with '1' on the stack" in { val stack = List(OP_TRUE) val script = List(OP_VERIFY) val program = ScriptProgram(TestUtil.testProgram, stack,script) val result = opVerify(program) result.stack.isEmpty must be (true) result.script.isEmpty must be (true) } it must "have OP_VERIFY evaluate to true when there are multiple items on the stack that can be cast to an int" in { //for this test case in bitcoin core's script test suite //https://github.com/bitcoin/bitcoin/blob/master/src/test/data/script_valid.json#L21 val stack = ScriptParser.fromString("0x09 0x00000000 0x00000000 0x10") val script = List(OP_VERIFY) val program = ScriptProgram(TestUtil.testProgram, stack,script) val result = opVerify(program) } it must "have OP_VERIFY evaluate to false with '0' on the stack" in { val stack = List(OP_FALSE) val script = List(OP_VERIFY) val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack,script) val result = opVerify(program) result.stackTopIsFalse must be (true) } it must "mark the script as invalid for OP_VERIFY when there is nothing on the stack" in { val stack = List() val script = List(OP_VERIFY) val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack,script) val result = ScriptProgramTestUtil.toExecutedScriptProgram(opVerify(program)) result.error must be (Some(ScriptErrorInvalidStackOperation)) } it must "fail for verify when there is nothing on the script stack" in { intercept[IllegalArgumentException] { val stack = List(ScriptConstant("1")) val script = List() val program = ScriptProgram(TestUtil.testProgram, stack,script) val result = opVerify(program) } } it must "find the first index of our OP_ENDIF in a list of script tokens" in { val l = List(OP_ENDIF) findFirstOpEndIf(l) must be (Some(0)) findFirstOpEndIf(List(OP_IF,OP_ELSE,OP_ENDIF,OP_ENDIF)) must be (Some(2)) findFirstOpEndIf(List(OP_0,OP_1,OP_2)) must be (None) findFirstOpEndIf(List(OP_IF, OP_RESERVED, OP_ENDIF, OP_1)) must be (Some(2)) } it must "find the last index of our OP_ENDIF in a list of script tokens" in { val l = List(OP_ENDIF) findLastOpEndIf(l) must be (Some(0)) findLastOpEndIf(List(OP_IF,OP_ELSE,OP_ENDIF,OP_ENDIF)) must be (Some(3)) findLastOpEndIf(List(OP_0,OP_1,OP_2)) must be (None) findLastOpEndIf(List(OP_IF, OP_RESERVED, OP_ENDIF, OP_ENDIF, OP_1)) must be (Some(3)) } it must "find the first indexes of OP_ELSE in a list of script tokens" in { findFirstOpElse(List(OP_ELSE)) must be (Some(0)) findFirstOpElse(List(OP_IF,OP_ELSE,OP_ENDIF,OP_ELSE)) must be (Some(1)) findFirstOpElse(List(OP_0,OP_1,OP_2)) must be (None) } it must "find the first indexes of OP_ELSE and OP_ENDIF in a list of script tokens" in { findFirstIndexesOpElseOpEndIf(List(OP_ELSE,OP_ENDIF)) must be (Some(0),Some(1)) findFirstIndexesOpElseOpEndIf(List(OP_IF, OP_ELSE,OP_ENDIF, OP_IF,OP_ELSE,OP_ENDIF)) must be (Some(1),Some(2)) 
findFirstIndexesOpElseOpEndIf(List(OP_IF,OP_IF)) must be (None,None) } it must "remove the first OP_IF expression in a script" in { removeFirstOpIf(List(OP_IF,OP_ELSE,OP_ENDIF)) must be (List(OP_ELSE,OP_ENDIF)) removeFirstOpIf(List(OP_ELSE,OP_ENDIF)) must be (List(OP_ELSE,OP_ENDIF)) removeFirstOpIf(List(OP_IF, OP_1,OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF)) must be (List(OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF)) removeFirstOpIf(List(OP_IF,OP_ENDIF)) must be (List(OP_ENDIF)) } it must "remove the first OP_ELSE expression in a script" in { removeFirstOpElse(List(OP_IF,OP_ELSE,OP_ENDIF)) must be (List(OP_IF,OP_ENDIF)) removeFirstOpElse(List(OP_IF,OP_ENDIF)) must be (List(OP_IF,OP_ENDIF)) removeFirstOpElse(List(OP_IF, OP_1,OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF)) must be (List(OP_IF, OP_1, OP_ELSE, OP_3, OP_ENDIF)) } it must "remove the first OP_ELSE in a binary tree" in { val script1 = List(OP_IF,OP_ELSE,OP_ENDIF) val bTree1 = parseBinaryTree(script1) removeFirstOpElse(bTree1).toSeq must be (List(OP_IF)) val script2 = List(OP_IF,OP_ENDIF) val bTree2 = parseBinaryTree(script2) removeFirstOpElse(bTree2).toSeq must be (script2) val script3 = List(OP_IF, OP_1,OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF) val bTree3 = parseBinaryTree(script3) removeFirstOpElse(bTree3).toSeq must be (List(OP_IF, OP_1, OP_ELSE, OP_3, OP_ENDIF)) } it must "find a matching OP_ENDIF for an OP_IF" in { //https://gist.github.com/Christewart/381dc1dbbb07e62501c3 val script = List(OP_IF, OP_1, OP_IF, OP_RETURN, OP_ELSE, OP_RETURN, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ELSE, OP_1, OP_IF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL) findMatchingOpEndIf(script) must be (20) } it must "parse a script as a binary tree then convert it back to the original list" in { val script0 = List(OP_IF,OP_ENDIF) parseBinaryTree(script0).toSeq must be (script0) val script1 = List(OP_IF,OP_0,OP_ELSE,OP_1,OP_ENDIF) val bTree1 = parseBinaryTree(script1) bTree1.toSeq must be (script1) val script2 = List(OP_IF,OP_ELSE, OP_ELSE,OP_ENDIF) parseBinaryTree(script2).toSeq must be (script2) val script3 = List(OP_IF, OP_1, OP_ELSE, OP_0, OP_ENDIF) val bTree3 = parseBinaryTree(script3) bTree3.toSeq must be (script3) val script4 = List(OP_IF, OP_IF, OP_0, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_IF, OP_2, OP_ELSE, OP_3, OP_ENDIF, OP_ENDIF) val bTree4 = parseBinaryTree(script4) bTree4.toSeq must be (script4) val script5 = List(OP_IF, OP_1,OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF) parseBinaryTree(script5).toSeq must be (script5) } it must "parse a script into a binary tree and have the OP_IF expression on the left branch and the OP_ELSE expression on the right branch"in { val script = List(OP_IF,OP_0,OP_ELSE,OP_1,OP_ENDIF) val bTree = parseBinaryTree(script) bTree.value.get must be (OP_IF) bTree.left.isDefined must be (true) bTree.left.get.value must be (Some(OP_0)) bTree.right.isDefined must be (true) bTree.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.isDefined must be (true) bTree.right.get.left.get.value must be (Some(OP_1)) bTree.right.get.right.isDefined must be (true) bTree.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) } it must "parse nested OP_ELSE statements into the same branch" in { val script = List(OP_IF, OP_1,OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF) val bTree = parseBinaryTree(script) bTree.value.get must be (OP_IF) bTree.left.isDefined must be (true) bTree.left.get.value must be (Some(OP_1)) bTree.right.isDefined must be (true) bTree.right.get.value must be (Some(OP_ELSE)) 
bTree.right.get.left.isDefined must be (true) bTree.right.get.left.get.value must be (Some(OP_2)) bTree.right.get.right.isDefined must be (true) bTree.right.get.right.get.value must be (Some(OP_ELSE)) bTree.right.get.right.get.left.isDefined must be (true) bTree.right.get.right.get.left.get.value must be (Some(OP_3)) bTree.right.get.right.get.right.isDefined must be (true) bTree.right.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) bTree.toSeq must be (script) } it must "parse a binary tree from a script with nested OP_IFs and OP_ELSES on both branches" in { val script = List(OP_IF, OP_IF, OP_0, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_IF, OP_2, OP_ELSE, OP_3, OP_ENDIF, OP_ENDIF) val bTree = parseBinaryTree(script) bTree.value must be (Some(OP_IF)) bTree.left.get.value must be (Some(OP_IF)) bTree.left.get.right.get.value must be (Some(OP_ELSE)) bTree.left.get.right.get.left.get.value must be (Some(OP_1)) bTree.left.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) bTree.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.get.value must be (Some(OP_IF)) bTree.right.get.left.get.left.get.value must be (Some(OP_2)) bTree.right.get.left.get.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.get.right.get.left.get.value must be (Some(OP_3)) bTree.right.get.left.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) } it must "parse a binary tree from a script where constants are nested inside of OP_IF OP_ELSE branches" in { //"0" "IF 1 IF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 1 IF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL" val script = List(OP_IF, OP_1,OP_IF,OP_RETURN,OP_ELSE,OP_RETURN,OP_ELSE,OP_RETURN, OP_ENDIF, OP_ELSE, OP_1, OP_IF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL) val bTree = parseBinaryTree(script) bTree.toSeq must be (script) bTree.right.get.right.get.left.get.left.get.left.get.value must be (Some(OP_ADD)) bTree.right.get.right.get.left.get.left.get.left.get.left.get.value must be (Some(OP_2)) bTree.right.get.right.get.left.get.left.get.left.get.left.get.left.get.value must be (Some(OP_EQUAL)) } it must "parse a binary tree where there are nested OP_ELSES in the outer most OP_ELSE" in { //https://gist.github.com/Christewart/a5253cf708903323ddc6 val script = List(OP_IF,OP_1, OP_IF,OP_RETURN, OP_ELSE, OP_RETURN, OP_ELSE, OP_RETURN,OP_ENDIF, OP_ELSE, OP_1,OP_IF,OP_1,OP_ELSE, OP_RETURN,OP_ELSE,OP_1,OP_ENDIF, OP_ELSE,OP_RETURN,OP_ENDIF,OP_ADD,OP_2,OP_EQUAL) val bTree = parseBinaryTree(script) bTree.toSeq must be (script) bTree.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.get.value must be (Some(OP_1)) bTree.right.get.right.get.value must be (Some(OP_ELSE)) bTree.right.get.right.get.left.get.value must be (Some(OP_RETURN)) bTree.right.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) bTree.right.get.right.get.left.get.left.get.left.get.value must be (Some(OP_ADD)) } it must "parse a binary tree that has OP_NOTIFs" in { val script = List(OP_NOTIF, OP_1,OP_ELSE, OP_2, OP_ELSE, OP_3, OP_ENDIF) val bTree = parseBinaryTree(script) bTree.value.get must be (OP_NOTIF) bTree.left.isDefined must be (true) bTree.left.get.value must be (Some(OP_1)) bTree.right.isDefined must be (true) bTree.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.isDefined must be (true) bTree.right.get.left.get.value must be (Some(OP_2)) bTree.right.get.right.isDefined must be (true) bTree.right.get.right.get.value must be (Some(OP_ELSE)) 
bTree.right.get.right.get.left.isDefined must be (true) bTree.right.get.right.get.left.get.value must be (Some(OP_3)) bTree.right.get.right.get.right.isDefined must be (true) bTree.right.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) bTree.toSeq must be (script) } it must "parse a binary tree with nested OP_NOTIFs" in { val script = List(OP_NOTIF, OP_NOTIF, OP_0, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_NOTIF, OP_2, OP_ELSE, OP_3, OP_ENDIF, OP_ENDIF) val bTree = parseBinaryTree(script) bTree.value must be (Some(OP_NOTIF)) bTree.left.get.value must be (Some(OP_NOTIF)) bTree.left.get.right.get.value must be (Some(OP_ELSE)) bTree.left.get.right.get.left.get.value must be (Some(OP_1)) bTree.left.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) bTree.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.get.value must be (Some(OP_NOTIF)) bTree.right.get.left.get.left.get.value must be (Some(OP_2)) bTree.right.get.left.get.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.get.right.get.left.get.value must be (Some(OP_3)) bTree.right.get.left.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) } it must "evaluate an OP_IF correctly" in { val stack = List(OP_0) val script = List(OP_IF, OP_RESERVED, OP_ENDIF, OP_1) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.stack.isEmpty must be (true) newProgram.script must be (List(OP_ENDIF,OP_1)) } it must "evaluate an OP_IF OP_ELSE OP_ENDIF block" in { val stack = List(OP_0) val script = List(OP_IF, OP_VER, OP_ELSE, OP_1, OP_ENDIF) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.script must be (List(OP_ELSE,OP_1,OP_ENDIF)) } it must "check that every OP_IF has a matching OP_ENDIF" in { val script0 = List() checkMatchingOpIfOpNotIfOpEndIf(script0) must be (true) val script1 = List(OP_IF, OP_ENDIF) checkMatchingOpIfOpNotIfOpEndIf(script1) must be (true) val script2 = List(OP_IF) checkMatchingOpIfOpNotIfOpEndIf(script2) must be (false) val script3 = List(OP_IF,OP_IF,OP_NOTIF,OP_ELSE,OP_ELSE,OP_ELSE,OP_ENDIF,OP_ENDIF,OP_ENDIF) checkMatchingOpIfOpNotIfOpEndIf(script3) must be (true) } it must "evaluate an OP_IF block correctly if the stack top is true" in { val stack = List(OP_1) val script = List(OP_IF, OP_1, OP_ELSE, OP_0, OP_ENDIF) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.stack must be (List()) newProgram.script must be (List(OP_1)) } it must "evaluate a weird case using multiple OP_ELSEs" in { val stack = List(ScriptNumber(1)) val script = List(OP_IF, OP_ELSE, OP_0, OP_ELSE, OP_1, OP_ENDIF) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.script must be (List(OP_ELSE,OP_1,OP_ENDIF)) } it must "evaluate nested OP_IFS correctly" in { val stack = List(OP_1) val script = List(OP_IF, OP_IF, OP_0, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_IF, OP_2, OP_ELSE, OP_3, OP_ENDIF, OP_ENDIF) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.stack.isEmpty must be (true) newProgram.script must be (List(OP_IF,OP_0,OP_ELSE,OP_1,OP_ENDIF)) } it must "evaluate a nested OP_IFs OP_ELSES correctly when the stack top is 0" in { //https://gist.github.com/Christewart/381dc1dbbb07e62501c3 val stack = List(OP_0) //"0", "IF 1 IF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 1 IF 1 ELSE // RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL" val script = 
List(OP_IF,OP_1, OP_IF,OP_RETURN, OP_ELSE, OP_RETURN, OP_ELSE, OP_RETURN,OP_ENDIF, OP_ELSE, OP_1, OP_IF,OP_1,OP_ELSE, OP_RETURN,OP_ELSE,OP_1,OP_ENDIF, OP_ELSE,OP_RETURN,OP_ENDIF,OP_ADD,OP_2,OP_EQUAL) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.stack.isEmpty must be (true) newProgram.script must be (List(OP_ELSE, OP_1,OP_IF,OP_1,OP_ELSE, OP_RETURN,OP_ELSE,OP_1,OP_ENDIF,OP_ELSE, OP_RETURN,OP_ENDIF,OP_ADD,OP_2,OP_EQUAL)) val newProgram1 = opElse(newProgram) newProgram1.stack.isEmpty must be (true) newProgram1.script must be (List(OP_1,OP_IF,OP_1,OP_ELSE, OP_RETURN,OP_ELSE,OP_1,OP_ENDIF,OP_ENDIF,OP_ADD,OP_2,OP_EQUAL)) } it must "remove the first OP_ELSE if the stack top is true for an OP_IF" in { val stack = List(ScriptNumber(1)) val script = List(OP_IF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opIf(program) newProgram.stack.isEmpty must be (true) newProgram.script must be (List(OP_1,OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL)) } it must "evaluate an OP_ENDIF correctly" in { val stack = List(ScriptNumber(1), ScriptNumber(1)) val script = List(OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opEndIf(program) newProgram.stack must be (stack) newProgram.script must be (script.tail) } it must "parse a partial script correctly" in { val script = List(OP_IF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL) val bTree = parseBinaryTree(script) bTree.value must be (Some(OP_IF)) bTree.left.get.value must be (Some(OP_1)) bTree.right.get.value must be (Some(OP_ELSE)) bTree.right.get.left.get.value must be (Some(OP_RETURN)) bTree.right.get.right.get.value must be (Some(OP_ELSE)) bTree.right.get.right.get.left.get.value must be (Some(OP_1)) bTree.right.get.right.get.left.get.left.get.value must be (Some(OP_ENDIF)) bTree.right.get.right.get.left.get.left.get.left.get.value must be (Some(OP_ELSE)) } it must "mechanically evaluate this entire script correctly" in { val stack = List(ScriptNumber(1)) val script = List(OP_NOTIF, OP_0, OP_NOTIF, OP_RETURN, OP_ELSE, OP_RETURN, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ELSE, OP_0, OP_NOTIF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL) val program = ScriptProgram(TestUtil.testProgram, stack,script) val newProgram = opNotIf(program) newProgram.stack.isEmpty must be (true) newProgram.script must be (List(OP_ELSE, OP_0, OP_NOTIF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ELSE, OP_RETURN, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL)) val newProgram1 = opElse(newProgram) newProgram1.stack.isEmpty must be (true) newProgram1.script must be (List(OP_0, OP_NOTIF, OP_1, OP_ELSE, OP_RETURN, OP_ELSE, OP_1, OP_ENDIF, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL)) val newProgram2 = opNotIf(ScriptProgram(newProgram1,List(OP_0),newProgram1.script.tail)) newProgram2.stack.isEmpty must be (true) newProgram2.script must be (List(OP_1,OP_ELSE, OP_1, OP_ENDIF, OP_ENDIF, OP_ADD, OP_2, OP_EQUAL)) val newProgram3 = opElse(ScriptProgram(newProgram2,List(OP_1),newProgram2.script.tail)) newProgram3.stack must be (List(OP_1)) newProgram3.script must be (List(OP_1,OP_ENDIF,OP_ENDIF,OP_ADD, OP_2, OP_EQUAL)) val newProgram4 = opEndIf(ScriptProgram(newProgram3, 
newProgram3.script.head :: newProgram3.stack, newProgram3.script.tail)) newProgram4.stack must be (List(OP_1,OP_1)) newProgram4.script must be (List(OP_ENDIF,OP_ADD, OP_2, OP_EQUAL)) val newProgram5 = opEndIf(newProgram4) newProgram5.stack must be (List(OP_1,OP_1)) newProgram5.script must be (List(OP_ADD, OP_2, OP_EQUAL)) } it must "mark a transaction as invalid if it is trying to spend an OP_RETURN output" in { val stack = Seq() val script = Seq(OP_RETURN) val program = ScriptProgram(TestUtil.testProgramExecutionInProgress,stack,script) val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(opReturn(program)) newProgram.error must be (Some(ScriptErrorOpReturn)) } it must "remove nothing when trying to remove an OP_ELSE if the tree is empty" in { removeFirstOpElse(Empty) must be (Empty) } it must "remove an OP_ELSE from the left branch from a binary tree if an OP_IF DNE on the left branch" in { val tree = Node(OP_0,Node(OP_ELSE,Empty,Empty),Empty) removeFirstOpElse(tree) must be (Node(OP_0,Empty,Empty)) } it must "remove the first OP_IF expression a sequence" in { val asm = List(OP_IF,OP_0,OP_ELSE,OP_1,OP_ENDIF) removeFirstOpIf(asm) must be (Seq(OP_ELSE,OP_1,OP_ENDIF)) } }
Christewart/scalacoin
src/test/scala/org/bitcoins/script/control/ControlOperationsInterpreterTest.scala
Scala
mit
20,412
package inloopio.math.random

/**
 * Deterministic random number generators are repeatable, which can prove
 * useful for testing and validation. This interface defines an operation
 * to return the seed data from a repeatable RNG. This seed value can then
 * be reused to create a random source with identical output.
 * @author Daniel Dyer
 */
trait RepeatableRNG {

  /**
   * @return The seed data used to initialise this pseudo-random
   * number generator.
   */
  def getSeed: Array[Byte]
}
dcaoyuan/inloopio-libs
inloopio-math/src/main/scala/inloopio/math/random/RepeatableRNG.scala
Scala
bsd-3-clause
499
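A sketch of one way the RepeatableRNG contract above might be implemented, assuming a toy generator that wraps java.util.Random and records its seed; the class and object names are illustrative only.

import java.nio.ByteBuffer
import java.util.Random
import inloopio.math.random.RepeatableRNG

// Records the seed so an identical random stream can be recreated later.
class SeededJavaRNG(seed: Long) extends Random(seed) with RepeatableRNG {
  def getSeed: Array[Byte] = ByteBuffer.allocate(java.lang.Long.BYTES).putLong(seed).array()
}

object RepeatableRNGDemo extends App {
  val a = new SeededJavaRNG(42L)
  val b = new SeededJavaRNG(ByteBuffer.wrap(a.getSeed).getLong)
  // Both generators share the seed, so they produce the same sequence.
  println(a.nextInt() == b.nextInt())
}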
/** * Licensed to Big Data Genomics (BDG) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The BDG licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bdgenomics.adam.rdd.read.realignment import org.apache.spark.rdd.RDD import org.bdgenomics.adam.models.ReferenceRegion import org.bdgenomics.adam.rdd.ADAMContext._ import org.bdgenomics.adam.rich.RichAlignmentRecord import org.bdgenomics.adam.util.ADAMFunSuite import org.bdgenomics.formats.avro.{ AlignmentRecord, Contig } class IndelRealignmentTargetSuite extends ADAMFunSuite { // Note: this can't be lazy vals because Spark won't find the RDDs after the first test def mason_reads: RDD[RichAlignmentRecord] = { val path = ClassLoader.getSystemClassLoader.getResource("small_realignment_targets.sam").getFile sc.loadAlignments(path).map(RichAlignmentRecord(_)) } def artificial_reads: RDD[RichAlignmentRecord] = { val path = ClassLoader.getSystemClassLoader.getResource("artificial.sam").getFile sc.loadAlignments(path).map(RichAlignmentRecord(_)) } def make_read(start: Long, cigar: String, mdtag: String, length: Int, refLength: Int, id: Int = 0): RichAlignmentRecord = { val sequence: String = "A" * length RichAlignmentRecord(AlignmentRecord.newBuilder() .setReadName("read" + id.toString) .setStart(start) .setReadMapped(true) .setCigar(cigar) .setEnd(start + refLength) .setSequence(sequence) .setReadNegativeStrand(false) .setMapq(60) .setQual(sequence) // no typo, we just don't care .setContig(Contig.newBuilder() .setContigName("1") .build()) .setMismatchingPositions(mdtag) .build()) } sparkTest("checking simple realignment target") { val target1 = new IndelRealignmentTarget(Some(ReferenceRegion("1", 1, 10)), ReferenceRegion("1", 1, 51)) val target2 = new IndelRealignmentTarget(None, ReferenceRegion("1", 60, 91)) assert(target1.readRange.start === 1) assert(target1.readRange.end === 51) assert(TargetOrdering.overlap(target1, target1) === true) assert(TargetOrdering.overlap(target1, target2) === false) assert(target2.readRange.start === 60) assert(target2.readRange.end === 91) assert(!target1.isEmpty) assert(target2.isEmpty) } sparkTest("creating simple target from read with deletion") { val read = make_read(3L, "2M3D2M", "2^AAA2", 4, 7) val read_rdd: RDD[RichAlignmentRecord] = sc.makeRDD(Seq(read), 1) val targets = RealignmentTargetFinder(read_rdd) assert(targets != null) assert(targets.size === 1) assert(targets.head.variation.get.start === 5) assert(targets.head.variation.get.end === 8) assert(targets.head.readRange.start === 3) assert(targets.head.readRange.end === 10) } sparkTest("creating simple target from read with insertion") { val read = make_read(3L, "2M3I2M", "4", 7, 4) val read_rdd: RDD[RichAlignmentRecord] = sc.makeRDD(Seq(read), 1) val targets = RealignmentTargetFinder(read_rdd) assert(targets != null) assert(targets.size === 1) assert(targets.head.variation.get.start === 5) assert(targets.head.variation.get.end === 
6) assert(targets.head.readRange.start === 3) assert(targets.head.readRange.end === 7) } sparkTest("joining simple realignment targets on same chr") { val target1 = new IndelRealignmentTarget(Some(ReferenceRegion("1", 10, 16)), ReferenceRegion("1", 1, 21)) val target2 = new IndelRealignmentTarget(Some(ReferenceRegion("1", 10, 16)), ReferenceRegion("1", 6, 26)) val merged_target = target1.merge(target2) assert(merged_target.readRange.start === 1) assert(merged_target.readRange.end === 26) assert(merged_target.variation.get.start === 10) assert(merged_target.variation.get.end === 16) } sparkTest("joining simple realignment targets on different chr throws exception") { val target1 = new IndelRealignmentTarget(Some(ReferenceRegion("1", 10, 16)), ReferenceRegion("1", 1, 21)) val target2 = new IndelRealignmentTarget(Some(ReferenceRegion("2", 10, 16)), ReferenceRegion("2", 6, 26)) intercept[AssertionError] { target1.merge(target2) } } sparkTest("creating targets from three intersecting reads, same indel") { val read1 = make_read(1L, "4M3D2M", "4^AAA2", 6, 9) val read2 = make_read(2L, "3M3D2M", "3^AAA2", 5, 8) val read3 = make_read(3L, "2M3D2M", "2^AAA2", 4, 7) val read_rdd: RDD[RichAlignmentRecord] = sc.makeRDD(Seq(read1, read2, read3), 1) val targets = RealignmentTargetFinder(read_rdd) assert(targets != null) assert(targets.size === 1) assert(targets.head.variation.get.start === 5) assert(targets.head.variation.get.end === 8) assert(targets.head.readRange.start === 1) assert(targets.head.readRange.end === 10) } sparkTest("creating targets from three intersecting reads, two different indel") { val read1 = make_read(1L, "2M2D4M", "2^AA4", 6, 8, 0) val read2 = make_read(1L, "2M2D2M2D2M", "2^AA2^AA2", 6, 10, 1) val read3 = make_read(5L, "2M2D4M", "2^AA4", 6, 8, 2) val read_rdd: RDD[RichAlignmentRecord] = sc.makeRDD(Seq(read1, read2, read3), 1) val targets = RealignmentTargetFinder(read_rdd) assert(targets != null) assert(targets.size === 1) assert(targets.head.variation.get.start === 3) assert(targets.head.variation.get.end === 9) assert(targets.head.readRange.start === 1) assert(targets.head.readRange.end === 13) } sparkTest("creating targets from two disjoint reads") { val read1 = make_read(1L, "2M2D2M", "2^AA2", 4, 6) val read2 = make_read(7L, "2M2D2M", "2^AA2", 4, 6) val read_rdd: RDD[RichAlignmentRecord] = sc.makeRDD(Seq(read1, read2), 1) val targets = RealignmentTargetFinder(read_rdd).toArray assert(targets != null) assert(targets.size === 2) assert(targets(0).variation.get.start === 3) assert(targets(0).variation.get.end === 5) assert(targets(0).readRange.start === 1) assert(targets(0).readRange.end === 7) assert(targets(1).variation.get.start === 9) assert(targets(1).variation.get.end === 11) assert(targets(1).readRange.start === 7) assert(targets(1).readRange.end === 13) } sparkTest("creating targets for artificial reads: one-by-one") { def check_indel(target: IndelRealignmentTarget, read: AlignmentRecord): Boolean = { val indelRange: ReferenceRegion = target.variation.get read.getStart.toLong match { case 5L => (indelRange.start == 34) && (indelRange.end == 44) case 10L => (indelRange.start == 54) && (indelRange.end == 64) case 15L => (indelRange.start == 34) && (indelRange.end == 44) case 20L => (indelRange.start == 54) && (indelRange.end == 64) case 25L => (indelRange.start == 34) && (indelRange.end == 44) case _ => false } } val reads = artificial_reads.collect() reads.foreach( read => { val read_rdd: RDD[RichAlignmentRecord] = sc.makeRDD(Seq(read), 1) val targets = 
RealignmentTargetFinder(read_rdd) if (read.getStart < 105) { assert(targets != null) assert(targets.size === 1) // the later read mates do not have indels assert(targets.head.readRange.start === read.getStart) assert(targets.head.readRange.end === read.getEnd) assert(check_indel(targets.head, read)) } }) } sparkTest("creating targets for artificial reads: all-at-once (merged)") { val targets_collected: Array[IndelRealignmentTarget] = RealignmentTargetFinder(artificial_reads).toArray assert(targets_collected.size === 1) assert(targets_collected.head.readRange.start === 5) assert(targets_collected.head.readRange.end === 95) assert(targets_collected.head.variation.get.start === 34) assert(targets_collected.head.variation.get.end === 64) } sparkTest("creating indel targets for mason reads") { val targets_collected: Array[IndelRealignmentTarget] = RealignmentTargetFinder(mason_reads).toArray // the first read has no indels // the second read has a one-base deletion and a one-base insertion assert(targets_collected(0).variation.get.start == 702289 && targets_collected(0).variation.get.end == 702324) // the third read has a one base deletion assert(targets_collected(1).variation.get.start == 807755 && targets_collected(1).variation.get.end == 807756) // read 7 has a single 4 bp deletion assert(targets_collected(5).variation.get.length === 4) assert(targets_collected(5).variation.get.start == 869644 && targets_collected(5).variation.get.end == 869648) } }
VinACE/adam
adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTargetSuite.scala
Scala
apache-2.0
9,356
package poly.collection.search

import poly.collection._

/**
 * Represents a simple state space that is specified by a state transition function.
 * @author Tongfei Chen
 * @author Yuhuan Jiang
 * @since 0.1.0
 */
trait StateSpace[S] { self =>

  /** Returns the successive states of the specified state under this state space. */
  def succ(x: S): Traversable[S]

  /** Constrains this state space by selecting only the states that satisfy the given predicate. */
  def filterKeys(f: S => Boolean): StateSpace[S] = new StateSpaceT.KeyFiltered(self, f)

  /** Constrains this state space by selecting only the states that satisfy the given predicate. */
  def filter(f: S => Boolean) = filterKeys(f)

  /**
   * Depth-first traverses this state space from the given starting state.
   * $LAZY
   */
  def depthFirstTreeTraversal(start: S) =
    Iterable.ofIterator(new DepthFirstTreeIterator(this, start))

  /**
   * Breadth-first traverses this state space from the given starting state.
   * $LAZY
   */
  def breadthFirstTreeTraversal(start: S) =
    Iterable.ofIterator(new BreadthFirstTreeIterator(this, start))
}

object StateSpace {

  /** Creates a state space given a state transition function. */
  def apply[S](f: S => Traversable[S]): StateSpace[S] = new StateSpaceT.BySucc(f)
}

private[poly] object StateSpaceT {

  class BySucc[S](f: S => Traversable[S]) extends StateSpace[S] {
    def succ(x: S) = f(x)
  }

  class KeyFiltered[S](self: StateSpace[S], f: S => Boolean) extends StateSpace[S] {
    def succ(x: S) = self.succ(x) filter f
  }
}
ctongfei/poly-collection
core/src/main/scala/poly/collection/search/StateSpace.scala
Scala
mit
1,555
package models

import java.util.UUID

case class Prediction(imageId: UUID, category: String, probability: Double, left: Int, top: Int, right: Int, bottom: Int, predictionId: Int = -1)
SwaggerTagger/octo-tagger-backend
app/models/Prediction.scala
Scala
mit
184
//package io.eels.component.avro
//
//import org.scalatest.{Matchers, WordSpec}
//
//class AvroSourceParserTest extends WordSpec with Matchers {
//
//  "AvroSourceParser" should {
//    "parse avro url" in {
//      val url = "avro:some/path"
//      AvroSourceParser(url).get shouldBe AvroSourceBuilder("some/path", Map.empty)
//    }
//    // fix in scalax
//    "parse url with trailing ?" in {
//      val url = "avro:some/path?"
//      AvroSourceParser(url).get shouldBe AvroSourceBuilder("some/path", Map.empty)
//    }
//    "parse url with options" in {
//      val url = "avro:some/path?a=b&c=d"
//      AvroSourceParser(url).get shouldBe AvroSourceBuilder("some/path", Map("a" -> List("b"), "c" -> List("d")))
//    }
//    "not parse url with missing path" in {
//      AvroSourceParser("avro:?a=b") shouldBe None
//      AvroSourceParser("avro:") shouldBe None
//    }
//    "not parse url with incorrect scheme" in {
//      AvroSourceParser("csv:some/path") shouldBe None
//      AvroSourceParser("parquet:some/path") shouldBe None
//    }
//  }
//}
eel-lib/eel
eel-core/src/test/scala/io/eels/component/avro/AvroSourceParserTest.scala
Scala
mit
1,065
package scrupal.admin import org.specs2.execute.AsResult import play.api.libs.json.{JsArray, JsNull} import play.api.test.FakeRequest import scrupal.core._ import scrupal.test.{ControllerSpecification, SharedTestScrupal} /** Test Cases For AdminController */ class AdminProviderSpec extends ControllerSpecification("AdminProvider") with SharedTestScrupal { class SiteForAdminProviderTest(implicit scrpl: Scrupal) extends Site(new SiteData("foo", domainName="foo.com"))(scrpl) { object adminProvider extends AdminProvider()(scrpl) enable(adminProvider, this) } override def makeSite(implicit scrupal : Scrupal) : Site = { new SiteForAdminProviderTest()(scrupal) } def testCases : Seq[Case] = Seq( Case("POST","/app/admin/site/", Unimplemented, "foo"), Case("GET", "/app/admin/scrupal/",Successful, "<p>help</p>"), Case("GET", "/app/admin/scrupal/help", Successful, "<p>help</p>"), Case("GET", "/app/admin/user/", Successful, "foo"), Case("GET", "/app/admin/site/list", Successful, ""), Case("GET", "/app/admin/site/1", Successful, ""), Case("GET", "/app/admin/site/foo", Successful, ""), Case("GET", "/app/admin/module/", Successful, "foo"), Case("PUT", "/app/admin/foo/", Unlocatable, "foo"), Case("OPTIONS", "/app/admin/foo/", Unlocatable, ""), Case("DELETE", "/app/admin/foo/", Unlocatable, ""), Case("HEAD", "/app/admin/foo/", Unlocatable, ""), Case("GET", "/app/admin/", Successful, "scrupal-jsapp"), Case("GET", "/crapola", Unlocatable, "") ) "AdminProvider" should { "yield None for irrelevant path" in withScrupal("IrrelevantPath") { (scrupal) ⇒ val req = FakeRequest("GET", "/api/foo/bar") val context = Context(scrupal) scrupal.scrupalController.appReactorFor(context, "foo", req) must beEqualTo(None) } "have the name 'admin" in { val ap = new AdminProvider() ap.id must beEqualTo('admin) ap.name must beEqualTo("admin") } "have an index page" in { pending /* def index() = Action.async { request : Request[AnyContent] ⇒ makePage(Ok, Administration.introduction) } */ } "accept Json Site creation data" in { pending /* def postSiteJson() = Action.async { request : Request[AnyContent] ⇒ request.body.asJson match { case Some(json) ⇒ makePage(NotImplemented, p("JSON Creation Of Sites Not Yet Supported")) case None ⇒ makePage(BadRequest, p("Expected JSON content")) } } */ } "have a site creation form" in { pending /* val createSiteForm = Form[CreateSite]( mapping( "name" → nonEmptyText(maxLength=64), "description" → nonEmptyText(maxLength=255), "domainName" → nonEmptyText(maxLength=255), "requireHttps" → boolean )(CreateSite.apply)(CreateSite.unapply) ) def createSite() = Action.async { implicit request : Request[AnyContent] ⇒ createSiteForm.bindFromRequest.fold( formWithErrors => { // binding failure, you retrieve the form containing errors: makePage(BadRequest, Administration.site_form(formWithErrors)) }, data => { /* binding success, you get the actual value. 
*/ val siteData = SiteData(data.name, data.domainName,data.description,data.requireHttps) mapQuery( (schema) ⇒ schema.sites.create(siteData) ) { (id : Long, ec: ExecutionContext) ⇒ val site = Site(siteData.copy(oid=Some(id)))(scrupal) Redirect(router.scrupal.core.routes.AdminController.site(id)) } } ) } */ } "allow site updating" in { pending /* def updateSite(id : Long) = Action.async { implicit request : Request[AnyContent] ⇒ flatMapQuery((schema) ⇒ schema.sites.byId(id)) { case (Some(siteData),ec) ⇒ val boundForm = createSiteForm.bindFromRequest boundForm.fold( formWithErrors ⇒ { val page = Administration.site(siteData)(div(cls:="bg-warning","Save failed.")) makePage(BadRequest, page) }, data ⇒ { val newSiteData : SiteData = siteData.copy(name=data.name, description=data.description, domainName=data.domainName, requireHttps=data.requireHttps) flatMapQuery((schema) ⇒ schema.sites.update(newSiteData)) { (r,ec) ⇒ makePage(Ok, Administration.site(siteData)(div(cls:="bg-success","Saved."))) } } ) case (None,ec) ⇒ makePage(NotFound, Administration.error("Site #$id was not found.")) } } */ } "display a site" in { pending /* def newSite() = Action.async { implicit request : Request[AnyContent] ⇒ makePage(Ok, Administration.site_form(createSiteForm)) } def site(id : Long) = Action.async { implicit request : Request[AnyContent] ⇒ flatMapQuery((schema) ⇒ schema.sites.byId(id)) { case (Some(siteData),ec) ⇒ makePage(Ok, Administration.site(siteData)()) case (None,ec) ⇒ makePage(NotFound, Administration.error(s"Site #$id was not found.")) } } */ } "display a module" in { pending /* def module(id: String) = Action.async { implicit request : Request[AnyContent] ⇒ makePage(Ok, Administration.module()) } */ } } "AdminSiteProvider" should { "return JsNull for no site" in withScrupalSchema("NoSiteGIvesJsNull") { (scrupal, schema) ⇒ val asp = new AdminSiteProvider val stim = scrupal.stimulusForRequest(FakeRequest("GET", "/app/foo")) val future = asp.site(-1)(stim).map { response : RxResponse ⇒ response.disposition must beEqualTo(Unlocatable) response.payload.content must beEqualTo( JsNull) }(scrupal.executionContext) AsResult(await(future)) } "return JsArray() for no site by name" in withScrupalSchema("NoSiteGIvesJsNull") { (scrupal, schema) ⇒ val asp = new AdminSiteProvider val stim = scrupal.stimulusForRequest(FakeRequest("GET", "/app/foo")) val future = asp.site("nada")(stim).map { response : RxResponse ⇒ response.disposition must beEqualTo(Unlocatable) response.payload.content must beEqualTo( JsArray()) }(scrupal.executionContext) AsResult(await(future)) } } }
scrupal/scrupal-core
scrupal-server/src/test/scala/scrupal/admin/AdminProviderSpec.scala
Scala
apache-2.0
6,611
/* * Copyright 2001-2008 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.matchers import org.scalatest._ import org.scalatest.prop.Checkers import org.scalacheck._ import Arbitrary._ import Prop._ import scala.reflect.BeanProperty class ShouldBeMatcherSpec extends Spec with ShouldMatchers with Checkers with ReturnsNormallyThrowsAssertion with BookPropertyMatchers { class OddMatcher extends BeMatcher[Int] { def apply(left: Int): MatchResult = { MatchResult( left % 2 == 1, left.toString + " was even", left.toString + " was odd" ) } } val odd = new OddMatcher val even = not (odd) describe("The BeMatcher syntax") { it("should do nothing if a BeMatcher matches") { 1 should be (odd) 2 should be (even) } it("should throw TestFailedException if a BeMatcher does not match") { val caught1 = intercept[TestFailedException] { 4 should be (odd) } assert(caught1.getMessage === "4 was even") val caught2 = intercept[TestFailedException] { 5 should be (even) } assert(caught2.getMessage === "5 was odd") } it("should do nothing if a BeMatcher does not match, when used with not") { 2 should not be (odd) 1 should not be (even) 22 should not (not (be (even))) 1 should not (not (be (odd))) } it("should throw TestFailedException if a BeMatcher matches, when used with not") { val caught1 = intercept[TestFailedException] { 3 should not be (odd) } assert(caught1.getMessage === "3 was odd") val caught2 = intercept[TestFailedException] { 6 should not be (even) } assert(caught2.getMessage === "6 was even") val caught3 = intercept[TestFailedException] { 6 should not (not (be (odd))) } assert(caught3.getMessage === "6 was even") } it("should do nothing if a BeMatcher matches, when used in a logical-and expression") { 1 should (be (odd) and be (odd)) 1 should (be (odd) and (be (odd))) 2 should (be (even) and be (even)) 2 should (be (even) and (be (even))) } it("should throw TestFailedException if at least one BeMatcher does not match, when used in a logical-or expression") { // both false val caught1 = intercept[TestFailedException] { 2 should (be (odd) and be (odd)) } assert(caught1.getMessage === "2 was even") val caught2 = intercept[TestFailedException] { 2 should (be (odd) and (be (odd))) } assert(caught2.getMessage === "2 was even") val caught3 = intercept[TestFailedException] { 1 should (be (even) and be (even)) } assert(caught3.getMessage === "1 was odd") val caught4 = intercept[TestFailedException] { 1 should (be (even) and (be (even))) } assert(caught4.getMessage === "1 was odd") // first false val caught5 = intercept[TestFailedException] { 1 should (be (even) and be (odd)) } assert(caught5.getMessage === "1 was odd") val caught6 = intercept[TestFailedException] { 1 should (be (even) and (be (odd))) } assert(caught6.getMessage === "1 was odd") val caught7 = intercept[TestFailedException] { 2 should (be (odd) and be (even)) } assert(caught7.getMessage === "2 was even") val caught8 = intercept[TestFailedException] { 2 should (be (odd) and (be (even))) } assert(caught8.getMessage === "2 was 
even") // TODO: Remember to try a BeMatcher[Any] one, to make sure it works on an Int // second false val caught9 = intercept[TestFailedException] { 1 should (be (odd) and be (even)) } assert(caught9.getMessage === "1 was odd, but 1 was odd") val caught10 = intercept[TestFailedException] { 1 should (be (odd) and (be (even))) } assert(caught10.getMessage === "1 was odd, but 1 was odd") val caught11 = intercept[TestFailedException] { 2 should (be (even) and be (odd)) } assert(caught11.getMessage === "2 was even, but 2 was even") val caught12 = intercept[TestFailedException] { 2 should (be (even) and (be (odd))) } assert(caught12.getMessage === "2 was even, but 2 was even") } it("should do nothing if at least one BeMatcher matches, when used in a logical-or expression") { // both true 1 should (be (odd) or be (odd)) 1 should (be (odd) or (be (odd))) 2 should (be (even) or be (even)) 2 should (be (even) or (be (even))) // first false 1 should (be (even) or be (odd)) 1 should (be (even) or (be (odd))) 2 should (be (odd) or be (even)) 2 should (be (odd) or (be (even))) // second false 1 should (be (odd) or be (even)) 1 should (be (odd) or (be (even))) 2 should (be (even) or be (odd)) 2 should (be (even) or (be (odd))) } it("should throw TestFailedException if a BeMatcher does not match, when used in a logical-or expression") { val caught1 = intercept[TestFailedException] { 2 should (be (odd) or be (odd)) } assert(caught1.getMessage === "2 was even, and 2 was even") val caught2 = intercept[TestFailedException] { 2 should (be (odd) or (be (odd))) } assert(caught2.getMessage === "2 was even, and 2 was even") val caught3 = intercept[TestFailedException] { 1 should (be (even) or be (even)) } assert(caught3.getMessage === "1 was odd, and 1 was odd") val caught4 = intercept[TestFailedException] { 1 should (be (even) or (be (even))) } assert(caught4.getMessage === "1 was odd, and 1 was odd") } it("should do nothing if a BeMatcher does not match, when used in a logical-and expression with not") { 2 should (not be (odd) and not be (odd)) 2 should (not be (odd) and not (be (odd))) 2 should (not be (odd) and (not (be (odd)))) 1 should (not be (even) and not be (even)) 1 should (not be (even) and not (be (even))) 1 should (not be (even) and (not (be (even)))) } it("should throw TestFailedException if at least one BeMatcher matches, when used in a logical-and expression with not") { // both true val caught1 = intercept[TestFailedException] { 1 should (not be (odd) and not be (odd)) } assert(caught1.getMessage === "1 was odd") val caught2 = intercept[TestFailedException] { 1 should (not be (odd) and not (be (odd))) } assert(caught2.getMessage === "1 was odd") val caught3 = intercept[TestFailedException] { 1 should (not be (odd) and (not (be (odd)))) } assert(caught3.getMessage === "1 was odd") val caught4 = intercept[TestFailedException] { 2 should (not be (even) and not be (even)) } assert(caught4.getMessage === "2 was even") val caught5 = intercept[TestFailedException] { 2 should (not be (even) and not (be (even))) } assert(caught5.getMessage === "2 was even") val caught6 = intercept[TestFailedException] { 2 should (not be (even) and (not (be (even)))) } assert(caught6.getMessage === "2 was even") // first false val caught7 = intercept[TestFailedException] { 1 should (not be (even) and not be (odd)) } assert(caught7.getMessage === "1 was odd, but 1 was odd") val caught8 = intercept[TestFailedException] { 1 should (not be (even) and not (be (odd))) } assert(caught8.getMessage === "1 was odd, but 1 was odd") 
val caught9 = intercept[TestFailedException] { 1 should (not be (even) and (not (be (odd)))) } assert(caught9.getMessage === "1 was odd, but 1 was odd") val caught10 = intercept[TestFailedException] { 2 should (not be (odd) and not be (even)) } assert(caught10.getMessage === "2 was even, but 2 was even") val caught11 = intercept[TestFailedException] { 2 should (not be (odd) and not (be (even))) } assert(caught11.getMessage === "2 was even, but 2 was even") val caught12 = intercept[TestFailedException] { 2 should (not be (odd) and (not (be (even)))) } assert(caught12.getMessage === "2 was even, but 2 was even") // second false val caught13 = intercept[TestFailedException] { 1 should (not be (odd) and not be (even)) } assert(caught13.getMessage === "1 was odd") val caught14 = intercept[TestFailedException] { 1 should (not be (odd) and not (be (even))) } assert(caught14.getMessage === "1 was odd") val caught15 = intercept[TestFailedException] { 1 should (not be (odd) and (not (be (even)))) } assert(caught15.getMessage === "1 was odd") val caught16 = intercept[TestFailedException] { 2 should (not be (even) and not be (odd)) } assert(caught16.getMessage === "2 was even") val caught17 = intercept[TestFailedException] { 2 should (not be (even) and not (be (odd))) } assert(caught17.getMessage === "2 was even") val caught18 = intercept[TestFailedException] { 2 should (not be (even) and (not (be (odd)))) } assert(caught18.getMessage === "2 was even") } it("should do nothing if at least one BeMatcher doesn't match, when used in a logical-or expression when used with not") { // both false 2 should (not be (odd) or not be (odd)) 2 should (not be (odd) or not (be (odd))) 2 should (not be (odd) or (not (be (odd)))) 1 should (not be (even) or not be (even)) 1 should (not be (even) or not (be (even))) 1 should (not be (even) or (not (be (even)))) // first false 1 should (not be (even) or not be (odd)) 1 should (not be (even) or not (be (odd))) 1 should (not be (even) or (not (be (odd)))) 2 should (not be (odd) or not be (even)) 2 should (not be (odd) or not (be (even))) 2 should (not be (odd) or (not (be (even)))) // second false 1 should (not be (odd) or not be (even)) 1 should (not be (odd) or not (be (even))) 1 should (not be (odd) or (not (be (even)))) 2 should (not be (even) or not be (odd)) 2 should (not be (even) or not (be (odd))) 2 should (not be (even) or (not (be (odd)))) } it("should throw TestFailedException if both BeMatcher match, when used in a logical-or expression with not") { val caught1 = intercept[TestFailedException] { 1 should (not be (odd) or not be (odd)) } assert(caught1.getMessage === "1 was odd, and 1 was odd") val caught2 = intercept[TestFailedException] { 1 should (not be (odd) or not (be (odd))) } assert(caught2.getMessage === "1 was odd, and 1 was odd") val caught3 = intercept[TestFailedException] { 1 should (not be (odd) or (not (be (odd)))) } assert(caught3.getMessage === "1 was odd, and 1 was odd") val caught4 = intercept[TestFailedException] { 2 should (not be (even) or not be (even)) } assert(caught4.getMessage === "2 was even, and 2 was even") val caught5 = intercept[TestFailedException] { 2 should (not be (even) or not (be (even))) } assert(caught5.getMessage === "2 was even, and 2 was even") val caught6 = intercept[TestFailedException] { 2 should (not be (even) or (not (be (even)))) } assert(caught6.getMessage === "2 was even, and 2 was even") } it("should work when the types aren't exactly the same") { class UnlikableMatcher extends BeMatcher[Any] { def apply(left: 
Any): MatchResult = { MatchResult( false, left.toString + " was not to my liking", left.toString + " was to my liking" ) } } val unlikable = new UnlikableMatcher val likable = not (unlikable) 1 should be (likable) 2 should not be (unlikable) val caught1 = intercept[TestFailedException] { 1 should be (unlikable) } assert(caught1.getMessage === "1 was not to my liking") val caught2 = intercept[TestFailedException] { "The dish" should not be (likable) } assert(caught2.getMessage === "The dish was not to my liking") } } }
kevinwright/scalatest
src/test/scala/org/scalatest/matchers/ShouldBeMatcherSpec.scala
Scala
apache-2.0
13,114
/**
 * Copyright 2011-2016 GatlingCorp (http://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.gatling.charts.stats.buffers

private[stats] trait RunTimes {

  def minTimestamp: Long

  def maxTimestamp: Long
}
ryez/gatling
gatling-charts/src/main/scala/io/gatling/charts/stats/buffers/RunTimes.scala
Scala
apache-2.0
745
package com.arcusys.valamis.gradebook.service.impl import com.arcusys.learn.liferay.services.MessageBusHelper import com.arcusys.valamis.gradebook.model.{CourseActivityType, CourseGrade} import com.arcusys.valamis.gradebook.service._ import com.arcusys.valamis.gradebook.storage.CourseGradeTableComponent import com.arcusys.valamis.liferay.SocialActivityHelper import com.arcusys.valamis.log.LogSupport import com.arcusys.valamis.persistence.common.{DatabaseLayer, SlickProfile} import org.joda.time.DateTime import slick.driver.JdbcProfile import slick.jdbc.JdbcBackend import scala.concurrent.ExecutionContext class TeacherCourseGradeServiceImpl(val db: JdbcBackend#DatabaseDef, val driver: JdbcProfile, implicit val executionContext: ExecutionContext) extends TeacherCourseGradeService with CourseGradeTableComponent with LogSupport with DatabaseLayer with SlickProfile { import driver.api._ val socialActivityHelper = new SocialActivityHelper(CourseActivityType) def get(courseId: Long, userId: Long): Option[CourseGrade] = execSync { courseGrades .filter(x => x.courseId === courseId && x.userId === userId) .result .headOption } def get(courseIds: Seq[Long], userId: Long): Seq[CourseGrade] = execSync { courseGrades .filter(x => x.userId === userId) .filter(x => x.courseId inSet courseIds) .result } def set(courseId: Long, userId: Long, grade: Float, comment: Option[String], companyId: Long): Unit = execSyncInTransaction { (for { updatedCount <- courseGrades .filter(x => x.courseId === courseId && x.userId === userId) .map(x => (x.grade, x.comment)) .update((Some(grade), comment)) _ <- if (updatedCount == 0) { courseGrades += CourseGrade( courseId, userId, Some(grade), DateTime.now(), comment) } else { DBIO.successful() } } yield ()) map { _ => if (grade > LessonSuccessLimit) { onCourseCompleted(companyId, courseId, userId) } } } def setComment(courseId: Long, userId: Long, comment: String, companyId: Long): Unit = execSyncInTransaction { for { updatedCount <- courseGrades .filter(x => x.courseId === courseId && x.userId === userId) .map(x => x.comment) .update(Some(comment)) _ <- if (updatedCount == 0) { courseGrades += CourseGrade( courseId, userId, None, DateTime.now(), Some(comment) ) } else { DBIO.successful() } } yield () } private def onCourseCompleted(companyId: Long, courseId: Long, userId: Long): Unit = { sendCourseCompleted(courseId, userId) socialActivityHelper.addWithSet( companyId, userId, courseId = Option(courseId.toLong), `type` = Some(CourseActivityType.Completed.id), classPK = Option(courseId), createDate = DateTime.now ) } private def sendCourseCompleted(courseId: Long, userId: Long): Unit = { try { val messageValues = new java.util.HashMap[String, AnyRef]() messageValues.put("state", "completed") messageValues.put("courseId", courseId.toString) messageValues.put("userId", userId.toString) MessageBusHelper.sendAsynchronousMessage("valamis/courses/completed", messageValues) } catch { case ex: Throwable => log.error(s"Failed to send course completed event via MessageBus for " + s"courseId: $courseId; userId: $userId ", ex) } } }
arcusys/Valamis
valamis-gradebook/src/main/scala/com/arcusys/valamis/gradebook/service/impl/TeacherCourseGradeServiceImpl.scala
Scala
gpl-3.0
3,684
package org.jetbrains.sbt.project

import java.io.File

import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.externalSystem.model.ProjectSystemId
import com.intellij.openapi.externalSystem.settings.ExternalProjectSettings
import com.intellij.openapi.externalSystem.test.ExternalSystemImportingTestCase
import com.intellij.openapi.projectRoots.impl.JavaAwareProjectJdkTableImpl
import com.intellij.openapi.vfs.LocalFileSystem
import org.jetbrains.plugins.scala.util.TestUtils
import org.jetbrains.sbt.Sbt
import org.jetbrains.sbt.project.ProjectStructureDsl._
import org.jetbrains.sbt.project.ProjectStructureMatcher.ProjectComparisonOptions
import org.jetbrains.sbt.project.settings.SbtProjectSettings
import org.junit.Assert.assertNotNull

import scala.annotation.nowarn

/**
 * @author Nikolay Obedin
 * @since 8/4/15.
 */
abstract class ImportingTestCase extends ExternalSystemImportingTestCase with ProjectStructureMatcher {

  val Log = Logger.getInstance(this.getClass)

  def testProjectDir: File = {
    val testdataPath = TestUtils.getTestDataPath + "/sbt/projects"
    new File(testdataPath, getTestName(true))
  }

  def runTest(expected: project)
             (implicit compareOptions: ProjectComparisonOptions): Unit = {
    importProject()
    assertProjectsEqual(expected, myProject)
    assertNoNotificationsShown(myProject)
  }

  def runTestWithSdk(sdk: com.intellij.openapi.projectRoots.Sdk, expected: project)
                    (implicit compareOptions: ProjectComparisonOptions): Unit = {
    importProject(sdk)
    assertProjectsEqual(expected, myProject)
    assertNoNotificationsShown(myProject)
  }

  override protected def getExternalSystemId: ProjectSystemId = SbtProjectSystem.Id

  override protected def getExternalSystemConfigFileName: String = Sbt.BuildFile

  override protected def getTestsTempDir: String = "" // Use default temp directory

  override protected def getCurrentExternalProjectSettings: ExternalProjectSettings = {
    val settings = new SbtProjectSettings
    val internalSdk = JavaAwareProjectJdkTableImpl.getInstanceEx.getInternalJdk: @nowarn("cat=deprecation")
    settings.jdk = internalSdk.getName
    settings
  }

  override protected def setUpInWriteAction(): Unit = {
    super.setUpInWriteAction()
    setUpProjectDirectory()
  }

  private def setUpProjectDirectory(): Unit = {
    myProjectRoot = LocalFileSystem.getInstance.refreshAndFindFileByIoFile(testProjectDir)
    assertNotNull("project root was not found: " + testProjectDir, myProjectRoot)
  }
}
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/sbt/project/ImportingTestCase.scala
Scala
apache-2.0
2,542
package com.microsoft.partnercatalyst.fortis.spark.transforms.language import org.scalatest.FlatSpec class LocalLanguageDetectorSpec extends FlatSpec { "The language detector" should "detect English" in { val detector = new LocalLanguageDetector() assert(detector.detectLanguage("And I in going, madam, weep o'er my father's death anew: but I must attend his majesty's command, to whom I am now in ward, evermore in subjection.").contains("en")) } it should "detect French" in { val detector = new LocalLanguageDetector() assert(detector.detectLanguage("Je l’avouerai franchement à mes lecteurs ; je n’étais jamais encore sorti de mon trou").contains("fr")) } it should "detect Spanish" in { val detector = new LocalLanguageDetector() assert(detector.detectLanguage("En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero").contains("es")) } it should "detect Chinese" in { val detector = new LocalLanguageDetector() assert(detector.detectLanguage("故经之以五事,校之以计,而索其情,一曰道,二曰天,三曰地,四曰将,五曰法。").contains("zh")) } it should "detect Urdu" in { val detector = new LocalLanguageDetector() assert(detector.detectLanguage("تازہ ترین خبروں، ویڈیوز اور آڈیوز کے لیے بی بی سی اردو پر آئیے۔ بی بی سی اردو دنیا بھر کی خبروں کے حصول کے لیے ایک قابلِ اعتماد ویب سائٹ ہے۔").contains("ur")) } it should "detect gibberish" in { val detector = new LocalLanguageDetector() assert(detector.detectLanguage("Heghlu'meH QaQ jajvam").isEmpty) } }
CatalystCode/project-fortis-spark
src/test/scala/com/microsoft/partnercatalyst/fortis/spark/transforms/language/LocalLanguageDetectorSpec.scala
Scala
mit
1,763
import scala.scalajs.js import scala.scalajs.js.annotation.* class A { @JSExport("value") // error def hello: String = "foo" @JSExport("value") def world: String = "bar" } class B { class Box[T](val x: T) @JSExport // error def ub(x: Box[String]): String = x.x @JSExport def ub(x: Box[Int]): Int = x.x }
dotty-staging/dotty
tests/neg-scalajs/jsexport-double-definition.scala
Scala
apache-2.0
326
package jsky.app.ot.gemini.editor.auxfile import edu.gemini.auxfile.api.AuxFile import edu.gemini.spModel.core.SPProgramID import jsky.app.ot.gemini.editor.ProgramForm import jsky.app.ot.vcs.VcsOtClient import java.text.SimpleDateFormat import java.util.{Collections, Date, TimeZone} import jsky.util.gui.Resources import scala.collection.JavaConverters._ import scala.swing._ import scala.swing.BorderPanel.Position._ import scala.swing.event.{MouseClicked, TableRowsSelected} object AuxFileEditor { private class LabelCellRenderer[T](conf: (Label, T) => Unit) extends Table.AbstractRenderer[T, Label](new Label) { override def configure(t: Table, sel: Boolean, foc: Boolean, value: T, row: Int, col: Int) { conf(component, value) } } private val SizeCellRenderer = new LabelCellRenderer[Integer]((lab, size) => { lab.horizontalAlignment = Alignment.Right lab.text = size.toString }) private val Format = new SimpleDateFormat("MM/dd/yy HH:mm:ss") { setTimeZone(TimeZone.getTimeZone("UTC")) } private val DateCellRenderer = new LabelCellRenderer[Date]((lab, date) => { lab.horizontalAlignment = Alignment.Center lab.text = Format.format(date) }) private val CheckIcon = Resources.getIcon("eclipse/check.gif") private val CheckCellRenderer = new LabelCellRenderer[java.lang.Boolean]((lab, checked) => { lab.text = "" lab.icon = if (checked) CheckIcon else null }) // Action requires a title and button displays the action title. This is // a button with an action but no title. private class ImageButton(a: Action) extends Button(a) { text = null focusable = false } } import AuxFileEditor._ class AuxFileEditor(form: ProgramForm) extends BorderPanel { val model = new AuxFileModel(VcsOtClient.unsafeGetRegistrar) private val addAction = new AddAction(this, model) private val removeAction = new RemoveAction(this, model) private val fetchAction = new FetchAction(this, model) private val openAction = new OpenAction(this, model) private val updateAction = new UpdateAction(this, model) private val descriptionTextField = new TextField() private val describeAction = new DescribeAction(this, model, descriptionTextField) descriptionTextField.action = describeAction private val checkAction = new CheckAction(this, model) form.tabbedPane.insertTab("File Attachment", null, peer, "View/Edit file attachments", 0) form.tabbedPane.setSelectedIndex(0) def update(pid: SPProgramID) { model.init(pid) // this is essentially updateAction.apply() w/o any popups on error AuxFileAction.silentUpdate(model) } layoutManager.setVgap(5) val attachmentTable = new Table() { model = new AttachmentTableModel(Collections.emptyList()) autoResizeMode = Table.AutoResizeMode.LastColumn peer.getTableHeader.setReorderingAllowed(false) override def rendererComponent(sel: Boolean, foc: Boolean, row: Int, col: Int): Component = (col, model.getValueAt(row, col)) match { case (c, size: Integer) if c == AttachmentTableModel.Col.SIZE.ordinal => SizeCellRenderer.componentFor(this, sel, foc, size, row, col) case (c, date: Date) if c == AttachmentTableModel.Col.LAST_MOD.ordinal => DateCellRenderer.componentFor(this, sel, foc, date, row, col) case (c, check: java.lang.Boolean) if c == AttachmentTableModel.Col.CHECKED.ordinal => CheckCellRenderer.componentFor(this, sel, foc, check, row, col) case _ => super.rendererComponent(sel, foc, row, col) } } layout(new ScrollPane(attachmentTable)) = Center class ControlPanel extends BorderPanel { layoutManager.setHgap(10) layout(new GridPanel(1, 0) { hGap = 5 contents += new ImageButton(addAction) contents += new 
ImageButton(removeAction) contents += new ImageButton(fetchAction) contents += new ImageButton(openAction) contents += new ImageButton(updateAction) }) = West layout(new BorderPanel() { layoutManager.setHgap(5) layout(descriptionTextField) = Center layout(new Button(describeAction)) = East }) = Center layout(new Button(checkAction)) = East } layout(new ControlPanel) = South // Gets the list of AuxFiles corresponding to selected table rows. private def selectedFiles(files: List[AuxFile]): List[AuxFile] = files.zipWithIndex collect { case (f, i) if attachmentTable.selection.rows.contains(i) => f } // Gets the set of tables row indices corresponding to selected aux files. private def selectedIndices(all: List[AuxFile], sel: List[AuxFile]): Set[Int] = (Set.empty[Int]/:sel) { (s, f) => s + all.indexOf(f) } // If all given files contain the same description, return it otherwise "" private def commonDescription(files: List[AuxFile]): String = files.map(f => Option(f.getDescription).getOrElse("")).distinct match { case List(d) => d case _ => "" } // Double click a table row fetches and opens the corresponding file. listenTo(attachmentTable) reactions += { case evt: MouseClicked if evt.clicks == 2 && openAction.enabled => openAction() } // Watch the model in order to reset the table model and selection. listenTo(model) reactions += { case AuxFileStateEvent(evt) => val (files, sel) = evt.map(s => (s.files, s.selection)).getOrElse((Nil, Nil)) deafTo(attachmentTable.selection) attachmentTable.model = new AttachmentTableModel(files.asJava) attachmentTable.selection.rows.clear() attachmentTable.selection.rows ++= selectedIndices(files, sel) if (sel.isEmpty) descriptionTextField.text = "" listenTo(attachmentTable.selection) } // Selecting table rows updates the model to record the selection and let the // actions update their enabled state. listenTo(attachmentTable.selection) reactions += { case TableRowsSelected(_, _, false) => for (pid <- model.currentPid; files <- model.currentFiles) { deafTo(model) val sel = selectedFiles(files) model.select(pid, sel) descriptionTextField.text = commonDescription(sel) listenTo(model) } } }
spakzad/ocs
bundle/jsky.app.ot/src/main/scala/jsky/app/ot/gemini/editor/auxfile/AuxFileEditor.scala
Scala
bsd-3-clause
6,217
package lila.api import play.api.libs.json._ import lila.common.PimpedJson._ import lila.db.api._ import lila.db.Implicits._ import lila.game.GameRepo import lila.hub.actorApi.{ router => R } import lila.rating.Perf import lila.user.tube.userTube import lila.user.{ UserRepo, User, Perfs, Profile } import makeTimeout.short private[api] final class UserApi( jsonView: lila.user.JsonView, relationApi: lila.relation.RelationApi, bookmarkApi: lila.bookmark.BookmarkApi, crosstableApi: lila.game.CrosstableApi, prefApi: lila.pref.PrefApi, makeUrl: String => String, apiToken: String, userIdsSharingIp: String => Fu[List[String]]) { def list( team: Option[String], token: Option[String], nb: Option[Int], engine: Option[Boolean]): Fu[JsObject] = (team match { case Some(teamId) => lila.team.MemberRepo.userIdsByTeam(teamId) flatMap UserRepo.enabledByIds case None => $find(pimpQB($query( UserRepo.enabledSelect ++ (engine ?? UserRepo.engineSelect) )) sort UserRepo.sortPerfDesc(lila.rating.PerfType.Standard.key), makeNb(nb, token)) }) map { users => Json.obj( "list" -> JsArray( users map { u => jsonView(u, extended = team.isDefined) ++ Json.obj("url" -> makeUrl(s"@/${u.username}")) } ) ) } def one(username: String, token: Option[String])(implicit ctx: Context): Fu[Option[JsObject]] = UserRepo named username flatMap { case None => fuccess(none) case Some(u) => GameRepo mostUrgentGame u zip (check(token) ?? (knownEnginesSharingIp(u.id) map (_.some))) zip (ctx.me.filter(u!=) ?? { me => crosstableApi.nbGames(me.id, u.id) }) zip relationApi.nbFollowing(u.id) zip relationApi.nbFollowers(u.id) zip ctx.isAuth.?? { prefApi followable u.id } zip ctx.userId.?? { relationApi.relation(_, u.id) } zip ctx.userId.?? { relationApi.relation(u.id, _) } map { case (((((((gameOption, knownEngines), nbGamesWithMe), following), followers), followable), relation), revRelation) => jsonView(u, extended = true) ++ { Json.obj( "url" -> makeUrl(s"@/$username"), "playing" -> gameOption.map(g => makeUrl(s"${g.gameId}/{$g.color.name}")), "knownEnginesSharingIp" -> knownEngines, "nbFollowing" -> following, "nbFollowers" -> followers, "count" -> Json.obj( "all" -> u.count.game, "rated" -> u.count.rated, "ai" -> u.count.ai, "draw" -> u.count.draw, "drawH" -> u.count.drawH, "loss" -> u.count.loss, "lossH" -> u.count.lossH, "win" -> u.count.win, "winH" -> u.count.winH, "bookmark" -> bookmarkApi.countByUser(u), "me" -> nbGamesWithMe) ) ++ ctx.isAuth.??(Json.obj( "followable" -> followable, "following" -> relation.exists(true ==), "blocking" -> relation.exists(false ==), "followsYou" -> revRelation.exists(true ==) )) }.noNull } map (_.some) } def knownEnginesSharingIp(userId: String): Fu[List[String]] = userIdsSharingIp(userId) flatMap UserRepo.filterByEngine private def makeNb(nb: Option[Int], token: Option[String]) = math.min(check(token) ? 1000 | 100, nb | 10) private def check(token: Option[String]) = token ?? (apiToken==) }
pavelo65/lila
modules/api/src/main/UserApi.scala
Scala
mit
3,523
package com.campudus.tableaux.database.domain import com.campudus.tableaux.RequestContext import com.campudus.tableaux.database.model.FolderModel.FolderId import com.campudus.tableaux.router.auth.permission.{RoleModel, ScopeMedia} import io.vertx.core.json.JsonObject import org.joda.time.DateTime import org.vertx.scala.core.json._ case class Folder( id: FolderId, name: String, description: String, parents: Seq[FolderId], createdAt: Option[DateTime], updatedAt: Option[DateTime] ) extends DomainObject { override def getJson: JsonObject = Json.obj( "id" -> (id match { case 0 => None.orNull case _ => id }), "name" -> name, "description" -> description, "parent" -> parents.lastOption.orNull, // for compatibility "parents" -> compatibilityGet(parents), "createdAt" -> optionToString(createdAt), "updatedAt" -> optionToString(updatedAt) ) } case class ExtendedFolder( folder: Folder, subfolders: Seq[Folder], files: Seq[ExtendedFile] )( implicit requestContext: RequestContext, roleModel: RoleModel ) extends DomainObject { override def getJson: JsonObject = { val folderJson = folder.getJson val extendedFolderJson = folderJson .mergeIn( Json.obj( "subfolders" -> compatibilityGet(subfolders), "files" -> compatibilityGet(files) )) roleModel.enrichDomainObject(extendedFolderJson, ScopeMedia) } }
campudus/tableaux
src/main/scala/com/campudus/tableaux/database/domain/folder.scala
Scala
apache-2.0
1,457
package org.jetbrains.plugins.scala package lang package psi package impl package toplevel package typedef import com.intellij.lang.ASTNode import com.intellij.openapi.progress.ProgressManager import com.intellij.openapi.project.DumbService import com.intellij.psi._ import com.intellij.psi.impl.light.LightField import com.intellij.psi.stubs.StubElement import com.intellij.psi.tree.IElementType import com.intellij.psi.util.PsiTreeUtil import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.icons.Icons import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor import org.jetbrains.plugins.scala.lang.psi.api.base.ScPrimaryConstructor import org.jetbrains.plugins.scala.lang.psi.api.statements._ import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameterClause} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._ import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScTypeParametersOwner, ScTypedDefinition} import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers.SignatureNodes import org.jetbrains.plugins.scala.lang.psi.stubs.ScTemplateDefinitionStub import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext} import org.jetbrains.plugins.scala.lang.psi.types.{PhysicalSignature, ScSubstitutor, ScType, ScTypeParameterType} import org.jetbrains.plugins.scala.lang.resolve.processor.BaseProcessor import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount} import scala.collection.mutable import scala.collection.mutable.ArrayBuffer /** * @author Alexander.Podkhalyuzin */ class ScClassImpl private (stub: StubElement[ScTemplateDefinition], nodeType: IElementType, node: ASTNode) extends ScTypeDefinitionImpl(stub, nodeType, node) with ScClass with ScTypeParametersOwner with ScTemplateDefinition { override def accept(visitor: PsiElementVisitor) { visitor match { case visitor: ScalaElementVisitor => visitor.visitClass(this) case _ => super.accept(visitor) } } override def additionalJavaNames: Array[String] = { //do not add all cases with fakeCompanionModule, it will be used in Stubs. 
if (isCase) fakeCompanionModule.map(_.getName).toArray else Array.empty } def this(node: ASTNode) = {this(null, null, node)} def this(stub: ScTemplateDefinitionStub) = {this(stub, ScalaElementTypes.CLASS_DEF, null)} override def toString: String = "ScClass: " + name override def getIconInner = Icons.CLASS def constructor: Option[ScPrimaryConstructor] = { val stub = getStub if (stub != null) { val array = stub.getChildrenByType(ScalaElementTypes.PRIMARY_CONSTRUCTOR, JavaArrayFactoryUtil.ScPrimaryConstructorFactory) return array.headOption } findChild(classOf[ScPrimaryConstructor]) } def parameters = constructor match { case Some(c) => c.effectiveParameterClauses.flatMap(_.unsafeClassParameters) case None => Seq.empty } override def members = constructor match { case Some(c) => super.members ++ Seq(c) case _ => super.members } import com.intellij.psi.scope.PsiScopeProcessor import com.intellij.psi.{PsiElement, ResolveState} override def processDeclarationsForTemplateBody(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = { if (DumbService.getInstance(getProject).isDumb) return true if (!super[ScTemplateDefinition].processDeclarationsForTemplateBody(processor, state, lastParent, place)) return false constructor match { case Some(constr) if place != null && PsiTreeUtil.isContextAncestor(constr, place, false) => //ignore, should be processed in ScParameters case _ => for (p <- parameters) { ProgressManager.checkCanceled() if (processor.isInstanceOf[BaseProcessor]) { // don't expose class parameters to Java. if (!processor.execute(p, state)) return false } } } super[ScTypeParametersOwner].processDeclarations(processor, state, lastParent, place) } override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = { super[ScTemplateDefinition].processDeclarations(processor, state, lastParent, place) } override def isCase: Boolean = hasModifierProperty("case") override def getMethods: Array[PsiMethod] = { getAllMethods.filter(_.containingClass == this) } override def getAllMethods: Array[PsiMethod] = { val res = new ArrayBuffer[PsiMethod]() val names = new mutable.HashSet[String] res ++= getConstructors TypeDefinitionMembers.SignatureNodes.forAllSignatureNodes(this) { node => val isInterface = node.info.namedElement match { case t: ScTypedDefinition if t.isAbstractMember => true case _ => false } this.processPsiMethodsForNode(node, isStatic = false, isInterface = isInterface)(res += _, names += _) } for (synthetic <- syntheticMethodsNoOverride) { this.processPsiMethodsForNode(new SignatureNodes.Node(new PhysicalSignature(synthetic, ScSubstitutor.empty), ScSubstitutor.empty), isStatic = false, isInterface = isInterface)(res += _, names += _) } if (isCase) { //for Scala this is done in ScalaOIUtil.isProductAbstractMethod, for Java we do it here val caseClassGeneratedFunctions = Array( "def canEqual(that: Any): Boolean = ???", "def equals(that: Any): Boolean = ???", "def productArity: Int = ???", "def productElement(n: Int): Any = ???" 
) caseClassGeneratedFunctions.foreach { funText => val fun: ScFunction = ScalaPsiElementFactory.createMethodWithContext(funText, this, this) fun.setSynthetic(this) res += fun } } ScalaPsiUtil.getCompanionModule(this) match { case Some(o: ScObject) => def add(method: PsiMethod) { if (!names.contains(method.getName)) { res += method } } TypeDefinitionMembers.SignatureNodes.forAllSignatureNodes(o) { node => this.processPsiMethodsForNode(node, isStatic = true, isInterface = false)(add) } for (synthetic <- o.syntheticMethodsNoOverride) { this.processPsiMethodsForNode(new SignatureNodes.Node(new PhysicalSignature(synthetic, ScSubstitutor.empty), ScSubstitutor.empty), isStatic = true, isInterface = false)(res += _, names += _) } case _ => } res.toArray } override def getConstructors: Array[PsiMethod] = { val buffer = new ArrayBuffer[PsiMethod] buffer ++= functions.filter(_.isConstructor).flatMap(_.getFunctionWrappers(isStatic = false, isInterface = false, Some(this))) constructor match { case Some(x) => buffer ++= x.getFunctionWrappers case _ => } buffer.toArray } override protected def syntheticMethodsNoOverrideImpl: Seq[PsiMethod] = { val buf = new ArrayBuffer[PsiMethod] if (isCase && !hasModifierProperty("abstract") && parameters.nonEmpty) { constructor match { case Some(x: ScPrimaryConstructor) => val hasCopy = !TypeDefinitionMembers.getSignatures(this).forName("copy")._1.isEmpty val addCopy = !hasCopy && !x.parameterList.clauses.exists(_.hasRepeatedParam) if (addCopy) { try { val method = ScalaPsiElementFactory.createMethodWithContext(copyMethodText, this, this) method.setSynthetic(this) buf += method } catch { case e: Exception => //do not add methods if class has wrong signature. } } case None => } } SyntheticMembersInjector.inject(this) ++: buf.toSeq } private def copyMethodText: String = { val x = constructor.getOrElse(return "") val paramString = (if (x.parameterList.clauses.length == 1 && x.parameterList.clauses.head.isImplicit) "()" else "") + x.parameterList.clauses.map{ c => val start = if (c.isImplicit) "(implicit " else "(" c.parameters.map{ p => val paramType = p.typeElement match { case Some(te) => te.getText case None => "Any" } p.name + " : " + paramType + " = this." + p.name }.mkString(start, ", ", ")") }.mkString("") val returnType = name + typeParameters.map(_.name).mkString("[", ",", "]") "def copy" + typeParamString + paramString + " : " + returnType + " = throw new Error(\"\")" } private def implicitMethodText: String = { val constr = constructor.getOrElse(return "") val returnType = name + typeParametersClause.map(clause => typeParameters.map(_.name). 
mkString("[", ",", "]")).getOrElse("") val typeParametersText = typeParametersClause.map(tp => { tp.typeParameters.map(tp => { val baseText = tp.typeParameterText if (tp.isContravariant) { val i = baseText.indexOf('-') baseText.substring(i + 1) } else if (tp.isCovariant) { val i = baseText.indexOf('+') baseText.substring(i + 1) } else baseText }).mkString("[", ", ", "]") }).getOrElse("") val parametersText = constr.parameterList.clauses.map { case clause: ScParameterClause => clause.parameters.map { case parameter: ScParameter => val paramText = s"${parameter.name} : ${parameter.typeElement.map(_.getText).getOrElse("Nothing")}" parameter.getDefaultExpression match { case Some(expr) => s"$paramText = ${expr.getText}" case _ => paramText } }.mkString(if (clause.isImplicit) "(implicit " else "(", ", ", ")") }.mkString getModifierList.accessModifier.map(am => am.getText + " ").getOrElse("") + "implicit def " + name + typeParametersText + parametersText + " : " + returnType + " = throw new Error(\"\")" } @Cached(synchronized = false, ModCount.getOutOfCodeBlockModificationCount, this) def getSyntheticImplicitMethod: Option[ScFunction] = { if (hasModifierProperty("implicit")) { constructor match { case Some(x: ScPrimaryConstructor) => try { val method = ScalaPsiElementFactory.createMethodWithContext(implicitMethodText, this.getContext, this) method.setSynthetic(this) Some(method) } catch { case e: Exception => None } case None => None } } else None } override def getFields: Array[PsiField] = { val fields = constructor match { case Some(constr) => constr.parameters.map { param => param.getType(TypingContext.empty) match { case Success(tp: ScTypeParameterType, _) if tp.param.findAnnotation("scala.specialized") != null => val factory: PsiElementFactory = PsiElementFactory.SERVICE.getInstance(getProject) val psiTypeText: String = ScType.toPsi(tp, getProject, getResolveScope).getCanonicalText val text = s"public final $psiTypeText ${param.name};" val elem = new LightField(getManager, factory.createFieldFromText(text, this), this) elem.setNavigationElement(param) Option(elem) case _ => None } } case _ => Seq.empty } super.getFields ++ fields.flatten } override def getTypeParameterList: PsiTypeParameterList = typeParametersClause.orNull override def getInterfaces: Array[PsiClass] = { getSupers.filter(_.isInterface) } }
JetBrains/intellij-scala-historical
src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/typedef/ScClassImpl.scala
Scala
apache-2.0
11,823
package dnd5_dm_db package lang import dnd5_dm_db.model._ object Fr extends Lang with fr.AlignmentText with fr.UnitsText with fr.SpellText with fr.MonsterText with fr.SkillAndLanguageText{ val id: String = "fr" val or : String = "ou" val seeBelow : String = "voir ci-dessous" val monsters : String = "Monstres" val indexes : String = "Indexes" val range: String = "Portée" val level: String = "niveau" val armorClass : String = "Classe d'armure" val hp : String = "PV" val speed : String = "Vitesse" val speedLength : Speed => String = { s => val kind = s match { case Regular(_) => "" case Burrow(_) => "creuser : " case Climb(_) => "escalade : " case Fly(_) => "vol : " case Swim(_) => "nage : " } kind + length(s.speed) } val ability_short : Ability => String = { case Strength => "FOR" case Dexterity => "DEX" case Constitution => "CON" case Intelligence => "INT" case Wisdom => "SAG" case Charisma => "CHA" } val ability_long : Ability => String = { case Strength => "la Force" case Dexterity => "la Dextérité" case Constitution => "la Constitution" case Intelligence => "l'Intelligence" case Wisdom => "la Sagesse" case Charisma => "le Charisme" } val savingThrows : String = "Jets de sauvegarde" val challengeRanking : String = "Facteur de puissance" val xp : String = "PX" val actionName : Action => String = { case aa : AttackAction => aa.kind match { case _ : MeleeAttack => "Attaque d'arme de corps à corps" case _ : RangedAttack | _ : RangedSpecial => "Attaque d'arme à distance" case _ : MeleeOrRange => "Attaque d'arme de corps à corps ou à distance" } case _ : MultiAttack => "Attaque multiple" case sa : SpecialAction => sa.name.value(this) case wa : WeaponAction => actionName(wa.toAttackAction(this)) } val toHit : String = "au toucher" val target : Int => String = "cible" + fr.plural(_) val senses : String = "Sens" val sens : Sens => String = { case PassivePerception(v) => s"Perception passive $v" case BlindSight(r) => s"Perception aveugle ${length(r)}" case DarkVision(r) => s"Visibilité dans l'obscurité ${length(r)}" case Tremorsense(r) => s"Détection des vibrations ${length(r)}" case TrueSight(r) => s"Vision véritable ${length(r)}" } val damageType : DamageType => String = { case FromNonMagicalWeapon(dt) => (dt map damageType mkString ", ") + " d'une arme non magique." case Acid => "acides" case Bludgeoning => "contondants" case Cold => "de froid" case Fire => "de feu" case Force => "de force" case Lightning => "de foudre" case Necrotic => "nécrotiques" case Piercing => "perforants" case Poison => "empoisonnés" case Psychic => "psychiques" case Radiant => "rayonnants" case Slashing => "tranchants" case Thunder => "de tonnerre" } val versatile : Die => String = d => s", ou ${d.average} ($d) dégâts tranchants si utilisé à deux mains." //, or 6 (1d10 + 1) slashing damage if used with two hands. 
val reach: String = "allonge" val hit : String = "Touche" val damages: String = "dégâts" val hits : Hit => String = { case Damage(die, types, sdesc) => // val types = // types0 map damageType mkString (" ",", ", "") val descStr = sdesc map (desc => formatToHtml(desc.value(this))) getOrElse "" s"${die.average} ($die) $damages ${damageType(types)}$descStr" case SpecialHit(desc) => desc.value(this) } val source : String = "Source" val unknown : String = "inconnue" val atWill : String = "à volonté" val spellLvl : Int => String = { case 0 => "Sorts mineurs" case x => s"Niveau $x" } val slots : Option[Int] => String = { case None => atWill case Some(x) => s"$x emplacement${fr.plural(x)}" } val spells : String = "Sorts" val spellCastingText : (String, SpellCasting) => String = { case (name, sc) => s"Un $name est un lanceur de sorts de niveau ${sc.casterLevel}. Sa caractéristique pour lancer des sorts est ${ability_long(sc.ability)} (sauvegarde DD ${sc.saveDifficultyClass}, ${Die.bonus_str(sc.attackBonus)} au jet d\'attaque). Un $name a les sorts de ${clazz(sc.clazz)} suivants préparés:" } val clazz : DnDClass => String = { case Barbarian => "Barbare" case Bard => "Barde" case Cleric => "Clerc" case Druid => "Druide" case Fighter => "Guerrier" case Monk => "Moine" case Paladin => "Paladin" case Ranger => "Ranger" case Rogue => "Roublard" case Sorcerer => "Ensorceleur" case Warlock => "Sorcier" case Wizard => "Magicien" } val clearScreen : String = "Tout effacer" val createScreen : String = "Nouvel Écran" val deleteScreen : String = "Supprimer" val renameScreen : String = "Renommer" val damageVulnerabilites : String = "Vulnérable aux dégâts" val damageImmunities : String = "Immunisé aux dégâts" val conditionImmunities : String = "Immunisé aux conditions" val resistance : String = "Résistances" val conditions : Condition => String = { case Prone => "À terre" case Grappled => "Agrippé" case Deafened => "Assourdi" case Blinded => "Aveuglé" case Charmed => "Charmé" case Frightened => "Effrayé" case Poisoned => "Empoisonné" case Restrained => "Entravé" case Stunned => "Étourdi" case Incapacitated => "Incapable d'agir" case Unconscious => "Inconscient" case Invisible => "Invisible" case Paralyzed => "Paralysé" case Petrified => "Pétrifié" case Exhaustion => "Épuisement" } }
lorilan/dnd5_dm_db
src/main/scala/dnd5_dm_db/lang/Fr.scala
Scala
gpl-3.0
5,749
package org.scalawiki.wlx.stat import org.scalawiki.dto.markup.Table import org.scalawiki.wlx.{ImageDB, MonumentDB} class MostPopularMonuments(val stat: ContestStat) extends Reporter { def this(imageDbs: Seq[ImageDB], totalImageDb: Option[ImageDB], monumentDb: MonumentDB) = { this(ContestStat( monumentDb.contest, imageDbs.headOption.map(_.contest.year).getOrElse(monumentDb.contest.year), Some(monumentDb), imageDbs.lastOption.orElse(totalImageDb), totalImageDb, imageDbs//.headOption.map(_ => imageDbs.init).getOrElse(Seq.empty) )) } val name = "Most photographed objects" override def category = contest.contestType.name + " in " + contest.country.name def table = mostPopularMonumentsTable(stat.dbsByYear, stat.totalImageDb, stat.monumentDb.get) def mostPopularMonumentsTable(imageDbs: Seq[ImageDB], totalImageDb: Option[ImageDB], monumentDb: MonumentDB): Table = { val imageDbsByYear = imageDbs.groupBy(_.contest.year) val yearSeq = imageDbsByYear.keys.toSeq.sorted val numYears = yearSeq.size val columns = Seq("Id", "Name") ++ totalImageDb.map(_ => Seq(s"$numYears years photos", s"$numYears years authors")).getOrElse(Seq.empty) ++ yearSeq.flatMap(year => Seq(s"$year photos", s"$year authors")) val photosCountTotal = totalImageDb.map(_.imageCountById) val authorsCountTotal = totalImageDb.map(_.authorsCountById) val photoCounts = yearSeq.map(year => imageDbsByYear(year).head.imageCountById) val authorCounts = yearSeq.map(year => imageDbsByYear(year).head.authorsCountById) val counts = Seq(photosCountTotal, authorsCountTotal).flatten ++ (0 until numYears).flatMap(i => Seq(photoCounts(i), authorCounts(i))) val topPhotos = (photosCountTotal.toSet ++ photoCounts).flatMap(topN(12, _).toSet) val topAuthors = (authorsCountTotal.toSet ++ authorCounts).flatMap(topN(12, _).toSet) val allTop = topPhotos ++ topAuthors val allTopOrdered = allTop.toSeq.sorted val rows = allTopOrdered.map { id => val monument = monumentDb.byId(id).get Seq( id, monument.name.replaceAll("\\\\[\\\\[", "[[:uk:") + monument.galleryLink ) ++ counts.map(_.getOrElse(id, 0).toString) } new Table(columns, rows, name) } def topN(n: Int, stat: Map[String, Int]) = stat.toSeq.sortBy(-_._2).take(n).map(_._1) }
intracer/scalawiki
scalawiki-wlx/src/main/scala/org/scalawiki/wlx/stat/MostPopularMonuments.scala
Scala
apache-2.0
2,388
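The topN helper above is the core of the ranking; here is a tiny self-contained example of the same sort-by-descending-count-and-keep-ids step, with made-up monument ids and counts.

object TopNDemo {
  def topN(n: Int, stat: Map[String, Int]): Seq[String] =
    stat.toSeq.sortBy(-_._2).take(n).map(_._1)

  def main(args: Array[String]): Unit = {
    val photosById = Map("80-391-0151" -> 42, "05-101-0001" -> 7, "26-101-0017" -> 19)
    // Prints the ids of the two most photographed objects: 80-391-0151, 26-101-0017
    println(topN(2, photosById).mkString(", "))
  }
}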
package demo

object RingBuffer {
  // Rounds the requested capacity up to the next power of two so that
  // index wrapping can be done with a bit mask instead of a modulo.
  def nextPowerOfTwo(value: Int): Int =
    1 << (32 - Integer.numberOfLeadingZeros(value - 1))
}

class RingBuffer[T: scala.reflect.ClassTag] private (capacity: Int, mask: Int, buffer: Array[T]) {
  private var tail: Long = 0L
  private var head: Long = 0L

  def this(capacity: Int) {
    this(
      RingBuffer.nextPowerOfTwo(capacity),
      RingBuffer.nextPowerOfTwo(capacity) - 1,
      Array.ofDim[T](RingBuffer.nextPowerOfTwo(capacity))
    )
  }

  // Appends an element; returns false when the buffer is full.
  def offer(e: T): Boolean = {
    val wrapPoint = tail - capacity
    if (head <= wrapPoint) false
    else {
      val ind = (tail & mask).toInt
      buffer(ind) = e
      tail = tail + 1
      true
    }
  }

  // Removes and returns the oldest element, if any.
  def poll: Option[T] =
    if (head >= tail) None
    else {
      val index = (head & mask).toInt
      val element: T = buffer(index)
      buffer(index) = null.asInstanceOf[T]
      head = head + 1
      Some(element)
    }

  // Returns the oldest element without removing it.
  def peek: Option[T] =
    if (head >= tail) None
    else {
      val index = (head & mask).toInt
      Some(buffer(index))
    }

  // Snapshot of the currently buffered elements in insertion order.
  def entries: Array[T] = {
    var head0 = head
    val copy =
      if (tail > capacity) Array.ofDim[T](capacity)
      else Array.ofDim[T](tail.toInt - head.toInt)
    var i = 0
    while (head0 < tail) {
      val ind = (head0 & mask).toInt
      copy(i) = buffer(ind)
      i += 1
      head0 += 1
    }
    copy
  }

  def size: Int = (tail - head).toInt

  override def toString =
    s"nextHead: [$head/${(head & mask).toInt}] nextTail:[$tail/${(tail & mask).toInt}] buffer: ${buffer.mkString(",")}"
}
haghard/docker-compose-akka-cluster
src/main/scala/demo/RingBuffer.scala
Scala
apache-2.0
1,615
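A minimal usage sketch for the RingBuffer above, assuming demo.RingBuffer from the record is on the classpath; the requested capacity of 5 is rounded up to the next power of two (8), so all six offers succeed.

object RingBufferDemo {
  def main(args: Array[String]): Unit = {
    val buffer = new demo.RingBuffer[String](5)
    (1 to 6).foreach(i => buffer.offer(s"event-$i"))

    println(buffer.size) // 6
    println(buffer.poll) // Some(event-1) -- elements come out in FIFO order
    println(buffer.peek) // Some(event-2), left in place
    println(buffer.entries.mkString(", ")) // event-2 through event-6
  }
}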
package com.ytsebro.beans /** * Created by yegor on 8/21/16. */ case class Node(id: Int, text: String, pos: String, sent: Int)
egorsz/textview
src/main/scala/com/ytsebro/beans/Node.scala
Scala
gpl-3.0
131
/* * Copyright 2015 PagerDuty, Inc. * * Author: Jesse Haber-Kucharsky <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.pagerduty.funhttpclient import akka.actor._ import akka.stream._ import scala.concurrent._ /** * Context for making HTTP requests and loading the contents of response bodies. * * Create a new context via [[HttpContext.apply]]. */ abstract class HttpContext { implicit def actorSystem: ActorSystem implicit def streamMaterializer: Materializer def terminate(): Future[Unit] } object HttpContext { def apply(system: ActorSystem): HttpContext = { new HttpContext { override implicit val actorSystem = system override implicit val streamMaterializer: Materializer = ActorMaterializer() override def terminate() = akka.http.scaladsl.Http().shutdownAllConnectionPools() } } }
PagerDuty/fun-http-client
library/src/main/scala/com/pagerduty/funhttpclient/HttpContext.scala
Scala
apache-2.0
1,415
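A minimal sketch of how the HttpContext factory above might be wired up and torn down; the actor system name is arbitrary.

import akka.actor.ActorSystem
import com.pagerduty.funhttpclient.HttpContext

import scala.concurrent.Await
import scala.concurrent.duration._

object HttpContextDemo {
  def main(args: Array[String]): Unit = {
    val system  = ActorSystem("http-context-demo")
    val context = HttpContext(system)

    // Requests would be issued here using context.actorSystem and
    // context.streamMaterializer as the implicit Akka machinery.

    Await.result(context.terminate(), 10.seconds)
    system.terminate()
  }
}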
package tests.rescala.testtools import rescala.interface.RescalaInterface class SetAndExtractTransactionHandle[Api <: RescalaInterface](val api: Api) { import api._ def SetAndExtractTransactionHandle[A, N](source: Source[A], value: A)(implicit engine: Scheduler): Initializer = { engine.forceNewTransaction(source) { implicit t => source.admit(value) t.tx.initializer } } }
guidosalva/REScala
Code/Main/shared/src/test/scala-2/tests/rescala/testtools/SetAndExtractTransactionHandle.scala
Scala
apache-2.0
402
/* * Copyright (C) 2017 LREN CHUV for Human Brain Project * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package ch.chuv.lren.woken.backends.faas.chronos import spray.json.{ DefaultJsonProtocol, DeserializationException, JsString, JsValue, RootJsonFormat } // Adapted from https://github.com/mesos/chronos/blob/v3.0.2/src/main/scala/org/apache/mesos/chronos/scheduler/jobs/Containers.scala // and https://github.com/mesos/chronos/blob/v3.0.2/src/main/scala/org/apache/mesos/chronos/scheduler/jobs/Jobs.scala object VolumeMode extends Enumeration { type VolumeMode = Value // read-write and read-only. val RW, RO = Value } object NetworkMode extends Enumeration { type NetworkMode = Value // Bridged, Host and USER val BRIDGE, HOST, USER = Value } object ContainerType extends Enumeration { type ContainerType = Value // Docker, Mesos val DOCKER, MESOS = Value } object ProtocolType extends Enumeration { type ProtocolType = Value val IPv4, IPv6 = Value } import ch.chuv.lren.woken.backends.faas.chronos.VolumeMode.VolumeMode import ch.chuv.lren.woken.backends.faas.chronos.NetworkMode.NetworkMode import ch.chuv.lren.woken.backends.faas.chronos.ContainerType.ContainerType import ch.chuv.lren.woken.backends.faas.chronos.ProtocolType.ProtocolType /** * Represents an environment variable definition for the job */ case class Label( key: String, value: String ) case class ExternalVolume( name: String, provider: String, options: List[Parameter] ) case class Volume( containerPath: String, hostPath: Option[String], mode: Option[VolumeMode], external: Option[ExternalVolume] ) case class PortMapping(hostPort: Int, containerPort: Int, protocol: Option[String]) @SuppressWarnings(Array("org.wartremover.warts.DefaultArguments")) case class Network(name: String, protocol: Option[ProtocolType] = None, labels: List[Label] = Nil, portMappings: List[PortMapping] = Nil) @SuppressWarnings(Array("org.wartremover.warts.DefaultArguments")) case class Container( `type`: ContainerType, image: String, forcePullImage: Boolean = false, parameters: List[Parameter] = Nil, volumes: List[Volume] = Nil, network: NetworkMode = NetworkMode.HOST, networkInfos: List[Network] = Nil ) /** * Represents an environment variable definition for the job */ case class Parameter( key: String, value: String ) case class EnvironmentVariable( name: String, value: String ) /** * A job to submit to Chronos via its REST API. * * @param name The job name. Must match the following regular expression: ([\w\.-]+) * @param description Description of job * @param command The actual command that will be executed by Chronos * @param arguments Arguments to pass to the command. Ignored if shell is true * @param shell If true, Mesos will execute command by running /bin/sh -c [command] and will ignore arguments. If false, command will be treated as the filename of an executable and arguments will be the arguments passed. 
If this is a Docker job and shell is true, the entrypoint of the container will be overridden with /bin/sh -c * @param schedule The scheduling for the job, in ISO 8601 format * @param epsilon If Chronos misses the scheduled run time for any reason, it will still run the job if the time is within this interval. Epsilon must be formatted like an ISO 8601 Duration * @param runAsUser Mesos will run the job as this user, if specified * @param container This contains the subfields for the Docker container: type (required), image (required), forcePullImage (optional), network (optional), and volumes (optional) * @param cpus Amount of Mesos CPUs for this job * @param mem Amount of Mesos Memory (in MB) for this job * @param disk Amount of Mesos disk (in MB) for this job * @param owner The email address of the person responsible for the job * @param environmentVariables An array of environment variables passed to the Mesos executor. For Docker containers, these are also passed to Docker using the -e flag */ @SuppressWarnings(Array("org.wartremover.warts.DefaultArguments")) case class ChronosJob( name: String, description: Option[String] = None, command: String, arguments: List[String] = Nil, shell: Boolean = true, schedule: String, epsilon: Option[String] = None, highPriority: Boolean = false, executor: Option[String] = None, executorFlags: Option[String] = None, runAsUser: Option[String] = None, container: Option[Container], cpus: Option[Double] = None, disk: Option[Double] = None, mem: Option[Double] = None, disabled: Boolean = false, owner: Option[String] = None, ownerName: Option[String] = None, environmentVariables: List[EnvironmentVariable], retries: Int = 2 // dataProcessingJobType: Boolean = false, // scheduleTimeZone: Option[String] = None // concurrent: Boolean = false, // successCount: Option[Long] = None, // errorCount: Option[Long] = None, // lastSuccess: Option[String] = None, // lastError: Option[String] = None, // softError: Boolean = false, // errorsSinceLastSuccess: Option[Long] = None, // taskInfoData: Option[String] = None, // fetch: List[Fetch] = List() // constraints: List[Constraint] = List() ) /** * Serialize ChronosJob in the Json format required by Chronos */ object ChronosJob extends DefaultJsonProtocol { class EnumJsonConverter[T <: scala.Enumeration](enu: T) extends RootJsonFormat[T#Value] { override def write(obj: T#Value): JsValue = JsString(obj.toString) override def read(json: JsValue): T#Value = json match { case JsString(txt) => enu.withName(txt) case somethingElse => throw DeserializationException( s"Expected a value from enum $enu instead of $somethingElse" ) } } implicit val volumeModeFormat: EnumJsonConverter[VolumeMode.type] = new EnumJsonConverter[VolumeMode.type](VolumeMode) implicit val NetworkModeFormat: EnumJsonConverter[NetworkMode.type] = new EnumJsonConverter[NetworkMode.type](NetworkMode) implicit val ContainerTypeFormat: EnumJsonConverter[ContainerType.type] = new EnumJsonConverter[ContainerType.type](ContainerType) implicit val ProtocolTypeFormat: EnumJsonConverter[ProtocolType.type] = new EnumJsonConverter[ProtocolType.type](ProtocolType) implicit val labelFormat: RootJsonFormat[Label] = jsonFormat2(Label.apply) implicit val parameterFormat: RootJsonFormat[Parameter] = jsonFormat2(Parameter.apply) implicit val externalVolumeFormat: RootJsonFormat[ExternalVolume] = jsonFormat3( ExternalVolume.apply ) implicit val volumeFormat: RootJsonFormat[Volume] = jsonFormat4(Volume.apply) implicit val portMappingFormat: RootJsonFormat[PortMapping] = 
jsonFormat3(PortMapping.apply) implicit val networkFormat: RootJsonFormat[Network] = jsonFormat4(Network.apply) implicit val containerFormat: RootJsonFormat[Container] = jsonFormat7(Container.apply) implicit val environmentVariableFormat: RootJsonFormat[EnvironmentVariable] = jsonFormat2( EnvironmentVariable.apply ) implicit val chronosJobFormat: RootJsonFormat[ChronosJob] = jsonFormat20(ChronosJob.apply) }
LREN-CHUV/workflow
src/main/scala/ch/chuv/lren/woken/backends/faas/chronos/ChronosJob.scala
Scala
apache-2.0
8,000
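A hedged sketch of how the spray-json formats above might be used to produce the JSON body for Chronos; the job fields below are illustrative values, not ones taken from Woken.

import spray.json._
import ch.chuv.lren.woken.backends.faas.chronos._

object ChronosJobJsonDemo {
  def main(args: Array[String]): Unit = {
    import ChronosJob._ // brings the implicit RootJsonFormat instances into scope

    val job = ChronosJob(
      name = "demo-job",
      command = "compute",
      schedule = "R1//PT1M",
      container = Some(Container(`type` = ContainerType.DOCKER, image = "alpine:latest")),
      environmentVariables = List(EnvironmentVariable("MODE", "test"))
    )

    println(job.toJson.compactPrint)
  }
}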
package com.karasiq.shadowcloud.webapp.components.region import akka.util.ByteString import com.karasiq.bootstrap.Bootstrap.default._ import com.karasiq.shadowcloud.config.SerializedProps import com.karasiq.shadowcloud.model.RegionId import com.karasiq.shadowcloud.webapp.components.common.{AppComponents, AppIcons} import com.karasiq.shadowcloud.webapp.components.keys.KeysContext import com.karasiq.shadowcloud.webapp.context.AppContext import com.karasiq.shadowcloud.webapp.context.AppContext.JsExecutionContext import rx.{Rx, Var} import scalaTags.all._ import scala.concurrent.Future object RegionsView { def apply()(implicit context: AppContext, regionContext: RegionContext, keysContext: KeysContext): RegionsView = { new RegionsView } private def newRegionId()(implicit rc: RegionContext): RegionId = { s"region-${rc.regions.now.regions.size}" } private def uniqueRegionId(id: RegionId): RegionId = { def timestampString = "-u" + System.currentTimeMillis().toHexString val regex = "-u\\\\w+$".r val prefix = regex.findFirstMatchIn(id) match { case Some(rm) ⇒ rm.before case None ⇒ id } prefix + timestampString } } class RegionsView(implicit context: AppContext, regionContext: RegionContext, keysContext: KeysContext) extends BootstrapHtmlComponent { def renderTag(md: ModifierT*): TagT = { val regionViewsRx = regionContext.regions.fold(Map.empty[RegionId, Tag]) { case (views, report) ⇒ val newMap = report.regions.map { case (regionId, _) ⇒ regionId → views.getOrElse(regionId, renderRegion(regionId)) } newMap } div( GridSystem.row( GridSystem.col.md(3)(renderAddButton()), GridSystem.col.md(3)(renderExportButton()), GridSystem.col.md(3)(renderImportButton()), GridSystem.col.md(3)(renderSuspendAllButton()) ), Rx(div(regionViewsRx().toSeq.sortBy(_._1).map(_._2))) ) } private[this] def renderAddButton() = { def doCreate(regionId: RegionId) = { val defaultConfig = { // Security patch val cfg = """ |chunk-key = com.karasiq.shadowcloud.storage.utils.mappers.HashNonceHMACKeyMapper |""".stripMargin SerializedProps(SerializedProps.DefaultFormat, ByteString(cfg)) } context.api.createRegion(regionId, defaultConfig).foreach { _ ⇒ regionContext.updateAll() } } def showCreateDialog() = { val newRegionIdRx = Var(RegionsView.newRegionId()) Modal() .withTitle(context.locale.createRegion) .withBody( Form( FormInput.text(context.locale.regionId, newRegionIdRx.reactiveInput)(div(small(context.locale.regionIdHint))) ) ) .withButtons( AppComponents.modalSubmit(onclick := Callback.onClick { _ ⇒ // Utils.toSafeIdentifier(newRegionNameRx.now) doCreate(newRegionIdRx.now) }), Button(ButtonStyle.info)(context.locale.uniqueRegionId, onclick := Callback.onClick { _ ⇒ newRegionIdRx() = RegionsView.uniqueRegionId(newRegionIdRx.now) }), AppComponents.modalClose() ) .show() } Button(ButtonStyle.success, ButtonSize.small, block = true)( AppIcons.create, context.locale.createRegion, onclick := Callback.onClick(_ ⇒ showCreateDialog()) ) } private[this] def renderExportButton() = { Button(ButtonStyle.warning, ButtonSize.small, block = true)( AppIcons.download, context.locale.export, onclick := Callback.onClick(_ ⇒ ExportImportModal.exportDialog()) ) } private[this] def renderImportButton() = { Button(ButtonStyle.danger, ButtonSize.small, block = true)( AppIcons.upload, context.locale.`import`, onclick := Callback.onClick(_ ⇒ ExportImportModal.importDialog()) ) } private[this] def renderSuspendAllButton() = { Button(ButtonStyle.danger, ButtonSize.small, block = true)( AppIcons.suspend, context.locale.suspend, onclick := Callback.onClick { _ 
=> val future = Future.sequence(regionContext.regions.now.regions.keys.map(context.api.suspendRegion)) future.onComplete(_ => regionContext.updateAll()) } ) } private[this] def renderRegion(regionId: RegionId) = { lazy val regionConfigView = RegionConfigView(regionId) AppComponents.dropdown(regionId) { Bootstrap.well(regionConfigView) } } }
Karasiq/shadowcloud
server/webapp/src/main/scala/com/karasiq/shadowcloud/webapp/components/region/RegionsView.scala
Scala
apache-2.0
4,567
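The uniqueRegionId helper above strips any earlier "-u<hex>" suffix before appending a fresh timestamp, so suffixes never stack; here is a standalone sketch of the same idea, with illustrative names.

object UniqueRegionIdDemo {
  private val suffix = "-u\\w+$".r

  def uniqueRegionId(id: String): String = {
    val prefix = suffix.findFirstMatchIn(id).map(_.before.toString).getOrElse(id)
    prefix + "-u" + System.currentTimeMillis().toHexString
  }

  def main(args: Array[String]): Unit = {
    val once  = uniqueRegionId("region-0") // e.g. region-0-u18f3a2c7b41
    val twice = uniqueRegionId(once)       // still exactly one -u suffix
    println(once)
    println(twice)
  }
}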
package util

import java.net.URLDecoder
import java.io.IOException

import net.sourceforge.plantuml.code.TranscoderUtil

object PlantUmlHelper {

  /**
   * Build the complete UML source from the compressed source extracted from the HTTP URI.
   *
   * @param source the last part of the URI containing the compressed UML
   * @return the textual UML source
   */
  def getUmlSource(source: String): String = {
    var text = URLDecoder.decode(source, "UTF-8")
    try {
      text = TranscoderUtil.getDefaultTranscoder.decode(text)
    } catch {
      case ioe: IOException =>
        text = "' unable to decode string"
    }
    if (text.startsWith("@start")) {
      text
    } else {
      val plantUmlSource: StringBuilder = new StringBuilder
      plantUmlSource.append("@startuml\n")
      plantUmlSource.append(text)
      if (!text.endsWith("\n")) {
        plantUmlSource.append("\n")
      }
      plantUmlSource.append("@enduml")
      plantUmlSource.toString()
    }
  }
}
grahamar/Giles
app/util/PlantUmlHelper.scala
Scala
apache-2.0
1,004
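A small usage sketch for PlantUmlHelper above, round-tripping a diagram through the PlantUML transcoder it expects; this assumes the plantuml library and the util package above are on the classpath.

import java.net.URLEncoder

import net.sourceforge.plantuml.code.TranscoderUtil

object PlantUmlHelperDemo {
  def main(args: Array[String]): Unit = {
    val diagram    = "Alice -> Bob: hello"
    val compressed = TranscoderUtil.getDefaultTranscoder.encode(diagram)
    val uriPart    = URLEncoder.encode(compressed, "UTF-8")

    // Decodes the URI fragment and, since the body has no @startuml marker,
    // wraps it in @startuml / @enduml.
    println(util.PlantUmlHelper.getUmlSource(uriPart))
  }
}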
package org.openapitools.client.model case class GithubRespositoryContainer ( _class: Option[String], _links: Option[GithubRespositoryContainerlinks], _repositories: Option[GithubRepositories] ) object GithubRespositoryContainer { def toStringBody(var_class: Object, var_links: Object, var_repositories: Object) = s""" | { | "class":$var_class,"links":$var_links,"repositories":$var_repositories | } """.stripMargin }
cliffano/swaggy-jenkins
clients/scala-gatling/generated/src/gatling/scala/org/openapitools/client/model/GithubRespositoryContainer.scala
Scala
mit
477
package alwsk object AlwskBuildSharedConst { val prop_alrauneJsCdnUrl = "alrauneJsCdnUrl" }
vovagrechka/fucking-everything
alraune/alraune-wsk/src/sharedWithBuildMetaProject/scala/alwsk/AlwskBuildSharedConst.scala
Scala
apache-2.0
95
/** * Copyright (C) 2019 Inera AB (http://www.inera.se) * * This file is part of statistik (https://github.com/sklintyg/statistik). * * statistik is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * statistik is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package se.inera.statistics.gatling import io.gatling.core.Predef._ object InloggadSjukskrivningslangd { def exec(user: Login.User) = RestCall.get( s"getSickLeaveLengthData: ${user.vardgivare}", s"${Conf.uri}/api/verksamhet/getSickLeaveLengthData?vgid=${user.vardgivare}") }
sklintyg/statistik
gatling/src/test/scala/se/inera/statistics/gatling/InloggadSjukskrivningslangd.scala
Scala
lgpl-3.0
1,069
package xyztr import org.scalatest.{FlatSpec, Matchers} class UserToUserChannelTest extends FlatSpec with Matchers { "FriendRequest" can "be sent to someone" in { val mats = User("Mats Henricson") // Mats sends a friend request to Bengt val friendRequestToBengt = FriendRequest(mats) UserToUserChannel.sendFriendRequest("Bengt Henricson", friendRequestToBengt) // Bengt gets the friend request from Mats val bengt = User("Bengt Henricson") val receivedFriendRequest = UserToUserChannel.getFriendRequest(bengt.name).get bengt.acceptFriendRequest(receivedFriendRequest) // Mats should now be Bengts friend bengt.hasFriend(mats.publicKey.getEncoded) } "FriendResponse" can "be sent to someone" in { val mats = User("Mats Henricson") // Mats sends a friend request to Bengt val friendRequestToBengt = FriendRequest(mats) UserToUserChannel.sendFriendRequest("Bengt Henricson", friendRequestToBengt) // Bengt gets the friend request from Mats val bengt = User("Bengt Henricson") val receivedFriendRequest = UserToUserChannel.getFriendRequest(bengt.name).get val response = bengt.acceptFriendRequest(receivedFriendRequest) // Bengt sends the response back to Mats UserToUserChannel.sendFriendResponse(receivedFriendRequest.encodedPublicKeyOfSender, response) val responseFromBengt = UserToUserChannel.getFriendResponse(mats.publicKey.getEncoded).get mats.handleFriendResponse(responseFromBengt) // Bengt should now be Mats friend bengt.hasFriend(mats.publicKey.getEncoded) } "BubbleHandle" can "be sent to a friend" in { val mats = User("Mats Henricson") val bengt = User("Bengt Henricson") val receivedFriendRequestFromBengt = FriendRequest(bengt) mats.acceptFriendRequest(receivedFriendRequestFromBengt) // Bengt is now Mats Friend // Mats sends a BubbleHandle to Bengt val friend = mats.friends.head val sentBubbleHandle = BubbleHandle("SomeIpfsHash", Crypto.createNewSymmetricEncryptionKey(), friend.publicKey, None) UserToUserChannel.sendBubbleHandle(friend.encodedPublicKeyOfFriend, sentBubbleHandle) // Bengt gets the BubbleHandle and can decrypt the symmetric key val receivedBubbleHandle = UserToUserChannel.getBubbleHandle(bengt.publicKey.getEncoded).getOrElse(throw new IllegalStateException("What?")) val decryptedSymmetricKey = receivedBubbleHandle.decryptSecretKey(bengt.privateKey) decryptedSymmetricKey.getAlgorithm should be("AES") } }
matshenricson/xyztr
src/test/scala/xyztr/UserToUserChannelTest.scala
Scala
gpl-3.0
2,520
package com.bowlingx import java.io.ByteArrayInputStream import java.nio.charset.StandardCharsets import akka.actor.ActorSystem import com.bowlingx.actors.{Answer, Render} import com.bowlingx.providers.ScriptResources import play.api.inject.ApplicationLifecycle import scala.concurrent.{ExecutionContext, Future} import scala.util.Try import akka.pattern.ask import akka.util.Timeout import scala.concurrent.duration._ /** * Renders JavaScript with the nashorn engine. * * @param context implicit execution context * @param vendorFiles vendor files that should be precompiled and executed before the rendering */ class JavascriptEngine( val vendorFiles: ScriptResources, val actorSystem: ActorSystem, val lifecycle: ApplicationLifecycle, watchFiles: Boolean, val renderTimeout:FiniteDuration, val renderInstances:Int )(implicit context: ExecutionContext) extends Engine with EngineWatcher { if (watchFiles) { this.initScheduling() } /** * @return js to bootstrap the VM with */ protected def bootstrap: ByteArrayInputStream = { val pre = """ |var global = global || this, self = self || this, window = window || this; |var console = {}; |var logger = function(type) { | return function () { | for (var i = 0, len = arguments.length; i < len; i++) { | __play_webpack_logger[type](arguments[i]); | } | } |}; |console.debug = logger("debug"); |console.warn = logger("warn"); |console.error = logger("error"); |console.log = logger("info"); |console.trace = logger("trace"); | |global.setTimeout = function(fn, delay) { | return __play_webpack_setTimeout.apply(fn, delay || 0); |}; | |global.clearTimeout = function(timer) { | return __play_webpack_clearTimeout.apply(timer); |}; | |global.setImmediate = function(fn) { | return __play_webpack_setTimeout.apply(fn, 0); |}; | |global.clearImmediate = function(timer) { | return __play_webpack_clearTimeout.apply(timer); |}; """ .stripMargin new ByteArrayInputStream(pre.getBytes(StandardCharsets.UTF_8)) } def render[T <: Any](method: String, arguments: T*): Future[Try[Option[AnyRef]]] = { implicit val timeout = Timeout(renderTimeout) renderer ? Render(method, arguments.toList) map { case Answer(response) => response } } }
BowlingX/play-webpack
src/play-module/src/main/scala/com/bowlingx/JavascriptEngine.scala
Scala
mit
2,688
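A standalone illustration of the idea behind the engine above, using only javax.script and the JVM's built-in Nashorn engine (present on JDK 8-14); none of the play-webpack types are involved.

import javax.script.{Invocable, ScriptEngineManager}

object NashornSketch {
  def main(args: Array[String]): Unit = {
    val engine = new ScriptEngineManager().getEngineByName("nashorn")

    // Evaluate a tiny bootstrap shim plus a render function, then invoke it.
    engine.eval("var global = this; global.render = function(name) { return 'Hello, ' + name; };")

    val result = engine.asInstanceOf[Invocable].invokeFunction("render", "world")
    println(result) // Hello, world
  }
}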
package de.htwg.zeta.server.model.modelValidator.validator.rules.validatorDsl import de.htwg.zeta.server.model.modelValidator.validator.rules.metaModelDependent.NodeAttributes /** * This file was created by Tobias Droth as part of his master thesis at HTWG Konstanz (03/2017 - 09/2017). */ class AttributesInNodes(nodeType: String) { def areOfTypes(attributeType: Seq[String]): NodeAttributes = new NodeAttributes(nodeType, attributeType) }
Zeta-Project/zeta
api/server/app/de/htwg/zeta/server/model/modelValidator/validator/rules/validatorDsl/AttributesInNodes.scala
Scala
bsd-2-clause
449
package io.prediction.controller import io.prediction.workflow.PersistentModelManifest import io.prediction.workflow.SharedSparkContext import io.prediction.workflow.StopAfterPrepareInterruption import io.prediction.workflow.StopAfterReadInterruption import grizzled.slf4j.Logger import io.prediction.workflow.WorkflowParams import org.apache.spark.rdd.RDD import org.scalatest.Inspectors._ import org.scalatest.Matchers._ import org.scalatest.FunSuite import org.scalatest.Inside import scala.util.Random class EngineSuite extends FunSuite with Inside with SharedSparkContext { import io.prediction.controller.Engine0._ @transient lazy val logger = Logger[this.type] test("Engine.train") { val engine = new Engine( classOf[PDataSource2], classOf[PPreparator1], Map("" -> classOf[PAlgo2]), classOf[LServing1]) val engineParams = EngineParams( dataSourceParams = PDataSource2.Params(0), preparatorParams = PPreparator1.Params(1), algorithmParamsList = Seq(("", PAlgo2.Params(2))), servingParams = LServing1.Params(3)) val models = engine.train( sc, engineParams, engineInstanceId = "", params = WorkflowParams()) val pd = ProcessedData(1, TrainingData(0)) // PAlgo2.Model doesn't have IPersistentModel trait implemented. Hence the // model extract after train is Unit. models should contain theSameElementsAs Seq(Unit) } test("Engine.train persisting PAlgo.Model") { val engine = new Engine( classOf[PDataSource2], classOf[PPreparator1], Map( "PAlgo2" -> classOf[PAlgo2], "PAlgo3" -> classOf[PAlgo3] ), classOf[LServing1]) val engineParams = EngineParams( dataSourceParams = PDataSource2.Params(0), preparatorParams = PPreparator1.Params(1), algorithmParamsList = Seq( ("PAlgo2", PAlgo2.Params(2)), ("PAlgo3", PAlgo3.Params(21)), ("PAlgo3", PAlgo3.Params(22)) ), servingParams = LServing1.Params(3)) val pd = ProcessedData(1, TrainingData(0)) val model21 = PAlgo3.Model(21, pd) val model22 = PAlgo3.Model(22, pd) val models = engine.train( sc, engineParams, engineInstanceId = "", params = WorkflowParams()) val pModel21 = PersistentModelManifest(model21.getClass.getName) val pModel22 = PersistentModelManifest(model22.getClass.getName) models should contain theSameElementsAs Seq(Unit, pModel21, pModel22) } test("Engine.train persisting LAlgo.Model") { val engine = Engine( classOf[LDataSource1], classOf[LPreparator1], Map( "LAlgo1" -> classOf[LAlgo1], "LAlgo2" -> classOf[LAlgo2], "LAlgo3" -> classOf[LAlgo3] ), classOf[LServing1]) val engineParams = EngineParams( dataSourceParams = LDataSource1.Params(0), preparatorParams = LPreparator1.Params(1), algorithmParamsList = Seq( ("LAlgo2", LAlgo2.Params(20)), ("LAlgo2", LAlgo2.Params(21)), ("LAlgo3", LAlgo3.Params(22))), servingParams = LServing1.Params(3)) val pd = ProcessedData(1, TrainingData(0)) val model20 = LAlgo2.Model(20, pd) val model21 = LAlgo2.Model(21, pd) val model22 = LAlgo3.Model(22, pd) //val models = engine.train(sc, engineParams, WorkflowParams()) val models = engine.train( sc, engineParams, engineInstanceId = "", params = WorkflowParams()) val pModel20 = PersistentModelManifest(model20.getClass.getName) val pModel21 = PersistentModelManifest(model21.getClass.getName) models should contain theSameElementsAs Seq(pModel20, pModel21, model22) } test("Engine.train persisting P&NAlgo.Model") { val engine = new Engine( classOf[PDataSource2], classOf[PPreparator1], Map( "PAlgo2" -> classOf[PAlgo2], "PAlgo3" -> classOf[PAlgo3], "NAlgo2" -> classOf[NAlgo2], "NAlgo3" -> classOf[NAlgo3] ), classOf[LServing1]) val engineParams = EngineParams( dataSourceParams = 
PDataSource2.Params(0), preparatorParams = PPreparator1.Params(1), algorithmParamsList = Seq( ("PAlgo2", PAlgo2.Params(20)), ("PAlgo3", PAlgo3.Params(21)), ("PAlgo3", PAlgo3.Params(22)), ("NAlgo2", NAlgo2.Params(23)), ("NAlgo3", NAlgo3.Params(24)), ("NAlgo3", NAlgo3.Params(25)) ), servingParams = LServing1.Params(3)) val pd = ProcessedData(1, TrainingData(0)) val model21 = PAlgo3.Model(21, pd) val model22 = PAlgo3.Model(22, pd) val model23 = NAlgo2.Model(23, pd) val model24 = NAlgo3.Model(24, pd) val model25 = NAlgo3.Model(25, pd) //val models = engine.train(sc, engineParams, WorkflowParams()) val models = engine.train( sc, engineParams, engineInstanceId = "", params = WorkflowParams()) val pModel21 = PersistentModelManifest(model21.getClass.getName) val pModel22 = PersistentModelManifest(model22.getClass.getName) val pModel23 = PersistentModelManifest(model23.getClass.getName) models should contain theSameElementsAs Seq( Unit, pModel21, pModel22, pModel23, model24, model25) } test("Engine.prepareDeploy PAlgo") { val engine = new Engine( classOf[PDataSource2], classOf[PPreparator1], Map( "PAlgo2" -> classOf[PAlgo2], "PAlgo3" -> classOf[PAlgo3], "NAlgo2" -> classOf[NAlgo2], "NAlgo3" -> classOf[NAlgo3] ), classOf[LServing1]) val engineParams = EngineParams( dataSourceParams = PDataSource2.Params(0), preparatorParams = PPreparator1.Params(1), algorithmParamsList = Seq( ("PAlgo2", PAlgo2.Params(20)), ("PAlgo3", PAlgo3.Params(21)), ("PAlgo3", PAlgo3.Params(22)), ("NAlgo2", NAlgo2.Params(23)), ("NAlgo3", NAlgo3.Params(24)), ("NAlgo3", NAlgo3.Params(25)) ), servingParams = LServing1.Params(3)) val pd = ProcessedData(1, TrainingData(0)) val model20 = PAlgo2.Model(20, pd) val model21 = PAlgo3.Model(21, pd) val model22 = PAlgo3.Model(22, pd) val model23 = NAlgo2.Model(23, pd) val model24 = NAlgo3.Model(24, pd) val model25 = NAlgo3.Model(25, pd) val rand = new Random() val fakeEngineInstanceId = s"FakeInstanceId-${rand.nextLong()}" val persistedModels = engine.train( sc, engineParams, engineInstanceId = fakeEngineInstanceId, params = WorkflowParams() ) val deployableModels = engine.prepareDeploy( sc, engineParams, fakeEngineInstanceId, persistedModels, params = WorkflowParams() ) deployableModels should contain theSameElementsAs Seq( model20, model21, model22, model23, model24, model25) } test("Engine.eval") { val engine = new Engine( classOf[PDataSource2], classOf[PPreparator1], Map("" -> classOf[PAlgo2]), classOf[LServing1]) val qn = 10 val en = 3 val engineParams = EngineParams( dataSourceParams = PDataSource2.Params(id = 0, en = en, qn = qn), preparatorParams = PPreparator1.Params(1), algorithmParamsList = Seq(("", PAlgo2.Params(2))), servingParams = LServing1.Params(3)) val algoCount = engineParams.algorithmParamsList.size val pd = ProcessedData(1, TrainingData(0)) val model0 = PAlgo2.Model(2, pd) val evalDataSet = engine.eval(sc, engineParams, WorkflowParams()) evalDataSet should have size en forAll(evalDataSet.zipWithIndex) { case (evalData, ex) => { val (evalInfo, qpaRDD) = evalData evalInfo shouldBe EvalInfo(0) val qpaSeq: Seq[(Query, Prediction, Actual)] = qpaRDD.collect qpaSeq should have size qn forAll (qpaSeq) { case (q, p, a) => val Query(qId, qEx, qQx, _) = q val Actual(aId, aEx, aQx) = a qId shouldBe aId qEx shouldBe ex aEx shouldBe ex qQx shouldBe aQx inside (p) { case Prediction(pId, pQ, pModels, pPs) => { pId shouldBe 3 pQ shouldBe q pModels shouldBe None pPs should have size algoCount pPs shouldBe Seq( Prediction(id = 2, q = q, models = Some(model0))) }} } }} } } class 
EngineTrainSuite extends FunSuite with SharedSparkContext { import io.prediction.controller.Engine0._ val defaultWorkflowParams: WorkflowParams = WorkflowParams() test("Parallel DS/P/Algos") { val models = Engine.train( sc, new PDataSource0(0), new PPreparator0(1), Seq( new PAlgo0(2), new PAlgo1(3), new PAlgo0(4)), defaultWorkflowParams ) val pd = ProcessedData(1, TrainingData(0)) models should contain theSameElementsAs Seq( PAlgo0.Model(2, pd), PAlgo1.Model(3, pd), PAlgo0.Model(4, pd)) } test("Local DS/P/Algos") { val models = Engine.train( sc, new LDataSource0(0), new LPreparator0(1), Seq( new LAlgo0(2), new LAlgo1(3), new LAlgo0(4)), defaultWorkflowParams ) val pd = ProcessedData(1, TrainingData(0)) val expectedResults = Seq( LAlgo0.Model(2, pd), LAlgo1.Model(3, pd), LAlgo0.Model(4, pd)) forAll(models.zip(expectedResults)) { case (model, expected) => model shouldBe a [RDD[_]] val localModel = model.asInstanceOf[RDD[_]].collect localModel should contain theSameElementsAs Seq(expected) } } test("P2L DS/P/Algos") { val models = Engine.train( sc, new PDataSource0(0), new PPreparator0(1), Seq( new NAlgo0(2), new NAlgo1(3), new NAlgo0(4)), defaultWorkflowParams ) val pd = ProcessedData(1, TrainingData(0)) models should contain theSameElementsAs Seq( NAlgo0.Model(2, pd), NAlgo1.Model(3, pd), NAlgo0.Model(4, pd)) } test("Parallel DS/P/Algos Stop-After-Read") { val workflowParams = defaultWorkflowParams.copy( stopAfterRead = true) an [StopAfterReadInterruption] should be thrownBy Engine.train( sc, new PDataSource0(0), new PPreparator0(1), Seq( new PAlgo0(2), new PAlgo1(3), new PAlgo0(4)), workflowParams ) } test("Parallel DS/P/Algos Stop-After-Prepare") { val workflowParams = defaultWorkflowParams.copy( stopAfterPrepare = true) an [StopAfterPrepareInterruption] should be thrownBy Engine.train( sc, new PDataSource0(0), new PPreparator0(1), Seq( new PAlgo0(2), new PAlgo1(3), new PAlgo0(4)), workflowParams ) } test("Parallel DS/P/Algos Dirty TrainingData") { val workflowParams = defaultWorkflowParams.copy( skipSanityCheck = false) an [AssertionError] should be thrownBy Engine.train( sc, new PDataSource3(0, error = true), new PPreparator0(1), Seq( new PAlgo0(2), new PAlgo1(3), new PAlgo0(4)), workflowParams ) } test("Parallel DS/P/Algos Dirty TrainingData But Skip Check") { val workflowParams = defaultWorkflowParams.copy( skipSanityCheck = true) val models = Engine.train( sc, new PDataSource3(0, error = true), new PPreparator0(1), Seq( new PAlgo0(2), new PAlgo1(3), new PAlgo0(4)), workflowParams ) val pd = ProcessedData(1, TrainingData(0, error = true)) models should contain theSameElementsAs Seq( PAlgo0.Model(2, pd), PAlgo1.Model(3, pd), PAlgo0.Model(4, pd)) } } class EngineEvalSuite extends FunSuite with Inside with SharedSparkContext { import io.prediction.controller.Engine0._ @transient lazy val logger = Logger[this.type] test("Simple Parallel DS/P/A/S") { val en = 2 val qn = 5 val evalDataSet: Seq[(EvalInfo, RDD[(Query, Prediction, Actual)])] = Engine.eval( sc, new PDataSource1(id = 1, en = en, qn = qn), new PPreparator0(id = 2), Seq(new PAlgo0(id = 3)), new LServing0(id = 10)) val pd = ProcessedData(2, TrainingData(1)) val model0 = PAlgo0.Model(3, pd) forAll(evalDataSet.zipWithIndex) { case (evalData, ex) => { val (evalInfo, qpaRDD) = evalData evalInfo shouldBe EvalInfo(1) val qpaSeq: Seq[(Query, Prediction, Actual)] = qpaRDD.collect forAll (qpaSeq) { case (q, p, a) => val Query(qId, qEx, qQx, _) = q val Actual(aId, aEx, aQx) = a qId shouldBe aId qEx shouldBe ex aEx shouldBe ex qQx shouldBe 
aQx inside (p) { case Prediction(pId, pQ, pModels, pPs) => { pId shouldBe 10 pQ shouldBe q pModels shouldBe None pPs should have size 1 pPs shouldBe Seq( Prediction(id = 3, q = q, models = Some(model0))) }} } }} } test("Parallel DS/P/A/S") { val en = 2 val qn = 5 val evalDataSet: Seq[(EvalInfo, RDD[(Query, Prediction, Actual)])] = Engine.eval( sc, new PDataSource1(id = 1, en = en, qn = qn), new PPreparator0(id = 2), Seq( new PAlgo0(id = 3), new PAlgo1(id = 4), new NAlgo1(id = 5)), new LServing0(id = 10)) val pd = ProcessedData(2, TrainingData(1)) val model0 = PAlgo0.Model(3, pd) val model1 = PAlgo1.Model(4, pd) val model2 = NAlgo1.Model(5, pd) forAll(evalDataSet.zipWithIndex) { case (evalData, ex) => { val (evalInfo, qpaRDD) = evalData evalInfo shouldBe EvalInfo(1) val qpaSeq: Seq[(Query, Prediction, Actual)] = qpaRDD.collect forAll (qpaSeq) { case (q, p, a) => val Query(qId, qEx, qQx, _) = q val Actual(aId, aEx, aQx) = a qId shouldBe aId qEx shouldBe ex aEx shouldBe ex qQx shouldBe aQx inside (p) { case Prediction(pId, pQ, pModels, pPs) => { pId shouldBe 10 pQ shouldBe q pModels shouldBe None pPs should have size 3 pPs shouldBe Seq( Prediction(id = 3, q = q, models = Some(model0)), Prediction(id = 4, q = q, models = Some(model1)), Prediction(id = 5, q = q, models = Some(model2)) ) }} } }} } test("Parallel DS/P/A/S with Supplemented Query") { val en = 2 val qn = 5 val evalDataSet: Seq[(EvalInfo, RDD[(Query, Prediction, Actual)])] = Engine.eval( sc, new PDataSource1(id = 1, en = en, qn = qn), new PPreparator0(id = 2), Seq( new PAlgo0(id = 3), new PAlgo1(id = 4), new NAlgo1(id = 5)), new LServing2(id = 10)) val pd = ProcessedData(2, TrainingData(1)) val model0 = PAlgo0.Model(3, pd) val model1 = PAlgo1.Model(4, pd) val model2 = NAlgo1.Model(5, pd) forAll(evalDataSet.zipWithIndex) { case (evalData, ex) => { val (evalInfo, qpaRDD) = evalData evalInfo shouldBe EvalInfo(1) val qpaSeq: Seq[(Query, Prediction, Actual)] = qpaRDD.collect forAll (qpaSeq) { case (q, p, a) => val Query(qId, qEx, qQx, qSupp) = q val Actual(aId, aEx, aQx) = a qId shouldBe aId qEx shouldBe ex aEx shouldBe ex qQx shouldBe aQx qSupp shouldBe false inside (p) { case Prediction(pId, pQ, pModels, pPs) => { pId shouldBe 10 pQ shouldBe q pModels shouldBe None pPs should have size 3 // queries inside prediction should have supp set to true, since it // represents what the algorithms see. 
val qSupp = q.copy(supp = true) pPs shouldBe Seq( Prediction(id = 3, q = qSupp, models = Some(model0)), Prediction(id = 4, q = qSupp, models = Some(model1)), Prediction(id = 5, q = qSupp, models = Some(model2)) ) }} } }} } test("Local DS/P/A/S") { val en = 2 val qn = 5 val evalDataSet: Seq[(EvalInfo, RDD[(Query, Prediction, Actual)])] = Engine.eval( sc, new LDataSource0(id = 1, en = en, qn = qn), new LPreparator0(id = 2), Seq( new LAlgo0(id = 3), new LAlgo1(id = 4), new LAlgo1(id = 5)), new LServing0(id = 10)) val pd = ProcessedData(2, TrainingData(1)) val model0 = LAlgo0.Model(3, pd) val model1 = LAlgo1.Model(4, pd) val model2 = LAlgo1.Model(5, pd) forAll(evalDataSet.zipWithIndex) { case (evalData, ex) => { val (evalInfo, qpaRDD) = evalData evalInfo shouldBe EvalInfo(1) val qpaSeq: Seq[(Query, Prediction, Actual)] = qpaRDD.collect forAll (qpaSeq) { case (q, p, a) => val Query(qId, qEx, qQx, _) = q val Actual(aId, aEx, aQx) = a qId shouldBe aId qEx shouldBe ex aEx shouldBe ex qQx shouldBe aQx inside (p) { case Prediction(pId, pQ, pModels, pPs) => { pId shouldBe 10 pQ shouldBe q pModels shouldBe None pPs should have size 3 pPs shouldBe Seq( Prediction(id = 3, q = q, models = Some(model0)), Prediction(id = 4, q = q, models = Some(model1)), Prediction(id = 5, q = q, models = Some(model2)) ) }} } }} } }
ch33hau/PredictionIO
core/src/test/scala/io/prediction/controller/EngineTest.scala
Scala
apache-2.0
17,440
package org.clulab.serialization.json import java.io.File import org.clulab.processors.DocumentAttachmentBuilderFromJson import org.clulab.processors.{Document, Sentence} import org.clulab.struct.Edge import org.clulab.struct.{DirectedGraph, GraphMap} import org.clulab.utils.FileUtils import org.json4s import org.json4s.JsonDSL._ import org.json4s._ import org.json4s.jackson.JsonMethods._ import org.json4s.jackson.prettyJson /** JSON serialization utilities */ object JSONSerializer { implicit val formats = DefaultFormats def jsonAST(s: String): JValue = parse(s) def jsonAST(f: File): JValue = jsonAST(FileUtils.getTextFromFile(f)) protected def addDocumentAttachments(doc: Document, jValue: JValue): Unit = { // See also DocumentSerializer for text version of nearly the same thing. (jValue \\ DOCUMENT_ATTACHMENTS_KEY) match { case jObject: JObject => val keys = jObject.values.keys keys.foreach { key: String => (jObject \\ key) match { case jObject: JObject => val documentAttachmentBuilderFromJsonClassName = (jObject \\ DOCUMENT_ATTACHMENTS_BUILDER_KEY).extract[String] val clazz = Class.forName(documentAttachmentBuilderFromJsonClassName) val ctor = clazz.getConstructor() val obj = ctor.newInstance() val documentAttachmentBuilder = obj.asInstanceOf[DocumentAttachmentBuilderFromJson] val value = (jObject \\ DOCUMENT_ATTACHMENTS_VALUE_KEY) val documentAttachment = documentAttachmentBuilder.mkDocumentAttachment(value) doc.addAttachment(key, documentAttachment) case jValue: JValue => val text = prettyJson(jValue) throw new RuntimeException(s"ERROR: While deserializing document attachments expected JObject but found this: $text") case _ => // noop. It should never get here. (Famous last words.) } } case _ => // Leave documentAttachments as is: None } } def toDocument(json: JValue): Document = { // recover sentences val sentences = (json \\ "sentences").asInstanceOf[JArray].arr.map(sjson => toSentence(sjson)).toArray // initialize document val d = Document(sentences) // update id d.id = getStringOption(json, "id") // update text d.text = getStringOption(json, "text") addDocumentAttachments(d, json) d } def toDocument(docHash: String, djson: JValue): Document = toDocument(djson \\ docHash) def toDocument(f: File): Document = toDocument(jsonAST(f)) def toDocument(s: String): Document = toDocument(jsonAST(s)) def toSentence(json: JValue): Sentence = { def getLabels(json: JValue, k: String): Option[Array[String]] = json \\ k match { case JNothing => None case contents => Some(contents.extract[Array[String]]) } val s = json.extract[Sentence] // build dependencies val graphs = (json \\ "graphs").extract[JObject].obj.map { case (key, json) => key -> toDirectedGraph(json) }.toMap s.graphs = GraphMap(graphs) // build labels s.tags = getLabels(json, "tags") s.lemmas = getLabels(json, "lemmas") s.entities = getLabels(json, "entities") s.norms = getLabels(json, "norms") s.chunks = getLabels(json, "chunks") s } def toDirectedGraph(json: JValue): DirectedGraph[String] = { val edges = (json \\ "edges").extract[List[Edge[String]]] // The roots remain for backward compatibility, but they are ignored. val roots = (json \\ "roots").extract[Set[Int]] new DirectedGraph(edges) } private def getStringOption(json: JValue, key: String): Option[String] = json \\ key match { case JString(s) => Some(s) case _ => None } }
sistanlp/processors
main/src/main/scala/org/clulab/serialization/json/JSONSerializer.scala
Scala
apache-2.0
3,763
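The deserializer above rebuilds a Document from JSON produced by the matching serializer. A minimal reading sketch, assuming a previously serialized file at the hypothetical path doc.json and that Sentence exposes its tokens as words:

import java.io.File
import org.clulab.processors.Document
import org.clulab.serialization.json.JSONSerializer

object LoadDocumentSketch extends App {
  // "doc.json" is a hypothetical path to JSON previously written by the matching serializer.
  val doc: Document = JSONSerializer.toDocument(new File("doc.json"))

  doc.sentences.foreach { s =>
    println(s.words.mkString(" "))                 // assumes Sentence exposes its tokens as `words`
    s.tags.foreach(ts => println(ts.mkString(" "))) // tags are Option[Array[String]], as in toSentence above
  }
}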
/** * (c) Copyright 2013 WibiData, Inc. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kiji.modeling.examples.ItemItemCF import com.twitter.scalding.Args import com.twitter.scalding.TextLine import cascading.pipe.Pipe import org.kiji.express.flow.ColumnFamilyOutputSpec import org.kiji.express.flow.EntityId import org.kiji.express.flow.KijiJob import org.kiji.express.flow.KijiOutput /** * Populates a table of movie ratings. * * Reads in a file with records of the form: `user`, `movie`, `rating`. * * @param args passed in from the command line. */ class MovieImporter(args: Args) extends KijiJob(args) { // Get user ratings TextLine(args("ratings")) .read .mapTo('line -> ('user, 'movie, 'rating)) { line: String => { val contents: Array[String] = line.split("\\t") // Cast the user and movie into longs, rating into double (contents(0).toLong, contents(1).toLong, contents(2).toDouble) } } // Mark the movieId as the entityId .map('user -> 'entityId) { user: Long => EntityId(user) } .write(KijiOutput.builder .withTableURI(args("table-uri")) .withColumnSpecs(Map( 'rating -> ColumnFamilyOutputSpec.builder .withFamily("ratings") .withQualifierSelector('movie) .build)) .build) }
kijiproject/kiji-modeling
kiji-modeling-examples/src/main/scala/org/kiji/modeling/examples/ItemItemCF/MovieImporter.scala
Scala
apache-2.0
2,021
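The importer above expects tab-separated user/movie/rating records. A small sketch of the same parse on a made-up sample line, useful for checking an input file before running the job:

object RatingsLineParseSketch extends App {
  // Hypothetical sample record from the file passed via the "ratings" argument.
  val line = "42\t7\t4.5"

  // Same parse as MovieImporter's mapTo: user and movie as Long, rating as Double.
  val contents = line.split("\\t")
  val parsed = (contents(0).toLong, contents(1).toLong, contents(2).toDouble)
  println(parsed) // (42,7,4.5)
}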
/* * Copyright 2017 PayPal * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.squbs.stream import akka.actor.ActorSystem import akka.pattern._ import com.typesafe.config.ConfigFactory import org.scalatest.OptionValues._ import org.scalatest.{FlatSpec, Matchers} import org.squbs.lifecycle.GracefulStop import org.squbs.unicomplex._ import scala.concurrent.{Await, Future} class PerpetualStreamSpec extends FlatSpec with Matchers { val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath it should "throw an IllegalStateException when accessing matValue before stream starts" in { val classPaths = Array("IllegalStateStream") map (dummyJarsDir + "/" + _) val config = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = IllegalStateStream | ${JMX.prefixConfig} = true |} """.stripMargin ) val boot = UnicomplexBoot(config) .createUsing { (name, config) => ActorSystem(name, config) } .scanComponents(classPaths) .start() import Timeouts._ val reportF = (Unicomplex(boot.actorSystem).uniActor ? ReportStatus).mapTo[StatusReport] val StatusReport(state, cubes, _) = Await.result(reportF, awaitMax) state shouldBe Failed cubes.values should have size 1 val InitReports(cubeState, actorReports) = cubes.values.head._2.value cubeState shouldBe Failed the [IllegalStateException] thrownBy actorReports.values.head.value.get should have message "Materialized value not available before streamGraph is started!" Unicomplex(boot.actorSystem).uniActor ! GracefulStop } it should "recover from upstream failure" in { val classPaths = Array("ThrowExceptionStream") map (dummyJarsDir + "/" + _) val config = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = ThrowExceptionStream | ${JMX.prefixConfig} = true |} """.stripMargin ) val boot = UnicomplexBoot(config) .createUsing { (name, config) => ActorSystem(name, config) } .scanComponents(classPaths) .start() import ThrowExceptionStream._ import Timeouts._ import boot.actorSystem val countF = (actorSystem.actorSelection("/user/ThrowExceptionStream/ThrowExceptionStream") ? NotifyWhenDone) .mapTo[Int] val count = Await.result(countF, awaitMax) count shouldBe (limit - 1) recordCount.get shouldBe (limit - 1) Unicomplex(actorSystem).uniActor ! GracefulStop } it should "properly drain the stream on shutdown" in { val classPaths = Array("ProperShutdownStream") map (dummyJarsDir + "/" + _) val config = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = ProperShutdownStream | ${JMX.prefixConfig} = true |} """.stripMargin ) val boot = UnicomplexBoot(config) .createUsing { (name, config) => ActorSystem(name, config) } .scanComponents(classPaths) .start() import ProperShutdownStream._ import Timeouts._ import boot.actorSystem // To avoid map at shutdown so the NotifyWhenDone obtains a Future[Long] right away. // Combined with "ask", we now have a Future[Future[Long]] in countFF. Then we have to do the very short await // to obtain the Future[Long] that will complete at or after shutdown. 
val countFF = (actorSystem.actorSelection("/user/ProperShutdownStream/ProperShutdownStream") ? NotifyWhenDone) .mapTo[Future[Long]] val countF = Await.result(countFF, awaitMax) Thread.sleep(500) // Let the stream run a bit. Unicomplex(actorSystem).uniActor ! GracefulStop val count = Await.result(countF, awaitMax) println(s"Counts -> src: ${genCount.get} dest: $count") count shouldBe genCount.get } it should "properly drain the stream with KillSwitch shutdown" in { val classPaths = Array("KillSwitchStream") map (dummyJarsDir + "/" + _) val config = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = KillSwitchStream | ${JMX.prefixConfig} = true |} """.stripMargin ) val boot = UnicomplexBoot(config) .createUsing { (name, config) => ActorSystem(name, config) } .scanComponents(classPaths) .start() import KillSwitchStream._ import Timeouts._ import boot.actorSystem // To avoid map at shutdown so the NotifyWhenDone obtains a Future[Long] right away. // Combined with "ask", we now have a Future[Future[Long]] in countFF. Then we have to do the very short await // to obtain the Future[Long] that will complete at or after shutdown. val countFF = (actorSystem.actorSelection("/user/KillSwitchStream/KillSwitchStream") ? NotifyWhenDone) .mapTo[Future[Long]] val countF = Await.result(countFF, awaitMax) Thread.sleep(500) // Let the stream run a bit. Unicomplex(actorSystem).uniActor ! GracefulStop val count = Await.result(countF, awaitMax) println(s"Counts -> src: ${genCount.get} dest: $count") count shouldBe genCount.get } it should "properly drain the stream with KillSwitch shutdown having other child actor" in { val classPaths = Array("KillSwitchWithChildActorStream") map (dummyJarsDir + "/" + _) val config = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = KillSwitchWithChildActorStream | ${JMX.prefixConfig} = true |} """.stripMargin ) val boot = UnicomplexBoot(config) .createUsing { (name, config) => ActorSystem(name, config) } .scanComponents(classPaths) .start() import KillSwitchWithChildActorStream._ import Timeouts._ import boot.actorSystem // To avoid map at shutdown so the NotifyWhenDone obtains a Future[Long] right away. // Combined with "ask", we now have a Future[Future[Long]] in countFF. Then we have to do the very short await // to obtain the Future[Long] that will complete at or after shutdown. val countFF = (actorSystem.actorSelection("/user/KillSwitchWithChildActorStream/KillSwitchWithChildActorStream") ? NotifyWhenDone) .mapTo[Future[Long]] val countF = Await.result(countFF, awaitMax) Thread.sleep(500) // Let the stream run a bit. Unicomplex(actorSystem).uniActor ! GracefulStop val count = Await.result(countF, awaitMax) println(s"Counts -> src: ${genCount.get} dest: $count") count shouldBe genCount.get } }
SarathChandran/squbs
squbs-unicomplex/src/test/scala/org/squbs/stream/PerpetualStreamSpec.scala
Scala
apache-2.0
7,104
/*
 * Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0 which
 * accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 */

package org.locationtech.geomesa.filter.expression

import org.geotools.filter.expression.FilterVisitorExpressionWrapper
import org.geotools.filter.{ExpressionType, FilterVisitor}
import org.geotools.util.Converters
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.expression.{ExpressionVisitor, PropertyName}
import org.xml.sax.helpers.NamespaceSupport

/**
 * Implementation of property name that looks up the value by index
 */
class FastPropertyName(name: String) extends PropertyName with org.geotools.filter.Expression {

  private var index: Int = -1

  override def getPropertyName: String = name

  override def getNamespaceContext: NamespaceSupport = null

  override def evaluate(obj: AnyRef): AnyRef = {
    val sf = try {
      obj.asInstanceOf[SimpleFeature]
    } catch {
      case e: Exception => throw new IllegalArgumentException("Only simple features are supported", e)
    }
    if (index == -1) {
      index = sf.getFeatureType.indexOf(name)
    }
    sf.getAttribute(index)
  }

  override def evaluate[T](obj: AnyRef, target: Class[T]): T =
    Converters.convert(evaluate(obj), target)

  override def accept(visitor: ExpressionVisitor, extraData: AnyRef): AnyRef =
    visitor.visit(this, extraData)

  // geotools filter methods - deprecated but still sometimes used

  override def getType: Short = ExpressionType.ATTRIBUTE

  override def getValue(feature: SimpleFeature): AnyRef = evaluate(feature.asInstanceOf[AnyRef])

  override def evaluate(feature: SimpleFeature): AnyRef = evaluate(feature.asInstanceOf[AnyRef])

  override def accept(visitor: FilterVisitor): Unit =
    accept(new FilterVisitorExpressionWrapper(visitor), null)
}
mcharles/geomesa
geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/expression/FastPropertyName.scala
Scala
apache-2.0
2,006
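A minimal usage sketch for the class above, assuming the caller already has a SimpleFeature (for example from a GeoTools feature source) with a hypothetical "name" attribute:

import org.locationtech.geomesa.filter.expression.FastPropertyName
import org.opengis.feature.simple.SimpleFeature

object FastPropertyNameSketch {
  // `sf` is assumed to be supplied by the surrounding application.
  def readName(sf: SimpleFeature): String = {
    val expr = new FastPropertyName("name") // "name" is an illustrative attribute name
    // The first evaluation resolves and caches the attribute index; later calls reuse it.
    expr.evaluate(sf, classOf[String])
  }
}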
package controllers import play.api.mvc.Action import lila.app._ import lila.common.HTTPRequest import lila.game.{ Game => GameModel, GameRepo } import play.api.http.ContentTypes import play.api.libs.iteratee.{ Iteratee, Enumerator } import play.api.mvc.Result import views._ object Export extends LilaController { private def env = Env.game def pgn(id: String) = Open { implicit ctx => OnlyHumans { OptionFuResult(GameRepo game id) { game => (game.pgnImport.ifTrue(~get("as") == "imported") match { case Some(i) => fuccess(i.pgn) case None => for { initialFen <- GameRepo initialFen game pgn = Env.api.pgnDump(game, initialFen) analysis ← (~get("as") != "raw") ?? (Env.analyse.analyser getDone game.id) } yield Env.analyse.annotator(pgn, analysis, gameOpening(game), game.winnerColor, game.status, game.clock).toString }) map { content => Ok(content).withHeaders( CONTENT_TYPE -> ContentTypes.TEXT, CONTENT_DISPOSITION -> ("attachment; filename=" + (Env.api.pgnDump filename game))) } } } } def pdf(id: String) = Open { implicit ctx => OnlyHumans { OptionResult(GameRepo game id) { game => Ok.chunked(Enumerator.outputStream(env.pdfExport(game.id))).withHeaders( CONTENT_TYPE -> "application/pdf", CACHE_CONTROL -> "max-age=7200") } } } def png(id: String) = Open { implicit ctx => OnlyHumansAndFacebook { OptionResult(GameRepo game id) { game => Ok.chunked(Enumerator.outputStream(env.pngExport(game))).withHeaders( CONTENT_TYPE -> "image/png", CACHE_CONTROL -> "max-age=7200") } } } def puzzlePng(id: Int) = Open { implicit ctx => OnlyHumansAndFacebook { OptionResult(Env.puzzle.api.puzzle find id) { puzzle => Ok.chunked(Enumerator.outputStream(Env.puzzle.pngExport(puzzle))).withHeaders( CONTENT_TYPE -> "image/png", CACHE_CONTROL -> "max-age=7200") } } } private def OnlyHumans(result: => Fu[Result])(implicit ctx: lila.api.Context) = if (HTTPRequest isBot ctx.req) fuccess(NotFound) else result private def OnlyHumansAndFacebook(result: => Fu[Result])(implicit ctx: lila.api.Context) = if (HTTPRequest isFacebookBot ctx.req) result else if (HTTPRequest isBot ctx.req) fuccess(NotFound) else result private def gameOpening(game: GameModel) = if (game.fromPosition || game.variant.exotic) none else chess.OpeningExplorer openingOf game.pgnMoves }
r0k3/lila
app/controllers/Export.scala
Scala
mit
2,605
package com.github.pirita

import akka.actor.ActorSystem
import akka.stream.ActorFlowMaterializer
import akka.stream.scaladsl.{Flow, RunnableFlow, Sink, Source}

import scala.concurrent.Future
import scala.util.Random

/**
 * Basic example of Linear Flow
 * @author Ignacio Navarro Martín
 */
object BasicLinearFlow extends App {

  implicit val system = ActorSystem("Sys")
  implicit val materializer = ActorFlowMaterializer()

  val source: Source[Int, Unit] = Source.apply(Stream.continually(Random.nextInt))
  val filter: Flow[Int, Int, Unit] = Flow[Int].filter(_ > 0)
  val sinkConsole: Sink[Int, Future[Unit]] = Sink.foreach[Int](println(_))

  val runnableFlow: RunnableFlow[Unit] = source via filter filter(_ % 2 == 0) to sinkConsole

  runnableFlow.run() // We materialize the flow
}
pirita/streams
src/main/scala/com/github/pirita/BasicLinearFlow.scala
Scala
mit
795
package scaffvis.client.components import scaffvis.client.AppMain.{ScaffoldLoc, Loc} import scaffvis.client.components.common.{Svg, SvgProvider} import scaffvis.client.store.model.Scaffolds import scaffvis.layout.Rect import scaffvis.shared.model.{HierarchyLevels, Scaffold, ScaffoldId} import diode.react.ModelProxy import japgolly.scalajs.react._ import japgolly.scalajs.react.extra.router.RouterCtl import japgolly.scalajs.react.vdom.prefix_<^._ import scala.language.implicitConversions object Breadcrumb { case class Props(proxy: ModelProxy[Scaffolds], router: RouterCtl[Loc], currentScaffoldId: ScaffoldId, tooltipControl: TooltipControl) def apply(props: Props) = component(props) val totalHeight = 1000 //divide: 90% items, 10% padding val itemSize: Double = totalHeight * 0.9 / HierarchyLevels.count val itemPadding: Double = totalHeight * 0.1 / Math.max(HierarchyLevels.count - 1, 1) val canvas = Rect(x = 0, y = 0, w = Math.ceil(itemSize), h = totalHeight) def itemY(level: Int) = (level - HierarchyLevels.topLvl) * (itemSize + itemPadding) val component = ReactComponentB[Props]("Breadcrumb") .render_P { case Props(proxy, router, currentScaffoldId, tooltipControl) => { val scaffolds = proxy.value val scaffoldTree = scaffolds.scaffoldHierarchy val svgProvider = scaffolds.svgProvider val currentScaffold = scaffoldTree(currentScaffoldId) val path = scaffoldTree.path(currentScaffold) val navigateTo: ScaffoldId => Callback = (sid) => router.set(ScaffoldLoc(sid)) val triples = path.zipWithIndex.map { case (scaffold, idx) => { val position = Rect(x = 0, y = itemY(idx), w = itemSize, h = itemSize) renderScaffold(scaffold = scaffold, svgProvider = svgProvider, position = position, isCurrent = (scaffold == currentScaffold), navigateTo = navigateTo, tooltipControl = tooltipControl ) } } val threeSeqs = triples.unzip3 val seq = threeSeqs match { case (bgrs, isvgs, fgrs) => bgrs ++ isvgs.flatten ++ fgrs } <.div( ^.id := "Breadcrumb", ^.className := "breadcrumb", <.svg.svg( ^.width := "100%", ^.height := "100%", ^.svg.viewBox := Svg.viewBoxFromRect(canvas, padding = 5), seq.toReactNodeArray ) ) }} .componentDidMount(scope => onComponentRenderedLoadMissingSVG(scope.props)) .componentDidUpdate(scope => onComponentRenderedLoadMissingSVG(scope.currentProps)) .build implicit def positionTagMod(r: Rect): TagMod = Seq(^.svg.x := r.x, ^.svg.y := r.y, ^.svg.width := r.w, ^.svg.height := r.h) private def renderScaffold(scaffold: Scaffold, svgProvider: SvgProvider, position: Rect, isCurrent: Boolean, navigateTo: ScaffoldId => Callback, tooltipControl: TooltipControl ) = { // three layers: // 1) rect with bg color // 2) svg // 3) transparent rect with mouse events val classNameBase: String = { val sb = new StringBuilder sb.append("breadcrumb-molecule") if (isCurrent) sb.append(" current") sb.toString() } val backgroundRect = <.svg.rect( ^.className := s"breadcrumb-molecule-bg $classNameBase", ^.key := scaffold.id * 10 + 1, position ) val foregroundRect = <.svg.rect( ^.className := s"breadcrumb-molecule-fg $classNameBase", ^.key := scaffold.id * 10 + 2, position, ^.onMouseOver ==> tooltipControl.showTooltipScaffold(scaffold), ^.onMouseMove ==> tooltipControl.moveTooltip, ^.onMouseOut --> tooltipControl.hideTooltip, ^.onClick --> navigateTo(scaffold.id) ) val innerSvg = svgProvider.getSvg(scaffold.id).map(svgContent => Svg.svgImageFromSvgContent(position, Svg.moleculeSvgViewBox, svgContent, ^.key := scaffold.id * 10 + 3) ) (backgroundRect, innerSvg, foregroundRect) } /** * loads missing SVGs */ val 
onComponentRenderedLoadMissingSVG: Props => Callback = (props) => props.proxy.value.svgProvider.loadMissingSvgCallback() }
velkoborsky/scaffvis
client/src/main/scala/scaffvis/client/components/Breadcrumb.scala
Scala
gpl-3.0
4,129
/*
 * Copyright (C) 2015 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.rbmhtechnology.eventuate.chaos

import java.util.concurrent.TimeUnit

import com.typesafe.config.Config

class ChaosSettings(config: Config) {
  val delayStartMinMillis: Long = config.getDuration("delay.start.min", TimeUnit.MILLISECONDS)
  val delayStartMaxMillis: Long = config.getDuration("delay.start.max", TimeUnit.MILLISECONDS)
  val delayStopMinMillis: Long = config.getDuration("delay.stop.min", TimeUnit.MILLISECONDS)
  val delayStopMaxMillis: Long = config.getDuration("delay.stop.max", TimeUnit.MILLISECONDS)

  val nodesDownMax: Int = config.getInt("nodes.down.max")
  val nodesTotal: Int = config.getInt("nodes.total")

  assert(nodesTotal > nodesDownMax, "nodes.total must be > nodes.down.max")
}
linearregression/eventuate-chaos
src/main/scala/com/rbmhtechnology/eventuate/chaos/ChaosSettings.scala
Scala
apache-2.0
1,410
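The settings class above reads its values from a Typesafe Config. A sketch of the expected keys, with made-up example values:

import com.typesafe.config.ConfigFactory
import com.rbmhtechnology.eventuate.chaos.ChaosSettings

object ChaosSettingsSketch extends App {
  // Example values only; the keys mirror those read in ChaosSettings above.
  val config = ConfigFactory.parseString(
    """
      |delay.start.min = 1s
      |delay.start.max = 5s
      |delay.stop.min = 2s
      |delay.stop.max = 10s
      |nodes.down.max = 1
      |nodes.total = 3
      |""".stripMargin)

  val settings = new ChaosSettings(config)
  println(settings.delayStartMaxMillis) // 5000
}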
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.Properties import javax.annotation.concurrent.GuardedBy import scala.collection.mutable.ArrayBuffer import org.apache.spark.executor.TaskMetrics import org.apache.spark.internal.Logging import org.apache.spark.memory.TaskMemoryManager import org.apache.spark.metrics.MetricsSystem import org.apache.spark.metrics.source.Source import org.apache.spark.shuffle.FetchFailedException import org.apache.spark.util._ /** * A [[TaskContext]] implementation. * * A small note on thread safety. The interrupted & fetchFailed fields are volatile, this makes * sure that updates are always visible across threads. The complete & failed flags and their * callbacks are protected by locking on the context instance. For instance, this ensures * that you cannot add a completion listener in one thread while we are completing (and calling * the completion listeners) in another thread. Other state is immutable, however the exposed * `TaskMetrics` & `MetricsSystem` objects are not thread safe. */ private[spark] class TaskContextImpl( override val stageId: Int, override val stageAttemptNumber: Int, override val partitionId: Int, override val taskAttemptId: Long, override val attemptNumber: Int, override val taskMemoryManager: TaskMemoryManager, localProperties: Properties, @transient private val metricsSystem: MetricsSystem, // The default value is only used in tests. override val taskMetrics: TaskMetrics = TaskMetrics.empty) extends TaskContext with Logging { /** List of callback functions to execute when the task completes. */ @transient private val onCompleteCallbacks = new ArrayBuffer[TaskCompletionListener] /** List of callback functions to execute when the task fails. */ @transient private val onFailureCallbacks = new ArrayBuffer[TaskFailureListener] // If defined, the corresponding task has been killed and this option contains the reason. @volatile private var reasonIfKilled: Option[String] = None // Whether the task has completed. private var completed: Boolean = false // Whether the task has failed. private var failed: Boolean = false // Throwable that caused the task to fail private var failure: Throwable = _ // If there was a fetch failure in the task, we store it here, to make sure user-code doesn't // hide the exception. 
See SPARK-19276 @volatile private var _fetchFailedException: Option[FetchFailedException] = None @GuardedBy("this") override def addTaskCompletionListener(listener: TaskCompletionListener) : this.type = synchronized { if (completed) { listener.onTaskCompletion(this) } else { onCompleteCallbacks += listener } this } @GuardedBy("this") override def addTaskFailureListener(listener: TaskFailureListener) : this.type = synchronized { if (failed) { listener.onTaskFailure(this, failure) } else { onFailureCallbacks += listener } this } /** Marks the task as failed and triggers the failure listeners. */ @GuardedBy("this") private[spark] def markTaskFailed(error: Throwable): Unit = synchronized { if (failed) return failed = true failure = error invokeListeners(onFailureCallbacks, "TaskFailureListener", Option(error)) { _.onTaskFailure(this, error) } } /** Marks the task as completed and triggers the completion listeners. */ @GuardedBy("this") private[spark] def markTaskCompleted(error: Option[Throwable]): Unit = synchronized { if (completed) return completed = true invokeListeners(onCompleteCallbacks, "TaskCompletionListener", error) { _.onTaskCompletion(this) } } private def invokeListeners[T]( listeners: Seq[T], name: String, error: Option[Throwable])( callback: T => Unit): Unit = { val errorMsgs = new ArrayBuffer[String](2) // Process callbacks in the reverse order of registration listeners.reverse.foreach { listener => try { callback(listener) } catch { case e: Throwable => errorMsgs += e.getMessage logError(s"Error in $name", e) } } if (errorMsgs.nonEmpty) { throw new TaskCompletionListenerException(errorMsgs, error) } } /** Marks the task for interruption, i.e. cancellation. */ private[spark] def markInterrupted(reason: String): Unit = { reasonIfKilled = Some(reason) } private[spark] override def killTaskIfInterrupted(): Unit = { val reason = reasonIfKilled if (reason.isDefined) { throw new TaskKilledException(reason.get) } } private[spark] override def getKillReason(): Option[String] = { reasonIfKilled } @GuardedBy("this") override def isCompleted(): Boolean = synchronized(completed) override def isRunningLocally(): Boolean = false override def isInterrupted(): Boolean = reasonIfKilled.isDefined override def getLocalProperty(key: String): String = localProperties.getProperty(key) override def getMetricsSources(sourceName: String): Seq[Source] = metricsSystem.getSourcesByName(sourceName) private[spark] override def registerAccumulator(a: AccumulatorV2[_, _]): Unit = { taskMetrics.registerAccumulator(a) } private[spark] override def setFetchFailed(fetchFailed: FetchFailedException): Unit = { this._fetchFailedException = Option(fetchFailed) } private[spark] def fetchFailed: Option[FetchFailedException] = _fetchFailedException // TODO: shall we publish it and define it in `TaskContext`? private[spark] def getLocalProperties(): Properties = localProperties }
tejasapatil/spark
core/src/main/scala/org/apache/spark/TaskContextImpl.scala
Scala
apache-2.0
6,437
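TaskContextImpl backs the public TaskContext API, and markTaskCompleted/markTaskFailed above invoke the registered listeners. A hedged sketch of registering a completion listener from user code running inside a task:

import org.apache.spark.TaskContext
import org.apache.spark.util.TaskCompletionListener

object TaskListenerSketch {
  // Call from inside a task (e.g. within rdd.mapPartitions); TaskContext.get() is null on the driver.
  def registerCleanup(resource: AutoCloseable): Unit = {
    TaskContext.get().addTaskCompletionListener(new TaskCompletionListener {
      // Runs when the task completes, per markTaskCompleted above.
      override def onTaskCompletion(context: TaskContext): Unit = resource.close()
    })
  }
}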
package hasheq
package mutable

import scala.language.higherKinds

/** Witness that for all types `V`, `M[K, V]` represents a mutable map
  * with keys of type `K` and values of type `V`.
  */
trait MapRepr[M[_, _], K] {

  def empty[V]: M[K, V]

  def fromIterable[V](col: Iterable[(K, V)])(implicit E: Equal[K]): M[K, V] =
    fromIterator(col.iterator)

  def fromIterator[V](it: Iterator[(K, V)])(implicit E: Equal[K]): M[K, V] = {
    val m = empty[V]
    it.foreach(kv => put(m, kv._1, kv._2))
    m
  }

  def size[V](m: M[K, V]): Int

  def isEmpty[V](m: M[K, V]): Boolean = size(m) == 0

  def get[V](m: M[K, V], k: K)(implicit E: Equal[K]): Option[V]

  def iterator[V](m: M[K, V]): Iterator[(K, V)]
  def keysIterator[V](m: M[K, V]): Iterator[K]
  def valuesIterator[V](m: M[K, V]): Iterator[V]

  def keySet[S[_]](m: M[K, _])(implicit ev: SetRepr[S, K]): S[K] =
    ev.fromIterator(keysIterator(m))

  def keys[V](m: M[K, V]): Iterable[K]
  def values[V](m: M[K, V]): Iterable[V]

  def put[V](m: M[K, V], k: K, v: V)(implicit E: Equal[K]): Option[V]

  def update[V](m: M[K, V], k: K, v: V)(combine: (V, V) => V)(implicit E: Equal[K]): Unit = {
    get(m, k) match {
      case Some(v0) => put(m, k, combine(v0, v))
      case None     => put(m, k, v)
    }
  }
}
TomasMikula/hasheq
src/main/scala/hasheq/mutable/MapRepr.scala
Scala
bsd-3-clause
1,260
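Since MapRepr is a typeclass, generic code can be written against it without committing to a concrete map type. A sketch of a word-count helper using only the operations above; it assumes the library's Equal instance for String is in implicit scope:

import scala.language.higherKinds

import hasheq.Equal
import hasheq.mutable.MapRepr

object MapReprSketch {
  // Counts word occurrences using only MapRepr operations; works for any map representation M.
  def wordCount[M[_, _]](words: Iterable[String])(implicit
      M: MapRepr[M, String], E: Equal[String]): M[String, Int] = {
    val m = M.empty[Int]
    words.foreach(w => M.update(m, w, 1)(_ + _))
    m
  }
}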
package models.jnlp

/**
 * Created by IntelliJ IDEA.
 * User: Jason
 * Date: 5/24/12
 * Time: 4:43 PM
 */

abstract class AbstractJar(val jarName: String, val isMain: Boolean, val isLazy: Boolean)

class Jar(name: String, iAmLazy: Boolean) extends AbstractJar(name, false, iAmLazy)

class MainJar(name: String) extends AbstractJar(name, true, false)
NetLogo/SimServer
app/models/jnlp/Jar.scala
Scala
gpl-2.0
350
package sw.pairrdds

import org.apache.spark.{SparkConf, SparkContext}

object ThisWillNotCompile extends App {

  val sparkConf = new SparkConf()
    .setAppName(this.getClass.getName)
    .setMaster("local[*]")
  val sc = new SparkContext(sparkConf)

  val allShakespeare = sc.textFile("src/main/resources/all-shakespeare.txt")

  // reduceByKey lives in PairRDDFunctions and is only available (via implicit conversion)
  // on RDDs of key/value pairs. allShakespeare is an RDD[String], so uncommenting the
  // lines below produces a compilation error.
  val weird = allShakespeare
  //  .reduceByKey {
  //    case (acc, length) => acc + length
  //  }

  weird.take(5).foreach(println)

  sc.stop()
}
rabbitonweb/spark-workshop
src/main/scala/sw/pairrdds/CompilationError.scala
Scala
apache-2.0
484
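The commented-out call above fails to compile because reduceByKey only exists on RDDs of key/value pairs. A hedged sketch of a pair-RDD variant that does compile; the object name and key choice are illustrative only:

package sw.pairrdds

import org.apache.spark.{SparkConf, SparkContext}

// A compiling counterpart: reduceByKey becomes available once the data is shaped
// as key/value pairs (RDD[(String, Int)]).
object ThisCompiles extends App {
  val sparkConf = new SparkConf()
    .setAppName(this.getClass.getName)
    .setMaster("local[*]")
  val sc = new SparkContext(sparkConf)

  val allShakespeare = sc.textFile("src/main/resources/all-shakespeare.txt")

  val lineLengthPerFirstWord = allShakespeare
    .map(line => (line.takeWhile(!_.isWhitespace), line.length)) // RDD[(String, Int)]
    .reduceByKey { case (acc, length) => acc + length }

  lineLengthPerFirstWord.take(5).foreach(println)

  sc.stop()
}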
package cassandra.implicits

import cassandra.cql.CqlTuple
import cassandra.format.CqlFormat

import scala.language.implicitConversions

trait TupleImplicits {
  self: LowPriorityImplicits =>

  type CF[T] = CqlFormat[T]

  implicit def tuple1[T1](implicit tf: CF[T1]): CF[(T1)] =
    makeFormat(t1 => CqlTuple(tf(t1)))

  implicit def tuple2[T1, T2](implicit tf1: CF[T1], tf2: CF[T2]): CF[(T1, T2)] =
    makeFormat(t => CqlTuple(tf1(t._1), tf2(t._2)))

  implicit def tuple3[T1, T2, T3](implicit tf1: CF[T1], tf2: CF[T2], tf3: CF[T3]): CF[(T1, T2, T3)] =
    makeFormat(t => CqlTuple(tf1(t._1), tf2(t._2), tf3(t._3)))

  implicit def tuple4[T1, T2, T3, T4](implicit tf1: CF[T1], tf2: CF[T2], tf3: CF[T3], tf4: CF[T4]): CF[(T1, T2, T3, T4)] =
    makeFormat(t => CqlTuple(tf1(t._1), tf2(t._2), tf3(t._3), tf4(t._4)))

  implicit def tuple5[T1, T2, T3, T4, T5](implicit tf1: CF[T1], tf2: CF[T2], tf3: CF[T3], tf4: CF[T4], tf5: CF[T5]): CF[(T1, T2, T3, T4, T5)] =
    makeFormat(t => CqlTuple(tf1(t._1), tf2(t._2), tf3(t._3), tf4(t._4), tf5(t._5)))

  implicit def tuple6[T1, T2, T3, T4, T5, T6](implicit tf1: CF[T1], tf2: CF[T2], tf3: CF[T3], tf4: CF[T4], tf5: CF[T5], tf6: CF[T6]): CF[(T1, T2, T3, T4, T5, T6)] =
    makeFormat(t => CqlTuple(tf1(t._1), tf2(t._2), tf3(t._3), tf4(t._4), tf5(t._5), tf6(t._6)))

  /* for more add your own */
}
fabianmurariu/cassandra-scala-nuveau-driver
cql/lib/src/main/scala/cassandra/implicits/TupleImplicits.scala
Scala
apache-2.0
1,339
// @SOURCE:/home/eva/PlayResponsiveKamanu/conf/routes // @HASH:191b1e11d2641e11fc2fa287beb6243a788da730 // @DATE:Tue Oct 08 01:09:16 HST 2013 import play.core._ import play.core.Router._ import play.core.j._ import play.api.mvc._ import play.libs.F import Router.queryString object Routes extends Router.Routes { private var _prefix = "/" def setPrefix(prefix: String) { _prefix = prefix List[(String,Routes)]().foreach { case (p, router) => router.setPrefix(prefix + (if(prefix.endsWith("/")) "" else "/") + p) } } def prefix = _prefix lazy val defaultPrefix = { if(Routes.prefix.endsWith("/")) "" else "/" } // @LINE:6 private[this] lazy val controllers_Application_index0 = Route("GET", PathPattern(List(StaticPart(Routes.prefix)))) // @LINE:7 private[this] lazy val controllers_Application_page11 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("page1")))) // @LINE:10 private[this] lazy val controllers_Assets_at2 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("assets/"),DynamicPart("file", """.+""",false)))) def documentation = List(("""GET""", prefix,"""controllers.Application.index()"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """page1""","""controllers.Application.page1()"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """assets/$file<.+>""","""controllers.Assets.at(path:String = "/public", file:String)""")).foldLeft(List.empty[(String,String,String)]) { (s,e) => e.asInstanceOf[Any] match { case r @ (_,_,_) => s :+ r.asInstanceOf[(String,String,String)] case l => s ++ l.asInstanceOf[List[(String,String,String)]] }} def routes:PartialFunction[RequestHeader,Handler] = { // @LINE:6 case controllers_Application_index0(params) => { call { invokeHandler(controllers.Application.index(), HandlerDef(this, "controllers.Application", "index", Nil,"GET", """ Home page""", Routes.prefix + """""")) } } // @LINE:7 case controllers_Application_page11(params) => { call { invokeHandler(controllers.Application.page1(), HandlerDef(this, "controllers.Application", "page1", Nil,"GET", """""", Routes.prefix + """page1""")) } } // @LINE:10 case controllers_Assets_at2(params) => { call(Param[String]("path", Right("/public")), params.fromPath[String]("file", None)) { (path, file) => invokeHandler(controllers.Assets.at(path, file), HandlerDef(this, "controllers.Assets", "at", Seq(classOf[String], classOf[String]),"GET", """ Map static resources from the /public folder to the /assets URL path""", Routes.prefix + """assets/$file<.+>""")) } } } }
evashek/PlayResponsiveKamanu
target/scala-2.10/src_managed/main/routes_routing.scala
Scala
apache-2.0
2,748
/*
 * Scala.js (https://www.scala-js.org/)
 *
 * Copyright EPFL.
 *
 * Licensed under Apache License 2.0
 * (https://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package org.scalajs.testing.common

import sbt.testing.TaskDef

private[testing] object JSEndpoints {

  val detectFrameworks: RPCEndpoint.EP[List[List[String]], List[Option[FrameworkInfo]]] =
    RPCEndpoint[List[List[String]], List[Option[FrameworkInfo]]](2)

  val createMasterRunner: RPCEndpoint.EP[RunnerArgs, Unit] =
    RPCEndpoint[RunnerArgs, Unit](3)

  val createSlaveRunner: RPCEndpoint.EP[RunnerArgs, Unit] =
    RPCEndpoint[RunnerArgs, Unit](4)

  val msgSlave: MsgEndpoint.EP[RunMux[String]] =
    MsgEndpoint[RunMux[String]](5)

  val msgMaster: MsgEndpoint.EP[RunMux[FrameworkMessage]] =
    MsgEndpoint[RunMux[FrameworkMessage]](6)

  val tasks: RPCEndpoint.EP[RunMux[List[TaskDef]], List[TaskInfo]] =
    RPCEndpoint[RunMux[List[TaskDef]], List[TaskInfo]](7)

  val execute: RPCEndpoint.EP[RunMux[ExecuteRequest], List[TaskInfo]] =
    RPCEndpoint[RunMux[ExecuteRequest], List[TaskInfo]](8)

  val done: RPCEndpoint.EP[RunMux[Unit], String] =
    RPCEndpoint[RunMux[Unit], String](9)
}
gzm0/scala-js
test-common/src/main/scala/org/scalajs/testing/common/JSEndpoints.scala
Scala
apache-2.0
1,277
package controllers.pages

import models.Associations
import models.core.{ Annotations, AnnotatedThings, Datasets }
import play.api.db.slick._
import play.api.mvc.Controller
import controllers.AbstractController

object DatasetPagesController extends AbstractController {

  def listAll = loggingAction { implicit session =>
    val datasets = Datasets.countAll()
    val things = AnnotatedThings.countAll(true)
    val annotations = Annotations.countAll
    Ok(views.html.datasetList(datasets, things, annotations))
  }

  def showDataset(id: String) = loggingAction { implicit session =>
    val dataset = Datasets.findById(id)
    if (dataset.isDefined) {
      val id = dataset.get.id
      val things = AnnotatedThings.countByDataset(id)
      val places = Associations.countPlacesInDataset(id)
      val annotations = Annotations.countByDataset(id)
      val supersets = Datasets.findByIds(Datasets.getParentHierarchy(id))
      val subsets = Datasets.listSubsets(id)
      Ok(views.html.datasetDetails(dataset.get, things, annotations, places, supersets, subsets))
    } else {
      NotFound // TODO create decent 'not found' page
    }
  }
}
pelagios/peripleo
app/controllers/pages/DatasetPagesController.scala
Scala
gpl-3.0
1,156
package org.embulk.parser.firebase_avro.json

import io.circe.{Encoder, Json}

object CustomEncoder {
  implicit val mapEncoder: Encoder[Map[String, Any]] = new Encoder[Map[String, Any]] {
    final def apply(a: Map[String, Any]): Json = {
      val jsonValues = a.keys.map { key =>
        a(key) match {
          case v: String => (key, Json.fromString(v))
          case v: Double => (key, Json.fromDouble(v).getOrElse(Json.fromString(v.toString)))
          case v: Long   => (key, Json.fromLong(v))
          case _         => (key, Json.Null)
        }
      }
      Json.fromFields(jsonValues)
    }
  }
}
smdmts/embulk-parser-firebase_avro
src/main/scala/org/embulk/parser/firebase_avro/json/CustomEncoder.scala
Scala
mit
652
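A usage sketch for the encoder above with circe's syntax import; note that any value outside the String/Double/Long cases is encoded as null, per the match above:

import io.circe.Json
import io.circe.syntax._
import org.embulk.parser.firebase_avro.json.CustomEncoder._

object CustomEncoderSketch extends App {
  val record: Map[String, Any] = Map(
    "name"  -> "event",
    "count" -> 3L,
    "score" -> 0.5,
    "other" -> List(1, 2, 3) // unsupported type, encoded as null by the match above
  )

  val json: Json = record.asJson
  println(json.noSpaces)
}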
/**
 *
 * CRC16
 * Ledger wallet
 *
 * Created by Pierre Pollastri on 27/10/15.
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2015 Ledger
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
package co.ledger.wallet.core.utils

object CRC16 {

  def apply(data: Array[Byte]): Short = {
    var crc = 0xFFFF
    for (i <- 0 until data.length) {
      crc = ((crc >>> 8) | (crc << 8)) & 0xFFFF
      crc ^= (data(i) & 0xFF)
      crc ^= ((crc & 0xFF) >> 4)
      crc ^= (crc << 12) & 0xFFFF
      crc ^= ((crc & 0xFF) << 5) & 0xFFFF
    }
    crc.toShort
  }

}
LedgerHQ/ledger-wallet-android
app/src/main/scala/co/ledger/wallet/core/utils/CRC16.scala
Scala
mit
1,592
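A small usage sketch for the checksum above; the input bytes are arbitrary sample data and no particular expected value is claimed for this bit-shuffling variant:

import co.ledger.wallet.core.utils.CRC16

object Crc16Sketch extends App {
  val data = "123456789".getBytes("US-ASCII")
  val crc: Short = CRC16(data)
  // Mask to 16 bits before formatting, since Short is signed.
  println(f"CRC16 = 0x${crc & 0xFFFF}%04X")
}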
package cat.alex.dominoes

import org.scalatest.{Matchers, FlatSpec}

class PlayerSpec extends FlatSpec with Matchers {

  behavior of "A player"

  def tile1 = Tile(1, 2)

  it should "calculate points as the sum of all tiles values" in {
    Player("", List()).points shouldBe 0
    Player("", List(tile1, tile1)).points shouldBe 6
  }
}
alxgarcia/dominoes
src/test/scala/cat/alex/dominoes/PlayerSpec.scala
Scala
lgpl-3.0
340
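The spec above references Tile and Player, which are defined elsewhere in the project. One hypothetical implementation that would satisfy both assertions (points as the sum of both pip values across all tiles):

package cat.alex.dominoes

// Hypothetical domain model consistent with PlayerSpec above; the real project may define these differently.
case class Tile(left: Int, right: Int) {
  def value: Int = left + right
}

case class Player(name: String, tiles: List[Tile]) {
  def points: Int = tiles.map(_.value).sum
}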
package scalax.chart
package event

import scala.swing.event.Event

import org.jfree.chart.plot.Plot
import org.jfree.chart.title.Title

/** Event on a chart.
  *
  * @see [[Chart]]
  */
sealed abstract class ChartEvent protected () extends Event {
  /** Returns the chart on which the event occurred. */
  def chart: Chart
}

/** Contains concrete chart events. */
object ChartEvent {

  /** Event indicating a dataset has been updated. */
  final case class DatasetUpdated(chart: Chart) extends ChartEvent

  /** Event indicating a new dataset has been added to the chart. */
  final case class NewDataset(chart: Chart) extends ChartEvent

  /** Event indicating a click on a chart. */
  final case class General(chart: Chart) extends ChartEvent

}

/** Event indicating a plot of the chart has changed. */
final case class PlotChanged(chart: Chart, plot: Plot) extends ChartEvent

/** Event indicating a title of the chart has changed. */
final case class TitleChanged(chart: Chart, title: Title) extends ChartEvent
wookietreiber/scala-chart
src/main/scala/scalax/chart/event/ChartEvent.scala
Scala
lgpl-3.0
1,021
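Because the hierarchy above is sealed, handlers can match on it exhaustively; since the events extend scala.swing.event.Event, they are presumably delivered through the usual swing publisher/reactor mechanism. A sketch of a handler over the event types defined above:

import scalax.chart.Chart
import scalax.chart.event._

object ChartEventSketch {
  // Exhaustive match over the sealed ChartEvent hierarchy defined above.
  def describe(event: ChartEvent): String = event match {
    case ChartEvent.DatasetUpdated(chart) => s"dataset updated on $chart"
    case ChartEvent.NewDataset(chart)     => s"new dataset added to $chart"
    case ChartEvent.General(chart)        => s"chart $chart changed"
    case PlotChanged(chart, plot)         => s"plot $plot of $chart changed"
    case TitleChanged(chart, title)       => s"title $title of $chart changed"
  }
}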
package io.iohk.ethereum.db.storage

import io.iohk.ethereum.db.dataSource.RocksDbDataSource.IterationError
import io.iohk.ethereum.db.dataSource.{DataSource, DataSourceBatchUpdate, DataSourceUpdate}
import monix.reactive.Observable

/**
  * Represents transactional key value storage mapping keys of type K to values of type V
  * Note: all methods that perform updates return [[io.iohk.ethereum.db.dataSource.DataSourceBatchUpdate]]
  * meaning no updates are actually saved in the underlying DataSource until `.commit()` is called.
  */
trait TransactionalKeyValueStorage[K, V] {

  val dataSource: DataSource
  val namespace: IndexedSeq[Byte]

  def keySerializer: K => IndexedSeq[Byte]
  def valueSerializer: V => IndexedSeq[Byte]
  def valueDeserializer: IndexedSeq[Byte] => V
  def keyDeserializer: IndexedSeq[Byte] => K

  /**
    * This function obtains the associated value to a key in the current namespace, if there exists one.
    *
    * @param key
    * @return the value associated with the passed key, if there exists one.
    */
  def get(key: K): Option[V] =
    dataSource.get(namespace, keySerializer(key)).map(valueDeserializer)

  /**
    * This function creates a batch of updates to the KeyValueStorage by deleting, updating and inserting new (key-value)
    * pairs in the current namespace. The batch should be committed atomically.
    */
  def update(toRemove: Seq[K], toUpsert: Seq[(K, V)]): DataSourceBatchUpdate = {
    DataSourceBatchUpdate(
      dataSource,
      Array(
        DataSourceUpdate(
          namespace,
          toRemove.map(keySerializer),
          toUpsert.map { case (k, v) => keySerializer(k) -> valueSerializer(v) }
        )
      )
    )
  }

  def put(key: K, value: V): DataSourceBatchUpdate =
    update(Nil, Seq(key -> value))

  def remove(key: K): DataSourceBatchUpdate =
    update(Seq(key), Nil)

  def emptyBatchUpdate: DataSourceBatchUpdate =
    DataSourceBatchUpdate(dataSource, Array.empty)

  def storageContent: Observable[Either[IterationError, (K, V)]] = {
    dataSource.iterate(namespace).map { result =>
      result.map { case (key, value) =>
        (keyDeserializer(key.toIndexedSeq), valueDeserializer(value))
      }
    }
  }
}
input-output-hk/etc-client
src/main/scala/io/iohk/ethereum/db/storage/TransactionalKeyValueStorage.scala
Scala
mit
2,224
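A minimal concrete storage sketch for the trait above, using String keys and values. The DataSource instance and the namespace byte are assumed to be supplied by the surrounding application, and the commit() call follows the trait's note that updates are batched until committed:

import io.iohk.ethereum.db.dataSource.DataSource
import io.iohk.ethereum.db.storage.TransactionalKeyValueStorage

// Minimal sketch: a storage with String keys and values in a hypothetical namespace.
class StringStorage(val dataSource: DataSource) extends TransactionalKeyValueStorage[String, String] {
  val namespace: IndexedSeq[Byte] = IndexedSeq('s'.toByte)

  def keySerializer: String => IndexedSeq[Byte] = _.getBytes("UTF-8").toIndexedSeq
  def valueSerializer: String => IndexedSeq[Byte] = _.getBytes("UTF-8").toIndexedSeq
  def keyDeserializer: IndexedSeq[Byte] => String = bytes => new String(bytes.toArray, "UTF-8")
  def valueDeserializer: IndexedSeq[Byte] => String = bytes => new String(bytes.toArray, "UTF-8")
}

object StringStorageSketch {
  // Nothing reaches the DataSource until commit() is called on the returned batch.
  def writeThenRead(storage: StringStorage): Option[String] = {
    storage.put("answer", "42").commit()
    storage.get("answer")
  }
}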
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming import java.io.{File, InterruptedIOException, IOException, UncheckedIOException} import java.nio.channels.ClosedByInterruptException import java.time.ZoneId import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit} import scala.concurrent.TimeoutException import scala.reflect.ClassTag import scala.util.control.ControlThrowable import com.google.common.util.concurrent.UncheckedExecutionException import org.apache.commons.io.FileUtils import org.apache.hadoop.conf.Configuration import org.scalatest.time.SpanSugar._ import org.apache.spark.{SparkConf, SparkContext, TaskContext, TestUtils} import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart} import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.plans.logical.Range import org.apache.spark.sql.catalyst.streaming.{InternalOutputModes, StreamingRelationV2} import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.{LocalLimitExec, SimpleMode, SparkPlan} import org.apache.spark.sql.execution.command.ExplainCommand import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.sources.{ContinuousMemoryStream, MemorySink} import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreConf, StateStoreId, StateStoreProvider} import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.sources.StreamSourceProvider import org.apache.spark.sql.streaming.util.{BlockOnStopSourceProvider, StreamManualClock} import org.apache.spark.sql.types.{IntegerType, LongType, StructField, StructType} import org.apache.spark.util.Utils class StreamSuite extends StreamTest { import testImplicits._ test("map with recovery") { val inputData = MemoryStream[Int] val mapped = inputData.toDS().map(_ + 1) testStream(mapped)( AddData(inputData, 1, 2, 3), StartStream(), CheckAnswer(2, 3, 4), StopStream, AddData(inputData, 4, 5, 6), StartStream(), CheckAnswer(2, 3, 4, 5, 6, 7)) } test("join") { // Make a table and ensure it will be broadcast. val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") // Join the input stream with a table. 
val inputData = MemoryStream[Int] val joined = inputData.toDS().toDF().join(smallTable, $"value" === $"number") testStream(joined)( AddData(inputData, 1, 2, 3), CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two")), AddData(inputData, 4), CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two"), Row(4, 4, "four"))) } test("StreamingRelation.computeStats") { withTempDir { dir => val df = spark.readStream.format("csv").schema(StructType(Seq())).load(dir.getCanonicalPath) val streamingRelation = df.logicalPlan collect { case s: StreamingRelation => s } assert(streamingRelation.nonEmpty, "cannot find StreamingRelation") assert( streamingRelation.head.computeStats.sizeInBytes == spark.sessionState.conf.defaultSizeInBytes) } } test("StreamingRelationV2.computeStats") { val streamingRelation = spark.readStream.format("rate").load().logicalPlan collect { case s: StreamingRelationV2 => s } assert(streamingRelation.nonEmpty, "cannot find StreamingRelationV2") assert( streamingRelation.head.computeStats.sizeInBytes == spark.sessionState.conf.defaultSizeInBytes) } test("StreamingExecutionRelation.computeStats") { val memoryStream = MemoryStream[Int] val executionRelation = StreamingExecutionRelation( memoryStream, memoryStream.encoder.schema.toAttributes)(memoryStream.sqlContext.sparkSession) assert(executionRelation.computeStats.sizeInBytes == spark.sessionState.conf.defaultSizeInBytes) } test("explain join with a normal source") { // This test triggers CostBasedJoinReorder to call `computeStats` withSQLConf(SQLConf.CBO_ENABLED.key -> "true", SQLConf.JOIN_REORDER_ENABLED.key -> "true") { val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") val smallTable2 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") val smallTable3 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") // Join the input stream with a table. val df = spark.readStream.format("rate").load() val joined = df.join(smallTable, smallTable("number") === $"value") .join(smallTable2, smallTable2("number") === $"value") .join(smallTable3, smallTable3("number") === $"value") val outputStream = new java.io.ByteArrayOutputStream() Console.withOut(outputStream) { joined.explain(true) } assert(outputStream.toString.contains("StreamingRelation")) } } test("explain join with MemoryStream") { // This test triggers CostBasedJoinReorder to call `computeStats` // Because MemoryStream doesn't use DataSource code path, we need a separate test. withSQLConf(SQLConf.CBO_ENABLED.key -> "true", SQLConf.JOIN_REORDER_ENABLED.key -> "true") { val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") val smallTable2 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") val smallTable3 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word") // Join the input stream with a table. 
val df = MemoryStream[Int].toDF val joined = df.join(smallTable, smallTable("number") === $"value") .join(smallTable2, smallTable2("number") === $"value") .join(smallTable3, smallTable3("number") === $"value") val outputStream = new java.io.ByteArrayOutputStream() Console.withOut(outputStream) { joined.explain(true) } assert(outputStream.toString.contains("StreamingRelation")) } } test("SPARK-20432: union one stream with itself") { val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load().select("a") val unioned = df.union(df) withTempDir { outputDir => withTempDir { checkpointDir => val query = unioned .writeStream.format("parquet") .option("checkpointLocation", checkpointDir.getAbsolutePath) .start(outputDir.getAbsolutePath) try { query.processAllAvailable() val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long] checkDatasetUnorderly[Long](outputDf, (0L to 10L).union((0L to 10L)).toArray: _*) } finally { query.stop() } } } } test("union two streams") { val inputData1 = MemoryStream[Int] val inputData2 = MemoryStream[Int] val unioned = inputData1.toDS().union(inputData2.toDS()) testStream(unioned)( AddData(inputData1, 1, 3, 5), CheckAnswer(1, 3, 5), AddData(inputData2, 2, 4, 6), CheckAnswer(1, 2, 3, 4, 5, 6), StopStream, AddData(inputData1, 7), StartStream(), AddData(inputData2, 8), CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8)) } test("sql queries") { withTempView("stream") { val inputData = MemoryStream[Int] inputData.toDF().createOrReplaceTempView("stream") val evens = sql("SELECT * FROM stream WHERE value % 2 = 0") testStream(evens)( AddData(inputData, 1, 2, 3, 4), CheckAnswer(2, 4)) } } test("DataFrame reuse") { def assertDF(df: DataFrame): Unit = { withTempDir { outputDir => withTempDir { checkpointDir => val query = df.writeStream.format("parquet") .option("checkpointLocation", checkpointDir.getAbsolutePath) .start(outputDir.getAbsolutePath) try { query.processAllAvailable() // Parquet write page-level CRC checksums will change the file size and // affect the data order when reading these files. Please see PARQUET-1746 for details. 
val outputDf = spark.read.parquet(outputDir.getAbsolutePath).sort('a).as[Long] checkDataset[Long](outputDf, (0L to 10L).toArray: _*) } finally { query.stop() } } } } val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load() Seq("", "parquet").foreach { useV1Source => withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1Source) { assertDF(df) assertDF(df) } } } test("Within the same streaming query, one StreamingRelation should only be transformed to one " + "StreamingExecutionRelation") { val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load() var query: StreamExecution = null try { query = df.union(df) .writeStream .format("memory") .queryName("memory") .start() .asInstanceOf[StreamingQueryWrapper] .streamingQuery query.awaitInitialization(streamingTimeout.toMillis) val executionRelations = query .logicalPlan .collect { case ser: StreamingExecutionRelation => ser } assert(executionRelations.size === 2) assert(executionRelations.distinct.size === 1) } finally { if (query != null) { query.stop() } } } test("unsupported queries") { val streamInput = MemoryStream[Int] val batchInput = Seq(1, 2, 3).toDS() def assertError(expectedMsgs: Seq[String])(body: => Unit): Unit = { val e = intercept[AnalysisException] { body } expectedMsgs.foreach { s => assert(e.getMessage.contains(s)) } } // Running streaming plan as a batch query assertError("start" :: Nil) { streamInput.toDS.map { i => i }.count() } // Running non-streaming plan with as a streaming query assertError("without streaming sources" :: "start" :: Nil) { val ds = batchInput.map { i => i } testStream(ds)() } // Running streaming plan that cannot be incrementalized assertError("not supported" :: "streaming" :: Nil) { val ds = streamInput.toDS.map { i => i }.sort() testStream(ds)() } } test("minimize delay between batch construction and execution") { // For each batch, we would retrieve new data's offsets and log them before we run the execution // This checks whether the key of the offset log is the expected batch id def CheckOffsetLogLatestBatchId(expectedId: Int): AssertOnQuery = AssertOnQuery(_.offsetLog.getLatest().get._1 == expectedId, s"offsetLog's latest should be $expectedId") // Check the latest batchid in the commit log def CheckCommitLogLatestBatchId(expectedId: Int): AssertOnQuery = AssertOnQuery(_.commitLog.getLatest().get._1 == expectedId, s"commitLog's latest should be $expectedId") // Ensure that there has not been an incremental execution after restart def CheckNoIncrementalExecutionCurrentBatchId(): AssertOnQuery = AssertOnQuery(_.lastExecution == null, s"lastExecution not expected to run") // For each batch, we would log the state change during the execution // This checks whether the key of the state change log is the expected batch id def CheckIncrementalExecutionCurrentBatchId(expectedId: Int): AssertOnQuery = AssertOnQuery(_.lastExecution.asInstanceOf[IncrementalExecution].currentBatchId == expectedId, s"lastExecution's currentBatchId should be $expectedId") // For each batch, we would log the sink change after the execution // This checks whether the key of the sink change log is the expected batch id def CheckSinkLatestBatchId(expectedId: Int): AssertOnQuery = AssertOnQuery(_.sink.asInstanceOf[MemorySink].latestBatchId.get == expectedId, s"sink's lastBatchId should be $expectedId") val inputData = MemoryStream[Int] testStream(inputData.toDS())( StartStream(Trigger.ProcessingTime("10 seconds"), new StreamManualClock), /* -- batch 0 ----------------------- */ // Add some data in 
batch 0 AddData(inputData, 1, 2, 3), AdvanceManualClock(10 * 1000), // 10 seconds /* -- batch 1 ----------------------- */ // Check the results of batch 0 CheckAnswer(1, 2, 3), CheckIncrementalExecutionCurrentBatchId(0), CheckCommitLogLatestBatchId(0), CheckOffsetLogLatestBatchId(0), CheckSinkLatestBatchId(0), // Add some data in batch 1 AddData(inputData, 4, 5, 6), AdvanceManualClock(10 * 1000), /* -- batch _ ----------------------- */ // Check the results of batch 1 CheckAnswer(1, 2, 3, 4, 5, 6), CheckIncrementalExecutionCurrentBatchId(1), CheckCommitLogLatestBatchId(1), CheckOffsetLogLatestBatchId(1), CheckSinkLatestBatchId(1), AdvanceManualClock(10 * 1000), AdvanceManualClock(10 * 1000), AdvanceManualClock(10 * 1000), /* -- batch __ ---------------------- */ // Check the results of batch 1 again; this is to make sure that, when there's no new data, // the currentId does not get logged (e.g. as 2) even if the clock has advanced many times CheckAnswer(1, 2, 3, 4, 5, 6), CheckIncrementalExecutionCurrentBatchId(1), CheckCommitLogLatestBatchId(1), CheckOffsetLogLatestBatchId(1), CheckSinkLatestBatchId(1), /* Stop then restart the Stream */ StopStream, StartStream(Trigger.ProcessingTime("10 seconds"), new StreamManualClock(60 * 1000)), /* -- batch 1 no rerun ----------------- */ // batch 1 would not re-run because the latest batch id logged in commit log is 1 AdvanceManualClock(10 * 1000), CheckNoIncrementalExecutionCurrentBatchId(), /* -- batch 2 ----------------------- */ // Check the results of batch 1 CheckAnswer(1, 2, 3, 4, 5, 6), CheckCommitLogLatestBatchId(1), CheckOffsetLogLatestBatchId(1), CheckSinkLatestBatchId(1), // Add some data in batch 2 AddData(inputData, 7, 8, 9), AdvanceManualClock(10 * 1000), /* -- batch 3 ----------------------- */ // Check the results of batch 2 CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8, 9), CheckIncrementalExecutionCurrentBatchId(2), CheckCommitLogLatestBatchId(2), CheckOffsetLogLatestBatchId(2), CheckSinkLatestBatchId(2)) } test("insert an extraStrategy") { try { spark.experimental.extraStrategies = TestStrategy :: Nil val inputData = MemoryStream[(String, Int)] val df = inputData.toDS().map(_._1).toDF("a") testStream(df)( AddData(inputData, ("so slow", 1)), CheckAnswer("so fast")) } finally { spark.experimental.extraStrategies = Nil } } testQuietly("handle fatal errors thrown from the stream thread") { for (e <- Seq( new VirtualMachineError {}, new ThreadDeath, new LinkageError, new ControlThrowable {} )) { val source = new Source { override def getOffset: Option[Offset] = { throw e } override def getBatch(start: Option[Offset], end: Offset): DataFrame = { throw e } override def schema: StructType = StructType(Array(StructField("value", IntegerType))) override def stop(): Unit = {} } val df = Dataset[Int]( sqlContext.sparkSession, StreamingExecutionRelation(source, sqlContext.sparkSession)) testStream(df)( // `ExpectFailure(isFatalError = true)` verifies two things: // - Fatal errors can be propagated to `StreamingQuery.exception` and // `StreamingQuery.awaitTermination` like non fatal errors. // - Fatal errors can be caught by UncaughtExceptionHandler. 
ExpectFailure(isFatalError = true)(ClassTag(e.getClass)) ) } } test("output mode API in Scala") { assert(OutputMode.Append === InternalOutputModes.Append) assert(OutputMode.Complete === InternalOutputModes.Complete) assert(OutputMode.Update === InternalOutputModes.Update) } override protected def sparkConf: SparkConf = super.sparkConf .set("spark.redaction.string.regex", "file:/[\\\\w_]+") test("explain - redaction") { val replacement = "*********" val inputData = MemoryStream[String] val df = inputData.toDS().map(_ + "foo").groupBy("value").agg(count("*")) // Test StreamingQuery.display val q = df.writeStream.queryName("memory_explain").outputMode("complete").format("memory") .start() .asInstanceOf[StreamingQueryWrapper] .streamingQuery try { inputData.addData("abc") q.processAllAvailable() val explainWithoutExtended = q.explainInternal(false) assert(explainWithoutExtended.contains(replacement)) assert(explainWithoutExtended.contains("StateStoreRestore")) assert(!explainWithoutExtended.contains("file:/")) val explainWithExtended = q.explainInternal(true) assert(explainWithExtended.contains(replacement)) assert(explainWithExtended.contains("StateStoreRestore")) assert(!explainWithoutExtended.contains("file:/")) } finally { q.stop() } } test("explain") { val inputData = MemoryStream[String] val df = inputData.toDS().map(_ + "foo").groupBy("value").agg(count("*")) // Test `df.explain` val explain = ExplainCommand(df.queryExecution.logical, SimpleMode) val explainString = spark.sessionState .executePlan(explain) .executedPlan .executeCollect() .map(_.getString(0)) .mkString("\\n") assert(explainString.contains("StateStoreRestore")) assert(explainString.contains("StreamingRelation")) assert(!explainString.contains("LocalTableScan")) // Test StreamingQuery.display val q = df.writeStream.queryName("memory_explain").outputMode("complete").format("memory") .start() .asInstanceOf[StreamingQueryWrapper] .streamingQuery try { assert("No physical plan. Waiting for data." === q.explainInternal(false)) assert("No physical plan. Waiting for data." === q.explainInternal(true)) inputData.addData("abc") q.processAllAvailable() val explainWithoutExtended = q.explainInternal(false) // `extended = false` only displays the physical plan. assert("StreamingDataSourceV2Relation".r .findAllMatchIn(explainWithoutExtended).size === 0) assert("BatchScan".r .findAllMatchIn(explainWithoutExtended).size === 1) // Use "StateStoreRestore" to verify that it does output a streaming physical plan assert(explainWithoutExtended.contains("StateStoreRestore")) val explainWithExtended = q.explainInternal(true) // `extended = true` displays 3 logical plans (Parsed/Optimized/Optimized) and 1 physical // plan. 
assert("StreamingDataSourceV2Relation".r .findAllMatchIn(explainWithExtended).size === 3) assert("BatchScan".r .findAllMatchIn(explainWithExtended).size === 1) // Use "StateStoreRestore" to verify that it does output a streaming physical plan assert(explainWithExtended.contains("StateStoreRestore")) } finally { q.stop() } } test("explain-continuous") { val inputData = ContinuousMemoryStream[Int] val df = inputData.toDS().map(_ * 2).filter(_ > 5) // Test `df.explain` val explain = ExplainCommand(df.queryExecution.logical, SimpleMode) val explainString = spark.sessionState .executePlan(explain) .executedPlan .executeCollect() .map(_.getString(0)) .mkString("\\n") assert(explainString.contains("Filter")) assert(explainString.contains("MapElements")) assert(!explainString.contains("LocalTableScan")) // Test StreamingQuery.display val q = df.writeStream.queryName("memory_continuous_explain") .outputMode(OutputMode.Update()).format("memory") .trigger(Trigger.Continuous("1 seconds")) .start() .asInstanceOf[StreamingQueryWrapper] .streamingQuery try { // in continuous mode, the query will be run even there's no data // sleep a bit to ensure initialization eventually(timeout(2.seconds), interval(100.milliseconds)) { assert(q.lastExecution != null) } val explainWithoutExtended = q.explainInternal(false) // `extended = false` only displays the physical plan. assert("StreamingDataSourceV2Relation".r .findAllMatchIn(explainWithoutExtended).size === 0) assert("ContinuousScan".r .findAllMatchIn(explainWithoutExtended).size === 1) val explainWithExtended = q.explainInternal(true) // `extended = true` displays 3 logical plans (Parsed/Optimized/Optimized) and 1 physical // plan. assert("StreamingDataSourceV2Relation".r .findAllMatchIn(explainWithExtended).size === 3) assert("ContinuousScan".r .findAllMatchIn(explainWithExtended).size === 1) } finally { q.stop() } } test("codegen-microbatch") { val inputData = MemoryStream[Int] val df = inputData.toDS().map(_ * 2).filter(_ > 5) // Test StreamingQuery.codegen val q = df.writeStream.queryName("memory_microbatch_codegen") .outputMode(OutputMode.Update) .format("memory") .trigger(Trigger.ProcessingTime("1 seconds")) .start() try { import org.apache.spark.sql.execution.debug._ assert("No physical plan. Waiting for data." 
=== codegenString(q)) assert(codegenStringSeq(q).isEmpty) inputData.addData(1, 2, 3, 4, 5) q.processAllAvailable() assertDebugCodegenResult(q) } finally { q.stop() } } test("codegen-continuous") { val inputData = ContinuousMemoryStream[Int] val df = inputData.toDS().map(_ * 2).filter(_ > 5) // Test StreamingQuery.codegen val q = df.writeStream.queryName("memory_continuous_codegen") .outputMode(OutputMode.Update) .format("memory") .trigger(Trigger.Continuous("1 seconds")) .start() try { // in continuous mode, the query will be run even there's no data // sleep a bit to ensure initialization eventually(timeout(2.seconds), interval(100.milliseconds)) { assert(q.asInstanceOf[StreamingQueryWrapper].streamingQuery.lastExecution != null) } assertDebugCodegenResult(q) } finally { q.stop() } } private def assertDebugCodegenResult(query: StreamingQuery): Unit = { import org.apache.spark.sql.execution.debug._ val codegenStr = codegenString(query) assert(codegenStr.contains("Found 1 WholeStageCodegen subtrees.")) // assuming that code is generated for the test query assert(codegenStr.contains("Generated code:")) val codegenStrSeq = codegenStringSeq(query) assert(codegenStrSeq.nonEmpty) assert(codegenStrSeq.head._1.contains("*(1)")) assert(codegenStrSeq.head._2.contains("codegenStageId=1")) } test("SPARK-19065: dropDuplicates should not create expressions using the same id") { withTempPath { testPath => val data = Seq((1, 2), (2, 3), (3, 4)) data.toDS.write.mode("overwrite").json(testPath.getCanonicalPath) val schema = spark.read.json(testPath.getCanonicalPath).schema val query = spark .readStream .schema(schema) .json(testPath.getCanonicalPath) .dropDuplicates("_1") .writeStream .format("memory") .queryName("testquery") .outputMode("append") .start() try { query.processAllAvailable() if (query.exception.isDefined) { throw query.exception.get } } finally { query.stop() } } } test("handle IOException when the streaming thread is interrupted (pre Hadoop 2.8)") { // This test uses a fake source to throw the same IOException as pre Hadoop 2.8 when the // streaming thread is interrupted. We should handle it properly by not failing the query. ThrowingIOExceptionLikeHadoop12074.createSourceLatch = new CountDownLatch(1) val query = spark .readStream .format(classOf[ThrowingIOExceptionLikeHadoop12074].getName) .load() .writeStream .format("console") .start() assert(ThrowingIOExceptionLikeHadoop12074.createSourceLatch .await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS), "ThrowingIOExceptionLikeHadoop12074.createSource wasn't called before timeout") query.stop() assert(query.exception.isEmpty) } test("handle InterruptedIOException when the streaming thread is interrupted (Hadoop 2.8+)") { // This test uses a fake source to throw the same InterruptedIOException as Hadoop 2.8+ when the // streaming thread is interrupted. We should handle it properly by not failing the query. 
ThrowingInterruptedIOException.createSourceLatch = new CountDownLatch(1) val query = spark .readStream .format(classOf[ThrowingInterruptedIOException].getName) .load() .writeStream .format("console") .start() assert(ThrowingInterruptedIOException.createSourceLatch .await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS), "ThrowingInterruptedIOException.createSource wasn't called before timeout") query.stop() assert(query.exception.isEmpty) } test("SPARK-19873: streaming aggregation with change in number of partitions") { val inputData = MemoryStream[(Int, Int)] val agg = inputData.toDS().groupBy("_1").count() testStream(agg, OutputMode.Complete())( AddData(inputData, (1, 0), (2, 0)), StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "2")), CheckAnswer((1, 1), (2, 1)), StopStream, AddData(inputData, (3, 0), (2, 0)), StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "5")), CheckAnswer((1, 1), (2, 2), (3, 1)), StopStream, AddData(inputData, (3, 0), (1, 0)), StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")), CheckAnswer((1, 2), (2, 2), (3, 2))) } testQuietly("recover from a Spark v2.1 checkpoint") { var inputData: MemoryStream[Int] = null var query: DataStreamWriter[Row] = null def prepareMemoryStream(): Unit = { inputData = MemoryStream[Int] inputData.addData(1, 2, 3, 4) inputData.addData(3, 4, 5, 6) inputData.addData(5, 6, 7, 8) query = inputData .toDF() .groupBy($"value") .agg(count("*")) .writeStream .outputMode("complete") .format("memory") } // Get an existing checkpoint generated by Spark v2.1. // v2.1 does not record # shuffle partitions in the offset metadata. val resourceUri = this.getClass.getResource("/structured-streaming/checkpoint-version-2.1.0").toURI val checkpointDir = new File(resourceUri) // 1 - Test if recovery from the checkpoint is successful. prepareMemoryStream() val dir1 = Utils.createTempDir().getCanonicalFile // not using withTempDir {}, makes test flaky // Copy the checkpoint to a temp dir to prevent changes to the original. // Not doing this will lead to the test passing on the first run, but fail subsequent runs. FileUtils.copyDirectory(checkpointDir, dir1) // Checkpoint data was generated by a query with 10 shuffle partitions. // In order to test reading from the checkpoint, the checkpoint must have two or more batches, // since the last batch may be rerun. withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "10") { var streamingQuery: StreamingQuery = null try { streamingQuery = query.queryName("counts").option("checkpointLocation", dir1.getCanonicalPath).start() streamingQuery.processAllAvailable() inputData.addData(9) streamingQuery.processAllAvailable() checkAnswer(spark.table("counts").toDF(), Row(1, 1L) :: Row(2, 1L) :: Row(3, 2L) :: Row(4, 2L) :: Row(5, 2L) :: Row(6, 2L) :: Row(7, 1L) :: Row(8, 1L) :: Row(9, 1L) :: Nil) } finally { if (streamingQuery ne null) { streamingQuery.stop() } } } // 2 - Check recovery with wrong num shuffle partitions prepareMemoryStream() val dir2 = Utils.createTempDir().getCanonicalFile FileUtils.copyDirectory(checkpointDir, dir2) // Since the number of partitions is greater than 10, should throw exception. 
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "15") { var streamingQuery: StreamingQuery = null try { intercept[StreamingQueryException] { streamingQuery = query.queryName("badQuery").option("checkpointLocation", dir2.getCanonicalPath).start() streamingQuery.processAllAvailable() } } finally { if (streamingQuery ne null) { streamingQuery.stop() } } } } test("calling stop() on a query cancels related jobs") { val input = MemoryStream[Int] val query = input .toDS() .map { i => while (!TaskContext.get().isInterrupted()) { // keep looping till interrupted by query.stop() Thread.sleep(100) } i } .writeStream .format("console") .start() input.addData(1) // wait for jobs to start eventually(timeout(streamingTimeout)) { assert(sparkContext.statusTracker.getActiveJobIds().nonEmpty) } query.stop() // make sure jobs are stopped eventually(timeout(streamingTimeout)) { assert(sparkContext.statusTracker.getActiveJobIds().isEmpty) } } test("batch id is updated correctly in the job description") { val queryName = "memStream" @volatile var jobDescription: String = null def assertDescContainsQueryNameAnd(batch: Integer): Unit = { // wait for listener event to be processed spark.sparkContext.listenerBus.waitUntilEmpty(streamingTimeout.toMillis) assert(jobDescription.contains(queryName) && jobDescription.contains(s"batch = $batch")) } spark.sparkContext.addSparkListener(new SparkListener { override def onJobStart(jobStart: SparkListenerJobStart): Unit = { jobDescription = jobStart.properties.getProperty(SparkContext.SPARK_JOB_DESCRIPTION) } }) val input = MemoryStream[Int] val query = input .toDS() .map(_ + 1) .writeStream .format("memory") .queryName(queryName) .start() input.addData(1) query.processAllAvailable() assertDescContainsQueryNameAnd(batch = 0) input.addData(2, 3) query.processAllAvailable() assertDescContainsQueryNameAnd(batch = 1) input.addData(4) query.processAllAvailable() assertDescContainsQueryNameAnd(batch = 2) query.stop() } test("should resolve the checkpoint path") { withTempDir { dir => val checkpointLocation = dir.getCanonicalPath assert(!checkpointLocation.startsWith("file:/")) val query = MemoryStream[Int].toDF .writeStream .option("checkpointLocation", checkpointLocation) .format("console") .start() try { val resolvedCheckpointDir = query.asInstanceOf[StreamingQueryWrapper].streamingQuery.resolvedCheckpointRoot assert(resolvedCheckpointDir.startsWith("file:/")) } finally { query.stop() } } } testQuietly("specify custom state store provider") { val providerClassName = classOf[TestStateStoreProvider].getCanonicalName withSQLConf(SQLConf.STATE_STORE_PROVIDER_CLASS.key -> providerClassName) { val input = MemoryStream[Int] val df = input.toDS().groupBy().count() val query = df.writeStream.outputMode("complete").format("memory").queryName("name").start() input.addData(1, 2, 3) val e = intercept[Exception] { query.awaitTermination() } TestUtils.assertExceptionMsg(e, providerClassName) TestUtils.assertExceptionMsg(e, "instantiated") } } testQuietly("custom state store provider read from offset log") { val input = MemoryStream[Int] val df = input.toDS().groupBy().count() val providerConf1 = SQLConf.STATE_STORE_PROVIDER_CLASS.key -> "org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider" val providerConf2 = SQLConf.STATE_STORE_PROVIDER_CLASS.key -> classOf[TestStateStoreProvider].getCanonicalName def runQuery(queryName: String, checkpointLoc: String): Unit = { val query = df.writeStream .outputMode("complete") .format("memory") .queryName(queryName) 
.option("checkpointLocation", checkpointLoc) .start() input.addData(1, 2, 3) query.processAllAvailable() query.stop() } withTempDir { dir => val checkpointLoc1 = new File(dir, "1").getCanonicalPath withSQLConf(providerConf1) { runQuery("query1", checkpointLoc1) // generate checkpoints } val checkpointLoc2 = new File(dir, "2").getCanonicalPath withSQLConf(providerConf2) { // Verify new query will use new provider that throw error on loading intercept[Exception] { runQuery("query2", checkpointLoc2) } // Verify old query from checkpoint will still use old provider runQuery("query1", checkpointLoc1) } } } test("streaming limit without state") { val inputData1 = MemoryStream[Int] testStream(inputData1.toDF().limit(0))( AddData(inputData1, 1 to 8: _*), CheckAnswer()) val inputData2 = MemoryStream[Int] testStream(inputData2.toDF().limit(4))( AddData(inputData2, 1 to 8: _*), CheckAnswer(1 to 4: _*)) } test("streaming limit with state") { val inputData = MemoryStream[Int] testStream(inputData.toDF().limit(4))( AddData(inputData, 1 to 2: _*), CheckAnswer(1 to 2: _*), AddData(inputData, 3 to 6: _*), CheckAnswer(1 to 4: _*), AddData(inputData, 7 to 9: _*), CheckAnswer(1 to 4: _*)) } test("streaming limit with other operators") { val inputData = MemoryStream[Int] testStream(inputData.toDF().where("value % 2 = 1").limit(4))( AddData(inputData, 1 to 5: _*), CheckAnswer(1, 3, 5), AddData(inputData, 6 to 9: _*), CheckAnswer(1, 3, 5, 7), AddData(inputData, 10 to 12: _*), CheckAnswer(1, 3, 5, 7)) } test("streaming limit with multiple limits") { val inputData1 = MemoryStream[Int] testStream(inputData1.toDF().limit(4).limit(2))( AddData(inputData1, 1), CheckAnswer(1), AddData(inputData1, 2 to 8: _*), CheckAnswer(1, 2)) val inputData2 = MemoryStream[Int] testStream(inputData2.toDF().limit(4).limit(100).limit(3))( AddData(inputData2, 1, 2), CheckAnswer(1, 2), AddData(inputData2, 3 to 8: _*), CheckAnswer(1 to 3: _*)) } test("SPARK-30658: streaming limit before agg in complete mode") { val inputData = MemoryStream[Int] val limited = inputData.toDF().limit(5).groupBy("value").count() testStream(limited, OutputMode.Complete())( AddData(inputData, 1 to 3: _*), CheckAnswer(Row(1, 1), Row(2, 1), Row(3, 1)), AddData(inputData, 1 to 9: _*), CheckAnswer(Row(1, 2), Row(2, 2), Row(3, 1))) } test("SPARK-30658: streaming limits before and after agg in complete mode " + "(after limit < before limit)") { val inputData = MemoryStream[Int] val limited = inputData.toDF().limit(4).groupBy("value").count().orderBy("value").limit(3) testStream(limited, OutputMode.Complete())( StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")), AddData(inputData, 1 to 9: _*), // only 1 to 4 should be allowed to aggregate, and counts for only 1 to 3 should be output CheckAnswer(Row(1, 1), Row(2, 1), Row(3, 1)), AddData(inputData, 2 to 6: _*), // None of the new values should be allowed to aggregate, same 3 counts should be output CheckAnswer(Row(1, 1), Row(2, 1), Row(3, 1))) } test("SPARK-30658: streaming limits before and after agg in complete mode " + "(before limit < after limit)") { val inputData = MemoryStream[Int] val limited = inputData.toDF().limit(2).groupBy("value").count().orderBy("value").limit(3) testStream(limited, OutputMode.Complete())( StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")), AddData(inputData, 1 to 9: _*), CheckAnswer(Row(1, 1), Row(2, 1)), AddData(inputData, 2 to 6: _*), CheckAnswer(Row(1, 1), Row(2, 1))) } test("SPARK-30657: streaming limit after streaming dedup in append 
mode") { val inputData = MemoryStream[Int] val limited = inputData.toDF().dropDuplicates().limit(1) testStream(limited)( AddData(inputData, 1, 2), CheckAnswer(Row(1)), AddData(inputData, 3, 4), CheckAnswer(Row(1))) } test("streaming limit in update mode") { val inputData = MemoryStream[Int] val e = intercept[AnalysisException] { testStream(inputData.toDF().limit(5), OutputMode.Update())( AddData(inputData, 1 to 3: _*) ) } assert(e.getMessage.contains( "Limits are not supported on streaming DataFrames/Datasets in Update output mode")) } test("streaming limit in multiple partitions") { val inputData = MemoryStream[Int] testStream(inputData.toDF().repartition(2).limit(7))( AddData(inputData, 1 to 10: _*), CheckAnswerRowsByFunc( rows => assert(rows.size == 7 && rows.forall(r => r.getInt(0) <= 10)), false), AddData(inputData, 11 to 20: _*), CheckAnswerRowsByFunc( rows => assert(rows.size == 7 && rows.forall(r => r.getInt(0) <= 10)), false)) } test("streaming limit in multiple partitions by column") { val inputData = MemoryStream[(Int, Int)] val df = inputData.toDF().repartition(2, $"_2").limit(7) testStream(df)( AddData(inputData, (1, 0), (2, 0), (3, 1), (4, 1)), CheckAnswerRowsByFunc( rows => assert(rows.size == 4 && rows.forall(r => r.getInt(0) <= 4)), false), AddData(inputData, (5, 0), (6, 0), (7, 1), (8, 1)), CheckAnswerRowsByFunc( rows => assert(rows.size == 7 && rows.forall(r => r.getInt(0) <= 8)), false)) } test("SPARK-30657: streaming limit should not apply on limits on state subplans") { val streamData = MemoryStream[Int] val streamingDF = streamData.toDF().toDF("value") val staticDF = spark.createDataset(Seq(1)).toDF("value").orderBy("value") testStream(streamingDF.join(staticDF.limit(1), "value"))( AddData(streamData, 1, 2, 3), CheckAnswer(Row(1)), AddData(streamData, 1, 3, 5), CheckAnswer(Row(1), Row(1))) } test("SPARK-30657: streaming limit optimization from StreamingLocalLimitExec to LocalLimitExec") { val inputData = MemoryStream[Int] val inputDF = inputData.toDF() /** Verify whether the local limit in the plan is a streaming limit or is a simple */ def verifyLocalLimit( df: DataFrame, expectStreamingLimit: Boolean, outputMode: OutputMode = OutputMode.Append): Unit = { var execPlan: SparkPlan = null testStream(df, outputMode)( AddData(inputData, 1), AssertOnQuery { q => q.processAllAvailable() execPlan = q.lastExecution.executedPlan true } ) require(execPlan != null) val localLimits = execPlan.collect { case l: LocalLimitExec => l case l: StreamingLocalLimitExec => l } require( localLimits.size == 1, s"Cant verify local limit optimization with this plan:\\n$execPlan") if (expectStreamingLimit) { assert( localLimits.head.isInstanceOf[StreamingLocalLimitExec], s"Local limit was not StreamingLocalLimitExec:\\n$execPlan") } else { assert( localLimits.head.isInstanceOf[LocalLimitExec], s"Local limit was not LocalLimitExec:\\n$execPlan") } } // Should not be optimized, so StreamingLocalLimitExec should be present verifyLocalLimit(inputDF.dropDuplicates().limit(1), expectStreamingLimit = true) // Should be optimized from StreamingLocalLimitExec to LocalLimitExec verifyLocalLimit(inputDF.limit(1), expectStreamingLimit = false) verifyLocalLimit( inputDF.limit(1).groupBy().count(), expectStreamingLimit = false, outputMode = OutputMode.Complete()) // Should be optimized as repartition is sufficient to ensure that the iterators of // StreamingDeduplicationExec should be consumed completely by the repartition exchange. 
verifyLocalLimit(inputDF.dropDuplicates().repartition(1).limit(1), expectStreamingLimit = false) // Should be LocalLimitExec in the first place, not from optimization of StreamingLocalLimitExec val staticDF = spark.range(2).toDF("value").limit(1) verifyLocalLimit(inputDF.toDF("value").join(staticDF, "value"), expectStreamingLimit = false) verifyLocalLimit( inputDF.groupBy("value").count().limit(1), expectStreamingLimit = false, outputMode = OutputMode.Complete()) } test("is_continuous_processing property should be false for microbatch processing") { val input = MemoryStream[Int] val df = input.toDS() .map(i => TaskContext.get().getLocalProperty(StreamExecution.IS_CONTINUOUS_PROCESSING)) testStream(df) ( AddData(input, 1), CheckAnswer("false") ) } test("is_continuous_processing property should be true for continuous processing") { val input = ContinuousMemoryStream[Int] val stream = input.toDS() .map(i => TaskContext.get().getLocalProperty(StreamExecution.IS_CONTINUOUS_PROCESSING)) .writeStream.format("memory") .queryName("output") .trigger(Trigger.Continuous("1 seconds")) .start() try { input.addData(1) stream.processAllAvailable() } finally { stream.stop() } checkAnswer(spark.sql("select * from output"), Row("true")) } for (e <- Seq( new InterruptedException, new InterruptedIOException, new ClosedByInterruptException, new UncheckedIOException("test", new ClosedByInterruptException), new ExecutionException("test", new InterruptedException), new UncheckedExecutionException("test", new InterruptedException))) { test(s"view ${e.getClass.getSimpleName} as a normal query stop") { ThrowingExceptionInCreateSource.createSourceLatch = new CountDownLatch(1) ThrowingExceptionInCreateSource.exception = e val query = spark .readStream .format(classOf[ThrowingExceptionInCreateSource].getName) .load() .writeStream .format("console") .start() assert(ThrowingExceptionInCreateSource.createSourceLatch .await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS), "ThrowingExceptionInCreateSource.createSource wasn't called before timeout") query.stop() assert(query.exception.isEmpty) } } test("SPARK-26379 Structured Streaming - Exception on adding current_timestamp " + " to Dataset - use v2 sink") { testCurrentTimestampOnStreamingQuery() } test("SPARK-26379 Structured Streaming - Exception on adding current_timestamp " + " to Dataset - use v1 sink") { testCurrentTimestampOnStreamingQuery() } private def testCurrentTimestampOnStreamingQuery(): Unit = { val input = MemoryStream[Int] val df = input.toDS().withColumn("cur_timestamp", lit(current_timestamp())) def assertBatchOutputAndUpdateLastTimestamp( rows: Seq[Row], curTimestamp: Long, curDate: Int, expectedValue: Int): Long = { assert(rows.size === 1) val row = rows.head assert(row.getInt(0) === expectedValue) assert(row.getTimestamp(1).getTime >= curTimestamp) row.getTimestamp(1).getTime } var lastTimestamp = System.currentTimeMillis() val currentDate = DateTimeUtils.microsToDays( DateTimeUtils.millisToMicros(lastTimestamp), ZoneId.systemDefault) testStream(df) ( AddData(input, 1), CheckLastBatch { rows: Seq[Row] => lastTimestamp = assertBatchOutputAndUpdateLastTimestamp(rows, lastTimestamp, currentDate, 1) }, Execute { _ => Thread.sleep(1000) }, AddData(input, 2), CheckLastBatch { rows: Seq[Row] => lastTimestamp = assertBatchOutputAndUpdateLastTimestamp(rows, lastTimestamp, currentDate, 2) } ) } // ProcessingTime trigger generates MicroBatchExecution, and ContinuousTrigger starts a // ContinuousExecution Seq(Trigger.ProcessingTime("1 second"), 
Trigger.Continuous("1 second")).foreach { trigger => test(s"SPARK-30143: stop waits until timeout if blocked - trigger: $trigger") { BlockOnStopSourceProvider.enableBlocking() val sq = spark.readStream.format(classOf[BlockOnStopSourceProvider].getName) .load() .writeStream .format("console") .trigger(trigger) .start() failAfter(60.seconds) { val startTime = System.nanoTime() withSQLConf(SQLConf.STREAMING_STOP_TIMEOUT.key -> "2000") { val ex = intercept[TimeoutException] { sq.stop() } assert(ex.getMessage.contains(sq.id.toString)) } val duration = (System.nanoTime() - startTime) / 1e6 assert(duration >= 2000, s"Should have waited more than 2000 millis, but waited $duration millis") BlockOnStopSourceProvider.disableBlocking() withSQLConf(SQLConf.STREAMING_STOP_TIMEOUT.key -> "0") { sq.stop() } } } } } abstract class FakeSource extends StreamSourceProvider { private val fakeSchema = StructType(StructField("a", LongType) :: Nil) override def sourceSchema( spark: SQLContext, schema: Option[StructType], providerName: String, parameters: Map[String, String]): (String, StructType) = ("fakeSource", fakeSchema) } /** A fake StreamSourceProvider that creates a fake Source that cannot be reused. */ class FakeDefaultSource extends FakeSource { override def createSource( spark: SQLContext, metadataPath: String, schema: Option[StructType], providerName: String, parameters: Map[String, String]): Source = { // Create a fake Source that emits 0 to 10. new Source { private var offset = -1L override def schema: StructType = StructType(StructField("a", LongType) :: Nil) override def getOffset: Option[Offset] = { if (offset >= 10) { None } else { offset += 1 Some(LongOffset(offset)) } } override def getBatch(start: Option[Offset], end: Offset): DataFrame = { val startOffset = start.map(_.asInstanceOf[LongOffset].offset).getOrElse(-1L) + 1 val ds = new Dataset[java.lang.Long]( spark.sparkSession, Range( startOffset, end.asInstanceOf[LongOffset].offset + 1, 1, Some(spark.sparkSession.sparkContext.defaultParallelism), isStreaming = true), Encoders.LONG) ds.toDF("a") } override def stop(): Unit = {} } } } /** A fake source that throws the same IOException like pre Hadoop 2.8 when it's interrupted. */ class ThrowingIOExceptionLikeHadoop12074 extends FakeSource { import ThrowingIOExceptionLikeHadoop12074._ override def createSource( spark: SQLContext, metadataPath: String, schema: Option[StructType], providerName: String, parameters: Map[String, String]): Source = { createSourceLatch.countDown() try { Thread.sleep(30000) throw new TimeoutException("sleep was not interrupted in 30 seconds") } catch { case ie: InterruptedException => throw new IOException(ie.toString) } } } object ThrowingIOExceptionLikeHadoop12074 { /** * A latch to allow the user to wait until `ThrowingIOExceptionLikeHadoop12074.createSource` is * called. */ @volatile var createSourceLatch: CountDownLatch = null } /** A fake source that throws InterruptedIOException like Hadoop 2.8+ when it's interrupted. 
*/ class ThrowingInterruptedIOException extends FakeSource { import ThrowingInterruptedIOException._ override def createSource( spark: SQLContext, metadataPath: String, schema: Option[StructType], providerName: String, parameters: Map[String, String]): Source = { createSourceLatch.countDown() try { Thread.sleep(30000) throw new TimeoutException("sleep was not interrupted in 30 seconds") } catch { case ie: InterruptedException => val iie = new InterruptedIOException(ie.toString) iie.initCause(ie) throw iie } } } object ThrowingInterruptedIOException { /** * A latch to allow the user to wait until `ThrowingInterruptedIOException.createSource` is * called. */ @volatile var createSourceLatch: CountDownLatch = null } class TestStateStoreProvider extends StateStoreProvider { override def init( stateStoreId: StateStoreId, keySchema: StructType, valueSchema: StructType, indexOrdinal: Option[Int], storeConfs: StateStoreConf, hadoopConf: Configuration): Unit = { throw new Exception("Successfully instantiated") } override def stateStoreId: StateStoreId = null override def close(): Unit = { } override def getStore(version: Long): StateStore = null } /** A fake source that throws `ThrowingExceptionInCreateSource.exception` in `createSource` */ class ThrowingExceptionInCreateSource extends FakeSource { override def createSource( spark: SQLContext, metadataPath: String, schema: Option[StructType], providerName: String, parameters: Map[String, String]): Source = { ThrowingExceptionInCreateSource.createSourceLatch.countDown() try { Thread.sleep(30000) throw new TimeoutException("sleep was not interrupted in 30 seconds") } catch { case _: InterruptedException => throw ThrowingExceptionInCreateSource.exception } } } object ThrowingExceptionInCreateSource { /** * A latch to allow the user to wait until `ThrowingExceptionInCreateSource.createSource` is * called. */ @volatile var createSourceLatch: CountDownLatch = null @volatile var exception: Exception = null }
witgo/spark
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
Scala
apache-2.0
51,538
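// A minimal sketch of the MemoryStream -> memory-sink pattern that the suite above exercises,
// assuming an already-running SparkSession passed in as `spark`; the object name, the query name
// "sketch" and the surrounding setup are illustrative assumptions, not code from the suite itself.
import org.apache.spark.sql.execution.streaming.MemoryStream

object MemorySinkSketch {
  def run(spark: org.apache.spark.sql.SparkSession): Unit = {
    import spark.implicits._
    implicit val sqlCtx: org.apache.spark.sql.SQLContext = spark.sqlContext

    val input = MemoryStream[Int]
    val query = input.toDS()
      .map(_ + 1)
      .writeStream
      .format("memory")     // keep results in an in-memory table
      .queryName("sketch")  // table name used to read the results back
      .start()

    input.addData(1, 2, 3)
    query.processAllAvailable()   // block until the added data has been processed
    spark.table("sketch").show()  // expected rows: 2, 3, 4
    query.stop()
  }
}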
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/

package com.normation.rudder.services.queries

import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.nodes.{NodeGroup, NodeGroupId}
import com.unboundid.ldap.sdk.{DN, Filter}
import com.normation.ldap.sdk._
import BuildFilter._
import com.normation.rudder.domain.{RudderDit, RudderLDAPConstants}
import com.normation.inventory.ldap.core.LDAPConstants.{A_OC, A_NAME}
import RudderLDAPConstants._
import com.normation.utils.Control.sequence
import com.normation.inventory.ldap.core.LDAPConstants
import com.normation.rudder.repository.ldap.LDAPEntityMapper
import net.liftweb.common._
import com.normation.rudder.repository.WoNodeGroupRepository
import com.normation.rudder.repository.RoNodeGroupRepository
import com.normation.eventlog.EventActor
import com.normation.utils.HashcodeCaching
import com.normation.eventlog.ModificationId
import scala.util.matching.Regex
import com.normation.rudder.domain.queries._

/**
 * A container for a dynamic group update.
 * members are the list of members post-update;
 * removed/added members are computed against the
 * state pre-update.
 */
case class DynGroupDiff(
    members: Seq[NodeId],
    removed: Seq[NodeId],
    added:   Seq[NodeId]
) extends HashcodeCaching

trait DynGroupUpdaterService {
  /**
   * Update the given dynamic group, returning the diff
   * from the pre-update state.
   *
   * IMPORTANT NOTE: system groups are not updated by
   * this service!
   *
   * @return
   */
  def update(dynGroupId: NodeGroupId, modId: ModificationId, actor: EventActor, reason: Option[String]): Box[DynGroupDiff]
}

class DynGroupUpdaterServiceImpl(
    roNodeGroupRepository: RoNodeGroupRepository,
    woNodeGroupRepository: WoNodeGroupRepository,
    queryProcessor:        QueryProcessor
) extends DynGroupUpdaterService with Loggable {

  override def update(dynGroupId: NodeGroupId, modId: ModificationId, actor: EventActor, reason: Option[String]): Box[DynGroupDiff] = {
    for {
      (group, _)      <- roNodeGroupRepository.getNodeGroup(dynGroupId)
      isDynamic       <- if (group.isDynamic) Full("OK") else Failure("Cannot update a group that is not dynamic")
      query           <- group.query
      newMembers      <- queryProcessor.process(query) ?~! s"Error when processing request for updating dynamic group with id ${dynGroupId.value}"
      //save
      newMemberIdsSet = newMembers.map(_.id).toSet
      savedGroup      <- {
                           val newGroup = group.copy(serverList = newMemberIdsSet)
                           if (group.isSystem) {
                             woNodeGroupRepository.updateSystemGroup(newGroup, modId, actor, reason)
                           } else {
                             woNodeGroupRepository.update(newGroup, modId, actor, reason)
                           }
                         } ?~! s"Error when saving update for dynamic group '${group.name}' (${group.id.value})"
    } yield {
      val plus  = newMemberIdsSet -- group.serverList
      val minus = group.serverList -- newMemberIdsSet
      DynGroupDiff(newMemberIdsSet.toSeq, minus.toSeq, plus.toSeq)
    }
  }
}
Kegeruneku/rudder
rudder-core/src/main/scala/com/normation/rudder/services/queries/DynGroupUpdaterService.scala
Scala
agpl-3.0
4,705
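// A standalone sketch of the diff computation done at the end of update() above: members are the
// post-update set, "removed" are nodes present only before the update, "added" only after it.
// The simplified NodeId wrapper and the sample sets are assumptions for illustration only.
object DynGroupDiffSketch {
  final case class NodeId(value: String)

  def diff(before: Set[NodeId], after: Set[NodeId]): (Seq[NodeId], Seq[NodeId], Seq[NodeId]) = {
    val added   = after -- before   // corresponds to `plus` in the service
    val removed = before -- after   // corresponds to `minus` in the service
    (after.toSeq, removed.toSeq, added.toSeq) // mirrors DynGroupDiff(members, removed, added)
  }

  // diff(Set(NodeId("a"), NodeId("b")), Set(NodeId("b"), NodeId("c")))
  // yields members {b, c}, removed {a}, added {c} (Seq ordering is not guaranteed).
}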
package org.fuckboi

import org.scalatest._

abstract class FuckboiGeneratorTest extends FlatSpec with Matchers {
  val fuckboiGenerator = new FuckboiGenerator
  val byteCodeExecutor = new ByteCodeExecutor
  var className = "Hello"

  def getOutput(fuckboiCode: String): String = {
    val (bytecode, root) = fuckboiGenerator.generate(fuckboiCode, className)
    byteCodeExecutor.getOutput(bytecode, className)
  }
}
VirenMohindra/Fuckboi
src/test/scala/org/fuckboi/FuckboiGeneratorTest.scala
Scala
mit
419
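// A hypothetical concrete spec built on the abstract helper above, showing how getOutput is meant
// to be used from a FlatSpec test; the class name, program text and expected output are
// placeholders, not tests from the repository.
class EndToEndGeneratorTest extends FuckboiGeneratorTest {
  "The generator" should "compile and run a program end to end" in {
    val program = "..."   // source in the target language, elided here
    getOutput(program) shouldBe "expected output"
  }
}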
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources import java.util.{Locale, ServiceConfigurationError, ServiceLoader} import scala.collection.JavaConverters._ import scala.util.{Failure, Success, Try} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.spark.SparkException import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, TypeUtils} import org.apache.spark.sql.connector.catalog.TableProvider import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors} import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.execution.command.DataWritingCommand import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider import org.apache.spark.sql.execution.datasources.json.JsonFileFormat import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat import org.apache.spark.sql.execution.datasources.v2.FileDataSourceV2 import org.apache.spark.sql.execution.datasources.v2.orc.OrcDataSourceV2 import org.apache.spark.sql.execution.metric.SQLMetric import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.sources.{RateStreamProvider, TextSocketSourceProvider} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.sources._ import org.apache.spark.sql.streaming.OutputMode import org.apache.spark.sql.types.{DataType, StructField, StructType} import org.apache.spark.sql.util.SchemaUtils import org.apache.spark.util.{HadoopFSUtils, ThreadUtils, Utils} /** * The main class responsible for representing a pluggable Data Source in Spark SQL. In addition to * acting as the canonical set of parameters that can describe a Data Source, this class is used to * resolve a description to a concrete implementation that can be used in a query plan * (either batch or streaming) or to write out data using an external library. * * From an end user's perspective a DataSource description can be created explicitly using * [[org.apache.spark.sql.DataFrameReader]] or CREATE TABLE USING DDL. Additionally, this class is * used when resolving a description from a metastore to a concrete implementation. 
* * Many of the arguments to this class are optional, though depending on the specific API being used * these optional arguments might be filled in during resolution using either inference or external * metadata. For example, when reading a partitioned table from a file system, partition columns * will be inferred from the directory layout even if they are not specified. * * @param paths A list of file system paths that hold data. These will be globbed before if * the "__globPaths__" option is true, and will be qualified. This option only works * when reading from a [[FileFormat]]. * @param userSpecifiedSchema An optional specification of the schema of the data. When present * we skip attempting to infer the schema. * @param partitionColumns A list of column names that the relation is partitioned by. This list is * generally empty during the read path, unless this DataSource is managed * by Hive. In these cases, during `resolveRelation`, we will call * `getOrInferFileFormatSchema` for file based DataSources to infer the * partitioning. In other cases, if this list is empty, then this table * is unpartitioned. * @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data. * @param catalogTable Optional catalog table reference that can be used to push down operations * over the datasource to the catalog service. */ case class DataSource( sparkSession: SparkSession, className: String, paths: Seq[String] = Nil, userSpecifiedSchema: Option[StructType] = None, partitionColumns: Seq[String] = Seq.empty, bucketSpec: Option[BucketSpec] = None, options: Map[String, String] = Map.empty, catalogTable: Option[CatalogTable] = None) extends Logging { case class SourceInfo(name: String, schema: StructType, partitionColumns: Seq[String]) lazy val providingClass: Class[_] = { val cls = DataSource.lookupDataSource(className, sparkSession.sessionState.conf) // `providingClass` is used for resolving data source relation for catalog tables. // As now catalog for data source V2 is under development, here we fall back all the // [[FileDataSourceV2]] to [[FileFormat]] to guarantee the current catalog works. // [[FileDataSourceV2]] will still be used if we call the load()/save() method in // [[DataFrameReader]]/[[DataFrameWriter]], since they use method `lookupDataSource` // instead of `providingClass`. cls.newInstance() match { case f: FileDataSourceV2 => f.fallbackFileFormat case _ => cls } } private def providingInstance() = providingClass.getConstructor().newInstance() private def newHadoopConfiguration(): Configuration = sparkSession.sessionState.newHadoopConfWithOptions(options) lazy val sourceInfo: SourceInfo = sourceSchema() private val caseInsensitiveOptions = CaseInsensitiveMap(options) private val equality = sparkSession.sessionState.conf.resolver /** * Whether or not paths should be globbed before being used to access files. */ def globPaths: Boolean = { options.get(DataSource.GLOB_PATHS_KEY) .map(_ == "true") .getOrElse(true) } bucketSpec.map { bucket => SchemaUtils.checkColumnNameDuplication( bucket.bucketColumnNames, "in the bucket definition", equality) SchemaUtils.checkColumnNameDuplication( bucket.sortColumnNames, "in the sort definition", equality) } /** * Get the schema of the given FileFormat, if provided by `userSpecifiedSchema`, or try to infer * it. In the read path, only managed tables by Hive provide the partition columns properly when * initializing this class. 
All other file based data sources will try to infer the partitioning, * and then cast the inferred types to user specified dataTypes if the partition columns exist * inside `userSpecifiedSchema`, otherwise we can hit data corruption bugs like SPARK-18510. * This method will try to skip file scanning whether `userSpecifiedSchema` and * `partitionColumns` are provided. Here are some code paths that use this method: * 1. `spark.read` (no schema): Most amount of work. Infer both schema and partitioning columns * 2. `spark.read.schema(userSpecifiedSchema)`: Parse partitioning columns, cast them to the * dataTypes provided in `userSpecifiedSchema` if they exist or fallback to inferred * dataType if they don't. * 3. `spark.readStream.schema(userSpecifiedSchema)`: For streaming use cases, users have to * provide the schema. Here, we also perform partition inference like 2, and try to use * dataTypes in `userSpecifiedSchema`. All subsequent triggers for this stream will re-use * this information, therefore calls to this method should be very cheap, i.e. there won't * be any further inference in any triggers. * * @param format the file format object for this DataSource * @param getFileIndex [[InMemoryFileIndex]] for getting partition schema and file list * @return A pair of the data schema (excluding partition columns) and the schema of the partition * columns. */ private def getOrInferFileFormatSchema( format: FileFormat, getFileIndex: () => InMemoryFileIndex): (StructType, StructType) = { lazy val tempFileIndex = getFileIndex() val partitionSchema = if (partitionColumns.isEmpty) { // Try to infer partitioning, because no DataSource in the read path provides the partitioning // columns properly unless it is a Hive DataSource tempFileIndex.partitionSchema } else { // maintain old behavior before SPARK-18510. If userSpecifiedSchema is empty used inferred // partitioning if (userSpecifiedSchema.isEmpty) { val inferredPartitions = tempFileIndex.partitionSchema inferredPartitions } else { val partitionFields = partitionColumns.map { partitionColumn => userSpecifiedSchema.flatMap(_.find(c => equality(c.name, partitionColumn))).orElse { val inferredPartitions = tempFileIndex.partitionSchema val inferredOpt = inferredPartitions.find(p => equality(p.name, partitionColumn)) if (inferredOpt.isDefined) { logDebug( s"""Type of partition column: $partitionColumn not found in specified schema |for $format. |User Specified Schema |===================== |${userSpecifiedSchema.orNull} | |Falling back to inferred dataType if it exists. """.stripMargin) } inferredOpt }.getOrElse { throw QueryCompilationErrors.partitionColumnNotSpecifiedError( format.toString, partitionColumn) } } StructType(partitionFields) } } val dataSchema = userSpecifiedSchema.map { schema => StructType(schema.filterNot(f => partitionSchema.exists(p => equality(p.name, f.name)))) }.orElse { // Remove "path" option so that it is not added to the paths returned by // `tempFileIndex.allFiles()`. format.inferSchema( sparkSession, caseInsensitiveOptions - "path", tempFileIndex.allFiles()) }.getOrElse { throw QueryCompilationErrors.dataSchemaNotSpecifiedError(format.toString) } // We just print a warning message if the data schema and partition schema have the duplicate // columns. This is because we allow users to do so in the previous Spark releases and // we have the existing tests for the cases (e.g., `ParquetHadoopFsRelationSuite`). // See SPARK-18108 and SPARK-21144 for related discussions. 
try { SchemaUtils.checkColumnNameDuplication( (dataSchema ++ partitionSchema).map(_.name), "in the data schema and the partition schema", equality) } catch { case e: AnalysisException => logWarning(e.getMessage) } (dataSchema, partitionSchema) } /** Returns the name and schema of the source that can be used to continually read data. */ private def sourceSchema(): SourceInfo = { providingInstance() match { case s: StreamSourceProvider => val (name, schema) = s.sourceSchema( sparkSession.sqlContext, userSpecifiedSchema, className, caseInsensitiveOptions) SourceInfo(name, schema, Nil) case format: FileFormat => val path = caseInsensitiveOptions.getOrElse("path", { throw QueryExecutionErrors.dataPathNotSpecifiedError() }) // Check whether the path exists if it is not a glob pattern. // For glob pattern, we do not check it because the glob pattern might only make sense // once the streaming job starts and some upstream source starts dropping data. val hdfsPath = new Path(path) if (!globPaths || !SparkHadoopUtil.get.isGlobPath(hdfsPath)) { val fs = hdfsPath.getFileSystem(newHadoopConfiguration()) if (!fs.exists(hdfsPath)) { throw QueryCompilationErrors.dataPathNotExistError(path) } } val isSchemaInferenceEnabled = sparkSession.sessionState.conf.streamingSchemaInference val isTextSource = providingClass == classOf[text.TextFileFormat] // If the schema inference is disabled, only text sources require schema to be specified if (!isSchemaInferenceEnabled && !isTextSource && userSpecifiedSchema.isEmpty) { throw QueryExecutionErrors.createStreamingSourceNotSpecifySchemaError() } val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format, () => { // The operations below are expensive therefore try not to do them if we don't need to, // e.g., in streaming mode, we have already inferred and registered partition columns, // we will never have to materialize the lazy val below val globbedPaths = checkAndGlobPathIfNecessary(checkEmptyGlobPath = false, checkFilesExist = false) createInMemoryFileIndex(globbedPaths) }) val forceNullable = sparkSession.sessionState.conf.getConf(SQLConf.FILE_SOURCE_SCHEMA_FORCE_NULLABLE) val sourceDataSchema = if (forceNullable) dataSchema.asNullable else dataSchema SourceInfo( s"FileSource[$path]", StructType(sourceDataSchema ++ partitionSchema), partitionSchema.fieldNames) case _ => throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError( className, "reading") } } /** Returns a source that can be used to continually read data. */ def createSource(metadataPath: String): Source = { providingInstance() match { case s: StreamSourceProvider => s.createSource( sparkSession.sqlContext, metadataPath, userSpecifiedSchema, className, caseInsensitiveOptions) case format: FileFormat => val path = caseInsensitiveOptions.getOrElse("path", { throw QueryExecutionErrors.dataPathNotSpecifiedError() }) new FileStreamSource( sparkSession = sparkSession, path = path, fileFormatClassName = className, schema = sourceInfo.schema, partitionColumns = sourceInfo.partitionColumns, metadataPath = metadataPath, options = caseInsensitiveOptions) case _ => throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError( className, "reading") } } /** Returns a sink that can be used to continually write data. 
*/ def createSink(outputMode: OutputMode): Sink = { providingInstance() match { case s: StreamSinkProvider => s.createSink(sparkSession.sqlContext, caseInsensitiveOptions, partitionColumns, outputMode) case fileFormat: FileFormat => val path = caseInsensitiveOptions.getOrElse("path", { throw QueryExecutionErrors.dataPathNotSpecifiedError() }) if (outputMode != OutputMode.Append) { throw QueryCompilationErrors.dataSourceOutputModeUnsupportedError(className, outputMode) } new FileStreamSink(sparkSession, path, fileFormat, partitionColumns, caseInsensitiveOptions) case _ => throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError( className, "writing") } } /** * Create a resolved [[BaseRelation]] that can be used to read data from or write data into this * [[DataSource]] * * @param checkFilesExist Whether to confirm that the files exist when generating the * non-streaming file based datasource. StructuredStreaming jobs already * list file existence, and when generating incremental jobs, the batch * is considered as a non-streaming file based data source. Since we know * that files already exist, we don't need to check them again. */ def resolveRelation(checkFilesExist: Boolean = true): BaseRelation = { val relation = (providingInstance(), userSpecifiedSchema) match { // TODO: Throw when too much is given. case (dataSource: SchemaRelationProvider, Some(schema)) => dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions, schema) case (dataSource: RelationProvider, None) => dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions) case (_: SchemaRelationProvider, None) => throw QueryCompilationErrors.schemaNotSpecifiedForSchemaRelationProviderError(className) case (dataSource: RelationProvider, Some(schema)) => val baseRelation = dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions) if (baseRelation.schema != schema) { throw QueryCompilationErrors.userSpecifiedSchemaMismatchActualSchemaError( schema, baseRelation.schema) } baseRelation // We are reading from the results of a streaming query. Load files from the metadata log // instead of listing them using HDFS APIs. Note that the config // `spark.sql.streaming.fileStreamSink.metadata.ignored` can be enabled to ignore the // metadata log. case (format: FileFormat, _) if FileStreamSink.hasMetadata( caseInsensitiveOptions.get("path").toSeq ++ paths, newHadoopConfiguration(), sparkSession.sessionState.conf) => val basePath = new Path((caseInsensitiveOptions.get("path").toSeq ++ paths).head) val fileCatalog = new MetadataLogFileIndex(sparkSession, basePath, caseInsensitiveOptions, userSpecifiedSchema) val dataSchema = userSpecifiedSchema.orElse { // Remove "path" option so that it is not added to the paths returned by // `fileCatalog.allFiles()`. format.inferSchema( sparkSession, caseInsensitiveOptions - "path", fileCatalog.allFiles()) }.getOrElse { throw QueryCompilationErrors.dataSchemaNotSpecifiedError( format.toString, fileCatalog.allFiles().mkString(",")) } HadoopFsRelation( fileCatalog, partitionSchema = fileCatalog.partitionSchema, dataSchema = dataSchema, bucketSpec = None, format, caseInsensitiveOptions)(sparkSession) // This is a non-streaming file based datasource. 
case (format: FileFormat, _) => val useCatalogFileIndex = sparkSession.sqlContext.conf.manageFilesourcePartitions && catalogTable.isDefined && catalogTable.get.tracksPartitionsInCatalog && catalogTable.get.partitionColumnNames.nonEmpty val (fileCatalog, dataSchema, partitionSchema) = if (useCatalogFileIndex) { val defaultTableSize = sparkSession.sessionState.conf.defaultSizeInBytes val index = new CatalogFileIndex( sparkSession, catalogTable.get, catalogTable.get.stats.map(_.sizeInBytes.toLong).getOrElse(defaultTableSize)) (index, catalogTable.get.dataSchema, catalogTable.get.partitionSchema) } else { val globbedPaths = checkAndGlobPathIfNecessary( checkEmptyGlobPath = true, checkFilesExist = checkFilesExist) val index = createInMemoryFileIndex(globbedPaths) val (resultDataSchema, resultPartitionSchema) = getOrInferFileFormatSchema(format, () => index) (index, resultDataSchema, resultPartitionSchema) } HadoopFsRelation( fileCatalog, partitionSchema = partitionSchema, dataSchema = dataSchema.asNullable, bucketSpec = bucketSpec, format, caseInsensitiveOptions)(sparkSession) case _ => throw QueryCompilationErrors.invalidDataSourceError(className) } relation match { case hs: HadoopFsRelation => SchemaUtils.checkSchemaColumnNameDuplication( hs.dataSchema, "in the data schema", equality) SchemaUtils.checkSchemaColumnNameDuplication( hs.partitionSchema, "in the partition schema", equality) DataSourceUtils.verifySchema(hs.fileFormat, hs.dataSchema) case _ => SchemaUtils.checkSchemaColumnNameDuplication( relation.schema, "in the data schema", equality) } relation } /** * Creates a command node to write the given [[LogicalPlan]] out to the given [[FileFormat]]. * The returned command is unresolved and need to be analyzed. */ private def planForWritingFileFormat( format: FileFormat, mode: SaveMode, data: LogicalPlan): InsertIntoHadoopFsRelationCommand = { // Don't glob path for the write path. The contracts here are: // 1. Only one output path can be specified on the write path; // 2. Output path must be a legal HDFS style file system path; // 3. It's OK that the output path doesn't exist yet; val allPaths = paths ++ caseInsensitiveOptions.get("path") val outputPath = if (allPaths.length == 1) { val path = new Path(allPaths.head) val fs = path.getFileSystem(newHadoopConfiguration()) path.makeQualified(fs.getUri, fs.getWorkingDirectory) } else { throw QueryExecutionErrors.multiplePathsSpecifiedError(allPaths) } val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis PartitioningUtils.validatePartitionColumn(data.schema, partitionColumns, caseSensitive) val fileIndex = catalogTable.map(_.identifier).map { tableIdent => sparkSession.table(tableIdent).queryExecution.analyzed.collect { case LogicalRelation(t: HadoopFsRelation, _, _, _) => t.location }.head } // For partitioned relation r, r.schema's column ordering can be different from the column // ordering of data.logicalPlan (partition columns are all moved after data column). This // will be adjusted within InsertIntoHadoopFsRelation. InsertIntoHadoopFsRelationCommand( outputPath = outputPath, staticPartitions = Map.empty, ifPartitionNotExists = false, partitionColumns = partitionColumns.map(UnresolvedAttribute.quoted), bucketSpec = bucketSpec, fileFormat = format, options = options, query = data, mode = mode, catalogTable = catalogTable, fileIndex = fileIndex, outputColumnNames = data.output.map(_.name)) } /** * Writes the given [[LogicalPlan]] out to this [[DataSource]] and returns a [[BaseRelation]] for * the following reading. 
* * @param mode The save mode for this writing. * @param data The input query plan that produces the data to be written. Note that this plan * is analyzed and optimized. * @param outputColumnNames The original output column names of the input query plan. The * optimizer may not preserve the output column's names' case, so we need * this parameter instead of `data.output`. * @param physicalPlan The physical plan of the input query plan. We should run the writing * command with this physical plan instead of creating a new physical plan, * so that the metrics can be correctly linked to the given physical plan and * shown in the web UI. */ def writeAndRead( mode: SaveMode, data: LogicalPlan, outputColumnNames: Seq[String], physicalPlan: SparkPlan, metrics: Map[String, SQLMetric]): BaseRelation = { val outputColumns = DataWritingCommand.logicalPlanOutputWithNames(data, outputColumnNames) providingInstance() match { case dataSource: CreatableRelationProvider => disallowWritingIntervals(outputColumns.map(_.dataType), forbidAnsiIntervals = true) dataSource.createRelation( sparkSession.sqlContext, mode, caseInsensitiveOptions, Dataset.ofRows(sparkSession, data)) case format: FileFormat => disallowWritingIntervals(outputColumns.map(_.dataType), forbidAnsiIntervals = false) val cmd = planForWritingFileFormat(format, mode, data) val resolvedPartCols = cmd.partitionColumns.map { col => // The partition columns created in `planForWritingFileFormat` should always be // `UnresolvedAttribute` with a single name part. assert(col.isInstanceOf[UnresolvedAttribute]) val unresolved = col.asInstanceOf[UnresolvedAttribute] assert(unresolved.nameParts.length == 1) val name = unresolved.nameParts.head outputColumns.find(a => equality(a.name, name)).getOrElse { throw QueryCompilationErrors.cannotResolveAttributeError( name, data.output.map(_.name).mkString(", ")) } } val resolved = cmd.copy( partitionColumns = resolvedPartCols, outputColumnNames = outputColumnNames) resolved.run(sparkSession, physicalPlan) DataWritingCommand.propogateMetrics(sparkSession.sparkContext, resolved, metrics) // Replace the schema with that of the DataFrame we just wrote out to avoid re-inferring copy(userSpecifiedSchema = Some(outputColumns.toStructType.asNullable)).resolveRelation() case _ => sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.") } } /** * Returns a logical plan to write the given [[LogicalPlan]] out to this [[DataSource]]. */ def planForWriting(mode: SaveMode, data: LogicalPlan): LogicalPlan = { providingInstance() match { case dataSource: CreatableRelationProvider => disallowWritingIntervals(data.schema.map(_.dataType), forbidAnsiIntervals = true) SaveIntoDataSourceCommand(data, dataSource, caseInsensitiveOptions, mode) case format: FileFormat => disallowWritingIntervals(data.schema.map(_.dataType), forbidAnsiIntervals = false) DataSource.validateSchema(data.schema) planForWritingFileFormat(format, mode, data) case _ => sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.") } } /** Returns an [[InMemoryFileIndex]] that can be used to get partition schema and file list. */ private def createInMemoryFileIndex(globbedPaths: Seq[Path]): InMemoryFileIndex = { val fileStatusCache = FileStatusCache.getOrCreate(sparkSession) new InMemoryFileIndex( sparkSession, globbedPaths, options, userSpecifiedSchema, fileStatusCache) } /** * Checks and returns files in all the paths. 
*/ private def checkAndGlobPathIfNecessary( checkEmptyGlobPath: Boolean, checkFilesExist: Boolean): Seq[Path] = { val allPaths = caseInsensitiveOptions.get("path") ++ paths DataSource.checkAndGlobPathIfNecessary(allPaths.toSeq, newHadoopConfiguration(), checkEmptyGlobPath, checkFilesExist, enableGlobbing = globPaths) } private def disallowWritingIntervals( dataTypes: Seq[DataType], forbidAnsiIntervals: Boolean): Unit = { dataTypes.foreach( TypeUtils.invokeOnceForInterval(_, forbidAnsiIntervals) { throw QueryCompilationErrors.cannotSaveIntervalIntoExternalStorageError() }) } } object DataSource extends Logging { /** A map to maintain backward compatibility in case we move data sources around. */ private val backwardCompatibilityMap: Map[String, String] = { val jdbc = classOf[JdbcRelationProvider].getCanonicalName val json = classOf[JsonFileFormat].getCanonicalName val parquet = classOf[ParquetFileFormat].getCanonicalName val csv = classOf[CSVFileFormat].getCanonicalName val libsvm = "org.apache.spark.ml.source.libsvm.LibSVMFileFormat" val orc = "org.apache.spark.sql.hive.orc.OrcFileFormat" val nativeOrc = classOf[OrcFileFormat].getCanonicalName val socket = classOf[TextSocketSourceProvider].getCanonicalName val rate = classOf[RateStreamProvider].getCanonicalName Map( "org.apache.spark.sql.jdbc" -> jdbc, "org.apache.spark.sql.jdbc.DefaultSource" -> jdbc, "org.apache.spark.sql.execution.datasources.jdbc.DefaultSource" -> jdbc, "org.apache.spark.sql.execution.datasources.jdbc" -> jdbc, "org.apache.spark.sql.json" -> json, "org.apache.spark.sql.json.DefaultSource" -> json, "org.apache.spark.sql.execution.datasources.json" -> json, "org.apache.spark.sql.execution.datasources.json.DefaultSource" -> json, "org.apache.spark.sql.parquet" -> parquet, "org.apache.spark.sql.parquet.DefaultSource" -> parquet, "org.apache.spark.sql.execution.datasources.parquet" -> parquet, "org.apache.spark.sql.execution.datasources.parquet.DefaultSource" -> parquet, "org.apache.spark.sql.hive.orc.DefaultSource" -> orc, "org.apache.spark.sql.hive.orc" -> orc, "org.apache.spark.sql.execution.datasources.orc.DefaultSource" -> nativeOrc, "org.apache.spark.sql.execution.datasources.orc" -> nativeOrc, "org.apache.spark.ml.source.libsvm.DefaultSource" -> libsvm, "org.apache.spark.ml.source.libsvm" -> libsvm, "com.databricks.spark.csv" -> csv, "org.apache.spark.sql.execution.streaming.TextSocketSourceProvider" -> socket, "org.apache.spark.sql.execution.streaming.RateSourceProvider" -> rate ) } /** * Class that were removed in Spark 2.0. Used to detect incompatibility libraries for Spark 2.0. */ private val spark2RemovedClasses = Set( "org.apache.spark.sql.DataFrame", "org.apache.spark.sql.sources.HadoopFsRelationProvider", "org.apache.spark.Logging") /** Given a provider name, look up the data source class definition. 
*/ def lookupDataSource(provider: String, conf: SQLConf): Class[_] = { val provider1 = backwardCompatibilityMap.getOrElse(provider, provider) match { case name if name.equalsIgnoreCase("orc") && conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native" => classOf[OrcDataSourceV2].getCanonicalName case name if name.equalsIgnoreCase("orc") && conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "hive" => "org.apache.spark.sql.hive.orc.OrcFileFormat" case "com.databricks.spark.avro" if conf.replaceDatabricksSparkAvroEnabled => "org.apache.spark.sql.avro.AvroFileFormat" case name => name } val provider2 = s"$provider1.DefaultSource" val loader = Utils.getContextOrSparkClassLoader val serviceLoader = ServiceLoader.load(classOf[DataSourceRegister], loader) try { serviceLoader.asScala.filter(_.shortName().equalsIgnoreCase(provider1)).toList match { // the provider format did not match any given registered aliases case Nil => try { Try(loader.loadClass(provider1)).orElse(Try(loader.loadClass(provider2))) match { case Success(dataSource) => // Found the data source using fully qualified path dataSource case Failure(error) => if (provider1.startsWith("org.apache.spark.sql.hive.orc")) { throw QueryCompilationErrors.orcNotUsedWithHiveEnabledError() } else if (provider1.toLowerCase(Locale.ROOT) == "avro" || provider1 == "com.databricks.spark.avro" || provider1 == "org.apache.spark.sql.avro") { throw QueryCompilationErrors.failedToFindAvroDataSourceError(provider1) } else if (provider1.toLowerCase(Locale.ROOT) == "kafka") { throw QueryCompilationErrors.failedToFindKafkaDataSourceError(provider1) } else { throw QueryExecutionErrors.failedToFindDataSourceError(provider1, error) } } } catch { case e: NoClassDefFoundError => // This one won't be caught by Scala NonFatal // NoClassDefFoundError's class name uses "/" rather than "." for packages val className = e.getMessage.replaceAll("/", ".") if (spark2RemovedClasses.contains(className)) { throw QueryExecutionErrors.removedClassInSpark2Error(className, e) } else { throw e } } case head :: Nil => // there is exactly one registered alias head.getClass case sources => // There are multiple registered aliases for the input. If there is single datasource // that has "org.apache.spark" package in the prefix, we use it considering it is an // internal datasource within Spark. val sourceNames = sources.map(_.getClass.getName) val internalSources = sources.filter(_.getClass.getName.startsWith("org.apache.spark")) if (internalSources.size == 1) { logWarning(s"Multiple sources found for $provider1 (${sourceNames.mkString(", ")}), " + s"defaulting to the internal datasource (${internalSources.head.getClass.getName}).") internalSources.head.getClass } else { throw QueryCompilationErrors.findMultipleDataSourceError(provider1, sourceNames) } } } catch { case e: ServiceConfigurationError if e.getCause.isInstanceOf[NoClassDefFoundError] => // NoClassDefFoundError's class name uses "/" rather than "." for packages val className = e.getCause.getMessage.replaceAll("/", ".") if (spark2RemovedClasses.contains(className)) { throw QueryExecutionErrors.incompatibleDataSourceRegisterError(e) } else { throw e } } } /** * Returns an optional [[TableProvider]] instance for the given provider. It returns None if * there is no corresponding Data Source V2 implementation, or the provider is configured to * fallback to Data Source V1 code path. 
*/ def lookupDataSourceV2(provider: String, conf: SQLConf): Option[TableProvider] = { val useV1Sources = conf.getConf(SQLConf.USE_V1_SOURCE_LIST).toLowerCase(Locale.ROOT) .split(",").map(_.trim) val cls = lookupDataSource(provider, conf) cls.newInstance() match { case d: DataSourceRegister if useV1Sources.contains(d.shortName()) => None case t: TableProvider if !useV1Sources.contains(cls.getCanonicalName.toLowerCase(Locale.ROOT)) => Some(t) case _ => None } } /** * The key in the "options" map for deciding whether or not to glob paths before use. */ val GLOB_PATHS_KEY = "__globPaths__" /** * Checks and returns files in all the paths. */ private[sql] def checkAndGlobPathIfNecessary( pathStrings: Seq[String], hadoopConf: Configuration, checkEmptyGlobPath: Boolean, checkFilesExist: Boolean, numThreads: Integer = 40, enableGlobbing: Boolean): Seq[Path] = { val qualifiedPaths = pathStrings.map { pathString => val path = new Path(pathString) val fs = path.getFileSystem(hadoopConf) path.makeQualified(fs.getUri, fs.getWorkingDirectory) } // Split the paths into glob and non glob paths, because we don't need to do an existence check // for globbed paths. val (globPaths, nonGlobPaths) = qualifiedPaths.partition(SparkHadoopUtil.get.isGlobPath) val globbedPaths = try { ThreadUtils.parmap(globPaths, "globPath", numThreads) { globPath => val fs = globPath.getFileSystem(hadoopConf) val globResult = if (enableGlobbing) { SparkHadoopUtil.get.globPath(fs, globPath) } else { qualifiedPaths } if (checkEmptyGlobPath && globResult.isEmpty) { throw QueryCompilationErrors.dataPathNotExistError(globPath.toString) } globResult }.flatten } catch { case e: SparkException => throw e.getCause } if (checkFilesExist) { try { ThreadUtils.parmap(nonGlobPaths, "checkPathsExist", numThreads) { path => val fs = path.getFileSystem(hadoopConf) if (!fs.exists(path)) { throw QueryCompilationErrors.dataPathNotExistError(path.toString) } } } catch { case e: SparkException => throw e.getCause } } val allPaths = globbedPaths ++ nonGlobPaths if (checkFilesExist) { val (filteredOut, filteredIn) = allPaths.partition { path => HadoopFSUtils.shouldFilterOutPathName(path.getName) } if (filteredIn.isEmpty) { logWarning( s"All paths were ignored:\\n ${filteredOut.mkString("\\n ")}") } else { logDebug( s"Some paths were ignored:\\n ${filteredOut.mkString("\\n ")}") } } allPaths } /** * When creating a data source table, the `path` option has a special meaning: the table location. * This method extracts the `path` option and treat it as table location to build a * [[CatalogStorageFormat]]. Note that, the `path` option is removed from options after this. */ def buildStorageFormatFromOptions(options: Map[String, String]): CatalogStorageFormat = { val path = CaseInsensitiveMap(options).get("path") val optionsWithoutPath = options.filterKeys(_.toLowerCase(Locale.ROOT) != "path") CatalogStorageFormat.empty.copy( locationUri = path.map(CatalogUtils.stringToURI), properties = optionsWithoutPath.toMap) } /** * Called before writing into a FileFormat based data source to make sure the * supplied schema is not empty. * @param schema */ def validateSchema(schema: StructType): Unit = { def hasEmptySchema(schema: StructType): Boolean = { schema.size == 0 || schema.exists { case StructField(_, b: StructType, _, _) => hasEmptySchema(b) case _ => false } } if (hasEmptySchema(schema)) { throw QueryCompilationErrors.writeEmptySchemasUnsupportedByDataSourceError() } } }
nchammas/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
Scala
apache-2.0
38,833
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package controllers.responsiblepeople.address import connectors.DataCacheConnector import controllers.{AmlsBaseController, CommonPlayDependencies} import forms.{EmptyForm, Form2, InvalidForm, ValidForm} import javax.inject.{Inject, Singleton} import models.responsiblepeople.NewHomeAddress._ import models.responsiblepeople._ import play.api.mvc.MessagesControllerComponents import services.AutoCompleteService import utils.{AuthAction, ControllerHelper} import views.html.responsiblepeople.address.new_home_address_NonUK import scala.concurrent.Future @Singleton class NewHomeAddressNonUKController @Inject()(authAction: AuthAction, val dataCacheConnector: DataCacheConnector, val autoCompleteService: AutoCompleteService, val ds: CommonPlayDependencies, val cc: MessagesControllerComponents, new_home_address_NonUK: new_home_address_NonUK, implicit val error: views.html.error) extends AmlsBaseController(ds, cc) with AddressHelper { def get(index: Int) = authAction.async { implicit request => getData[ResponsiblePerson](request.credId, index) map { case Some(ResponsiblePerson(Some(personName), _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _)) => Ok(new_home_address_NonUK(EmptyForm, index, personName.titleName, autoCompleteService.getCountries)) case _ => NotFound(notFoundView) } } def post(index: Int) = authAction.async { implicit request => (Form2[NewHomeAddress](request.body) match { case f: InvalidForm => getData[ResponsiblePerson](request.credId, index) map { rp => BadRequest(new_home_address_NonUK(f, index, ControllerHelper.rpTitleName(rp), autoCompleteService.getCountries)) } case ValidForm(_, data) => { for { moveDate <- dataCacheConnector.fetch[NewHomeDateOfChange](request.credId, NewHomeDateOfChange.key) _ <- updateDataStrict[ResponsiblePerson](request.credId, index) { rp => rp.addressHistory(convertToCurrentAddress(data, moveDate, rp)) } _ <- dataCacheConnector.save[NewHomeDateOfChange](request.credId, NewHomeDateOfChange.key, NewHomeDateOfChange(None)) _ <- dataCacheConnector.removeByKey(request.credId, NewHomeAddress.key) } yield { Redirect(controllers.responsiblepeople.routes.DetailedAnswersController.get(index)) } } }).recoverWith { case _: IndexOutOfBoundsException => Future.successful(NotFound(notFoundView)) } } }
hmrc/amls-frontend
app/controllers/responsiblepeople/address/NewHomeAddressNonUKController.scala
Scala
apache-2.0
3,464
package pl.newicom.dddd.messaging import akka.contrib.pattern.ReceivePipeline import akka.contrib.pattern.ReceivePipeline.{Inner, HandledCompletely} import scala.collection.mutable /** * Designed to be used by persistent actors. Allows detecting duplicated messages sent to the actor. * Keeps a set of message IDs that were received by the actor. * * Provides messageProcessed(Message) method that should be called during the "update-state" stage. * The given message must contain CausationId attribute * referring to the ID of the received message. */ trait Deduplication { this: ReceivePipeline => private val ids: mutable.Set[String] = mutable.Set.empty pipelineInner { case msg: Message => if (wasReceived(msg)) { handleDuplicated(msg) HandledCompletely } else { Inner(msg) } } def handleDuplicated(msg: Message) def messageProcessed(msg: Message): Unit = msg.causationId.foreach(messageReceived) def wasReceived(msgId: String): Boolean = ids.contains(msgId) private def wasReceived(msg: Message): Boolean = wasReceived(msg.id) private def messageReceived(msgId: String): Unit = { ids += msgId } def receivedMsgIds: collection.immutable.Set[String] = ids.toSet def resetReceivedMsgIds(ids: collection.immutable.Set[String]) = this.ids ++= ids }
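// Hedged sketch, not part of akka-ddd: a persistent actor mixing in Deduplication.
// The persistence id, the "ack" reply and the assumption that commands arrive as
// `Message` instances carrying a CausationId are illustrative; only the trait
// contract above is relied upon.
class DeduplicatingEchoActor extends akka.persistence.PersistentActor
  with ReceivePipeline with Deduplication {

  override def persistenceId: String = "deduplicating-echo"

  override def receiveCommand: Receive = {
    case msg: Message =>
      persist(msg) { persisted =>
        messageProcessed(persisted) // "update-state" stage: remember the causation id
        sender() ! "ack"
      }
  }

  override def receiveRecover: Receive = {
    case msg: Message => messageProcessed(msg) // rebuild the received-ids set on replay
  }

  // Called by the pipeline instead of receiveCommand when a duplicate arrives.
  override def handleDuplicated(msg: Message): Unit = sender() ! "ack"
}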
pawelkaczor/akka-ddd
akka-ddd-core/src/main/scala/pl/newicom/dddd/messaging/Deduplication.scala
Scala
mit
1,369
/* * Copyright 2018 Analytics Zoo Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.zoo.feature.python import java.util.{List => JList, Map => JMap} import com.intel.analytics.bigdl.python.api.{JTensor, Sample} import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.dataset.{Sample => JSample} import com.intel.analytics.bigdl.nn.abstractnn.Activity import com.intel.analytics.zoo.common.PythonZoo import com.intel.analytics.zoo.feature.common.{Preprocessing, Relation, Relations} import com.intel.analytics.zoo.feature.text.TruncMode.TruncMode import com.intel.analytics.zoo.feature.text.{DistributedTextSet, _} import org.apache.spark.api.java.{JavaRDD, JavaSparkContext} import org.apache.spark.rdd.RDD import org.apache.spark.sql.SQLContext import scala.collection.JavaConverters._ import scala.reflect.ClassTag object PythonTextFeature { def ofFloat(): PythonTextFeature[Float] = new PythonTextFeature[Float]() def ofDouble(): PythonTextFeature[Double] = new PythonTextFeature[Double]() } class PythonTextFeature[T: ClassTag](implicit ev: TensorNumeric[T]) extends PythonZoo[T] { def createTextFeature(text: String, uri: String): TextFeature = { TextFeature(text, uri) } def createTextFeature(text: String, label: Int, uri: String = null): TextFeature = { TextFeature(text, label, uri) } def textFeatureGetText(feature: TextFeature): String = { feature.getText } def textFeatureGetLabel(feature: TextFeature): Int = { feature.getLabel } def textFeatureGetURI(feature: TextFeature): String = { feature.getURI } def textFeatureHasLabel(feature: TextFeature): Boolean = { feature.hasLabel } def textFeatureSetLabel(feature: TextFeature, label: Int): TextFeature = { feature.setLabel(label) } def textFeatureGetKeys(feature: TextFeature): JList[String] = { feature.keys().toList.asJava } def textFeatureGetTokens(feature: TextFeature): JList[String] = { val tokens = feature.getTokens if (tokens != null ) tokens.toList.asJava else null } def textFeatureGetSample(feature: TextFeature): Sample = { val sample = feature.getSample if (sample != null) toPySample(sample.asInstanceOf[JSample[T]]) else null } def transformTextFeature( transformer: TextTransformer, feature: TextFeature): TextFeature = { transformer.transform(feature) } def createTokenizer(): Tokenizer = { Tokenizer() } def createNormalizer(): Normalizer = { Normalizer() } def createWordIndexer(vocab: JMap[String, Int]): WordIndexer = { WordIndexer(if (vocab != null) vocab.asScala.toMap else null) } def createSequenceShaper( len: Int, truncMode: String, padElement: Int): SequenceShaper = { SequenceShaper(len, toScalaTruncMode(truncMode), padElement) } private def toScalaTruncMode(truncMode: String): TruncMode = { truncMode.toLowerCase() match { case "pre" => TruncMode.pre case "post" => TruncMode.post case _ => throw new IllegalArgumentException(s"Unsupported truncMode $truncMode, " + s"please use pre or post") } } def createTextFeatureToSample(): TextFeatureToSample = { TextFeatureToSample() } 
def createLocalTextSet(texts: JList[String], labels: JList[Int]): LocalTextSet = { require(texts != null, "texts of a TextSet can't be null") val features = if (labels != null) { require(texts.size() == labels.size(), "texts and labels of a TextSet " + "should have the same size") texts.asScala.toArray[String].zip(labels.asScala.toArray[Int]).map{feature => createTextFeature(feature._1, feature._2) } } else { texts.asScala.toArray.map(text => createTextFeature(text, null)) } TextSet.array(features) } def createDistributedTextSet( texts: JavaRDD[String], labels: JavaRDD[Int]): DistributedTextSet = { require(texts != null, "texts of a TextSet can't be null") val features = if (labels != null) { texts.rdd.zip(labels.rdd).map{feature => createTextFeature(feature._1, feature._2) } } else { texts.rdd.map(text => createTextFeature(text, null)) } TextSet.rdd(features) } def readTextSet(path: String, sc: JavaSparkContext, minPartitions: Int): TextSet = { if (sc == null) { TextSet.read(path, null, minPartitions) } else { TextSet.read(path, sc.sc, minPartitions) } } def textSetGetWordIndex(textSet: TextSet): JMap[String, Int] = { val res = textSet.getWordIndex if (res == null) null else res.asJava } def textSetGenerateWordIndexMap( textSet: TextSet, removeTopN: Int = 0, maxWordsNum: Int = -1, minFreq: Int, existingMap: JMap[String, Int]): JMap[String, Int] = { val res = textSet.generateWordIndexMap(removeTopN, maxWordsNum, minFreq, if (existingMap != null) existingMap.asScala.toMap else null) if (res == null) null else res.asJava } def textSetIsDistributed(textSet: TextSet): Boolean = { textSet.isDistributed } def textSetIsLocal(textSet: TextSet): Boolean = { textSet.isLocal } def textSetGetTexts(textSet: LocalTextSet): JList[String] = { textSet.array.map(_.getText).toList.asJava } def textSetGetTexts(textSet: DistributedTextSet): JavaRDD[String] = { textSet.rdd.map(_.getText).toJavaRDD() } def textSetGetURIs(textSet: LocalTextSet): JList[String] = { textSet.array.map(_.getURI).toList.asJava } def textSetGetURIs(textSet: DistributedTextSet): JavaRDD[String] = { textSet.rdd.map(_.getURI).toJavaRDD() } def textSetGetLabels(textSet: LocalTextSet): JList[Int] = { textSet.array.map(_.getLabel).toList.asJava } def textSetGetLabels(textSet: DistributedTextSet): JavaRDD[Int] = { textSet.rdd.map(_.getLabel).toJavaRDD() } def textSetGetPredicts(textSet: LocalTextSet): JList[JList[Any]] = { textSet.array.map{feature => if (feature.contains(TextFeature.predict)) { List[Any](feature.getURI, activityToJTensors(feature[Activity](TextFeature.predict))).asJava } else { List[Any](feature.getURI, null).asJava } }.toList.asJava } def textSetGetPredicts(textSet: DistributedTextSet): JavaRDD[JList[Any]] = { textSet.rdd.map{feature => if (feature.contains(TextFeature.predict)) { List[Any](feature.getURI, activityToJTensors(feature[Activity](TextFeature.predict))).asJava } else { List[Any](feature.getURI, null).asJava } }.toJavaRDD() } def textSetGetSamples(textSet: LocalTextSet): JList[Sample] = { textSet.array.map{feature => if (feature.contains(TextFeature.sample)) { toPySample(feature.getSample.asInstanceOf[JSample[T]]) } else { null } }.toList.asJava } def textSetGetSamples(textSet: DistributedTextSet): JavaRDD[Sample] = { textSet.rdd.map{feature => if (feature.contains(TextFeature.sample)) { toPySample(feature.getSample.asInstanceOf[JSample[T]]) } else { null } }.toJavaRDD() } def textSetRandomSplit( textSet: TextSet, weights: JList[Double]): JList[TextSet] = { 
textSet.randomSplit(weights.asScala.toArray).toList.asJava } def textSetSetWordIndex( textSet: TextSet, vocab: JMap[String, Int]): TextSet = { textSet.setWordIndex(if (vocab != null) vocab.asScala.toMap else null) } def textSetTokenize(textSet: TextSet): TextSet = { textSet.tokenize() } def textSetNormalize(textSet: TextSet): TextSet = { textSet.normalize() } def textSetWord2idx( textSet: TextSet, removeTopN: Int, maxWordsNum: Int, minFreq: Int, existingMap: JMap[String, Int]): TextSet = { textSet.word2idx(removeTopN, maxWordsNum, minFreq, if (existingMap != null) existingMap.asScala.toMap else null) } def textSetShapeSequence( textSet: TextSet, len: Int, truncMode: String, padElement: Int): TextSet = { textSet.shapeSequence(len, toScalaTruncMode(truncMode), padElement) } def textSetGenerateSample(textSet: TextSet): TextSet = { textSet.generateSample() } def textSetToDistributed( textSet: TextSet, sc: JavaSparkContext, partitionNum: Int = 4): DistributedTextSet = { textSet.toDistributed(sc.sc, partitionNum) } def textSetToLocal(textSet: TextSet): LocalTextSet = { textSet.toLocal() } def transformTextSet( transformer: Preprocessing[TextFeature, TextFeature], imageSet: TextSet): TextSet = { imageSet.transform(transformer) } private def toScalaRelations(relations: JavaRDD[Array[Object]]): RDD[Relation] = { relations.rdd.map(x => { require(x.length == 3, "Relation should consist of id1, id2 and label") Relation(x(0).asInstanceOf[String], x(1).asInstanceOf[String], x(2).asInstanceOf[Int]) }) } private def toScalaRelations(relations: JList[JList[Any]]): Array[Relation] = { relations.asScala.toArray.map(relation => { val x = relation.asScala.toArray require(x.length == 3, "Relation should consist of id1, id2 and label") Relation(x(0).asInstanceOf[String], x(1).asInstanceOf[String], x(2).asInstanceOf[Int]) }) } private def toPythonRelations(relations: RDD[Relation]): JavaRDD[JList[Any]] = { relations.map(x => List(x.id1, x.id2, x.label).asJava).toJavaRDD() } private def toPythonRelations(relations: Array[Relation]): JList[JList[Any]] = { relations.map(x => List(x.id1, x.id2, x.label).asJava).toList.asJava } def readRelations(path: String): JList[JList[Any]] = { toPythonRelations(Relations.read(path)) } def readRelations( path: String, sc: JavaSparkContext, minPartitions: Int = 1): JavaRDD[JList[Any]] = { toPythonRelations(Relations.read(path, sc.sc, minPartitions)) } def readRelationsParquet( path: String, sc: JavaSparkContext): JavaRDD[JList[Any]] = { val sqlContext = new SQLContext(sc) toPythonRelations(Relations.readParquet(path, sqlContext)) } def textSetFromRelationPairs( relations: JavaRDD[Array[Object]], corpus1: TextSet, corpus2: TextSet): DistributedTextSet = { TextSet.fromRelationPairs(toScalaRelations(relations), corpus1, corpus2) } def textSetFromRelationPairs( relations: JList[JList[Any]], corpus1: TextSet, corpus2: TextSet): LocalTextSet = { TextSet.fromRelationPairs(toScalaRelations(relations), corpus1, corpus2) } def textSetFromRelationLists( relations: JavaRDD[Array[Object]], corpus1: TextSet, corpus2: TextSet): DistributedTextSet = { TextSet.fromRelationLists(toScalaRelations(relations), corpus1, corpus2) } def textSetFromRelationLists( relations: JList[JList[Any]], corpus1: TextSet, corpus2: TextSet): LocalTextSet = { TextSet.fromRelationLists(toScalaRelations(relations), corpus1, corpus2) } def textSetReadCSV(path: String, sc: JavaSparkContext, minPartitions: Int): TextSet = { if (sc == null) { TextSet.readCSV(path, null, minPartitions) } else { TextSet.readCSV(path, sc.sc, 
minPartitions) } } def textSetReadParquet( path: String, sc: JavaSparkContext): TextSet = { val sqlContext = new SQLContext(sc) TextSet.readParquet(path, sqlContext) } def textSetSaveWordIndex( textSet: TextSet, path: String): Unit = { textSet.saveWordIndex(path) } def textSetLoadWordIndex( textSet: TextSet, path: String): TextSet = { textSet.loadWordIndex(path) } }
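// Hedged sketch, not part of Analytics Zoo: calling a few of the bridge methods
// above directly from Scala. In the real project they are reached from Python via
// Py4J; the text, the label and the assumption that Tokenizer is a TextTransformer
// accepted by transformTextFeature are illustrative.
object PythonTextFeatureSketch {
  def main(args: Array[String]): Unit = {
    val bridge = PythonTextFeature.ofFloat()
    val feature = bridge.createTextFeature("analytics zoo makes text easy", 1, null)
    val tokenized = bridge.transformTextFeature(bridge.createTokenizer(), feature)
    println(bridge.textFeatureGetTokens(tokenized)) // e.g. [analytics, zoo, makes, text, easy]
  }
}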
intel-analytics/analytics-zoo
zoo/src/main/scala/com/intel/analytics/zoo/feature/python/PythonTextFeature.scala
Scala
apache-2.0
12,298
package com.github.jlprat.ninetynine.p07 import org.scalatest.WordSpec import com.github.jlprat.ninetynine.p07.P07._ /** * Created by @jlprat on 02/03/2016. * Specs for problem P07: Flatten a nested list structure */ class P07Spec extends WordSpec { "flatten" when { val intList = List(1, 2, 3) "called on empty lists" should { "return back the empty list" in { val empty = List[List[String]]() assert(flatten(empty) === empty) } } "called on an already flattened list" should { "return the same list" in { //candidate to generator test assert(flatten(intList) === intList) } } "called on list with only one inner list" should { "return the 'inner' list" in { //candidate to generator test val outer = List(intList) assert(flatten(outer) === intList) } } "called on a 3 level nested list" should { "return the concatenation of the 'inner' lists" in { val second = List(intList, intList, intList, intList) val outer = List(second, second) assert(flatten(outer) === List.tabulate(24)(x => (x % 3) + 1)) } } "called on a list with single elements and lists" should { "return a list with all elements without a list structure" in { val stringList = List("foo", "bar") val outer = List(stringList, 4L, intList) assert(flatten(outer) === List("foo", "bar",4L, 1, 2, 3)) } } "called on a list where each element is a list with only 1 element" should { "return a list containing only the inner elements" in { val list = List(List(1), List(2), List(3)) assert(flatten(list) === intList) } } } "newly added function myFlatten added on lists" when { val intList = List(1, 2, 3) "called on empty lists" should { "return back the empty list" in { val empty = List[List[String]]() assert(empty.myFlatten === empty) } } "called on an already flattened list" should { "return the same list" in { assert(intList.myFlatten === intList) } } "called on list with only one inner list" should { "return the 'inner' list" in { val outer = List(intList) assert(outer.myFlatten === intList) } } "called on a 3 level nested list" should { "return the concatenation of the 'inner' lists" in { val second = List(intList, intList, intList, intList) val outer = List(second, second) assert(outer.myFlatten === List.tabulate(24)(x => (x % 3) + 1)) } } "called on a list with single elements and lists" should { "return a list with all elements without a list structure" in { val stringList = List("foo", "bar") val outer = List(stringList, 4L, intList) assert(outer.myFlatten === List("foo", "bar",4L, 1, 2, 3)) } } "called on a list where each element is a list with only 1 element" should { "return a list containing only the inner elements" in { val list = List(List(1), List(2), List(3)) assert(list.myFlatten === intList) } } } }
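// Hedged sketch: the real implementation lives in P07 (imported above) and is not
// shown in this file. One straightforward way to satisfy these specs is a
// recursive flatMap over List[Any].
object FlattenSketch {
  def flatten(xs: List[Any]): List[Any] = xs.flatMap {
    case inner: List[_] => flatten(inner) // descend into nested lists
    case other          => List(other)    // keep plain elements as-is
  }
}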
jlprat/99-scala-problems
src/test/scala/com/github/jlprat/ninetynine/p07/P07Spec.scala
Scala
apache-2.0
3,194
package controllers import play.api._ import play.api.mvc._ import play.api.cache._ import play.api.libs.json._ import play.api.templates._ import scala.concurrent._ import ExecutionContext.Implicits.global import play.api.Play.current import scala.io.Source import services._ import lib.html.HtmlStream import lib.html.HtmlStream._ import lib.ActionUtils._ import models.{PublishRequest, SearchRequest} // ---------------------------------------------------------------------- object Application extends Controller { // ---------------------------------------------------------------------- val page = lib.html.Page("Where does money go? Moscow edition") def index = Logging("index page") { Action { Ok(views.html.index(page.index)) } } def about = Logging("about page") { Action { Ok(views.html.about(page.about)) } } def histo = Cached("histo") { _async { ServicePoint._histo }} def search_form = Logging("search form") { Action { val data = SearchRequest.DEFAULT val f = SearchRequest._form.fill(data) Ok(views.html.search_form(page.new_search, f)) } } def search_form_from(id: String) = Logging("copy form") { Action.async { ServicePoint.find_search(id).recover { case e => SearchRequest.DEFAULT }.map { data => val f = SearchRequest._form.fill(data) Ok(views.html.search_form(page.new_search, f)) } } } def search_start = Logging("start search") { Action.async { implicit req => val fa = SearchRequest._form.bindFromRequest if (fa.errors.length > 0) { Future { Ok(views.html.search_form(page.new_search, fa)) } } else { val d = fa.get.copy(published = None, approved = None) // securi ServicePoint.save_search(d).map { js => Redirect(routes.Application.results((js \\ "_id").as[String])) } } } } // ---------------------------------------------------------------------- def result_publish = Logging("result publish") { Action { implicit req => val fa = PublishRequest._publish_form.bindFromRequest var res = fa.errors.isEmpty if (res) { ServicePoint.find_search(fa.get.id).recover { case e => res = false }.map { data => ServicePoint.update_search(fa.get.id, fa.get.text) } } Ok(Json.toJson(Json.obj("success" -> res))) } } def result_list = Logging("result list page") { Action { Ok(views.html.result_list(page.results)) } } // ---------------------------------------------------------------------- def results(id: String) = Logging("show results for "+id, "search_id" -> id) { Action.async { ServicePoint.find_search(id).map { search => Ok(views.html.search_result(page.results, search)) } } } def req(id: String, page: Long) = _async { for { search <- ServicePoint.find_search(id) results <- ServicePoint.summarize(page, search) } yield { results } } // ---------------------------------------------------------------------- def addr = Action { val file = Source.fromFile("data/filtered.csv") val read = file.getLines() // .slice(0, 10) val list = read.toList.map { s => val a = OsmLoader.parseAddress(s) ServicePoint.osmGeoAddress(a.address) { Thread.sleep(400) // small timeout, "wait" for previous save operations Json.toJson(a) }.map { js => Html( (js \\ "address") + "</br>" ) } } val printed = list.map( HtmlStream(_) ) Ok.chunked(HtmlStream.interleave(printed)) } // ---------------------------------------------------------------------- def contract(regnum: String) = _async { ServicePoint.contract(regnum) } def customer(regnum: String) = _async { ServicePoint.customer(regnum) } def supplier(inn: String, kpp: String) = _async { ServicePoint.supplier(inn, kpp) } def customer_address(regnum: String) = _async { for { j <- 
ServicePoint.customer(regnum) s <- ServicePoint.geocode((j \\ "factualAddress" \\ "addressLine").as[String]) } yield { Json.arr(s, Json.obj("separator" -> "-------"), j) } } def supplier_address(inn: String, kpp: String) = _async { for { j <- ServicePoint.supplier(inn, kpp) s <- ServicePoint.geocode((j \\ "factualAddress").as[String]) } yield { Json.arr(s, Json.obj("separator" -> "-------"), j) } } // ------------------------------------------------------------------------------------------------------ def javascriptRoutes = Action { implicit request => import routes.javascript._ Ok( Routes.javascriptRouter("jsRoutes")( routes.javascript.Application.req, routes.javascript.Application.result_publish, routes.javascript.Application.contract, routes.javascript.Application.customer, routes.javascript.Application.supplier ) ).as("text/javascript") } }
radiantgeek/spending_map
app/controllers/Application.scala
Scala
apache-2.0
5,078
package controllers package actions package resources import forms._ import models.Truthy import util.IpmiCommand import util.concurrent.BackgroundProcessor import util.plugins.{IpmiPowerCommand, PowerManagement} import util.security.SecuritySpecification import collins.power.Identify import collins.power.management.{PowerManagement, PowerManagementConfig} import play.api.data.Form import play.api.data.Forms._ import play.api.mvc.AsyncResult case class IntakeStage1Action( assetId: Long, spec: SecuritySpecification, handler: SecureController ) extends SecureAction(spec, handler) with IntakeAction { val dataForm = Form(single( "light" -> of[Truthy] )) case class ActionDataHolder(light: Truthy) extends RequestDataHolder override def validate(): Either[RequestDataHolder,RequestDataHolder] = super.validate() match { case Left(err) => Left(err) case Right(dummy) => dataForm.bindFromRequest()(request).fold( err => Right(dummy), suc => Right(ActionDataHolder(suc)) ) } override def execute(rd: RequestDataHolder) = rd match { case ActionDataHolder(light) if light.toBoolean => Status.Ok( views.html.resources.intake2(definedAsset, IntakeStage2Action.dataForm)(flash, request) ) case dummy => PowerManagement.pluginEnabled match { case None => Status.Ok(views.html.help(Help.PowerManagementDisabled)(flash, request)) case Some(plugin) => AsyncResult { identifyAsset(plugin) } } } override def handleWebError(rd: RequestDataHolder) = Some( Redirect(app.routes.Resources.intake(assetId, 1)).flashing("error" -> rd.toString) ) protected def identifyAsset(plugin: PowerManagement) = { val cmd = IpmiPowerCommand.fromPowerAction(definedAsset, Identify) BackgroundProcessor.send(cmd) { result => IpmiCommand.fromResult(result) match { case Left(throwable) => verifyIpmiReachable(plugin, throwable.toString) case Right(None) => defaultView case Right(Some(suc)) if suc.isSuccess => defaultView case Right(Some(error)) if !error.isSuccess => verifyIpmiReachable(plugin, error.toString) } } } protected def defaultView = Status.Ok(views.html.resources.intake(definedAsset)(flash, request)) protected def verifyIpmiReachable(plugin: PowerManagement, errorString: String) = plugin.verify(definedAsset)() match { case reachable if reachable.isSuccess => Status.Ok(views.html.help(Help.IpmiError, errorString)(flash, request)) case unreachable if !unreachable.isSuccess => Status.Ok(views.html.help(Help.IpmiUnreachable, errorString)(flash, request)) } }
Shopify/collins
app/controllers/actions/resources/IntakeStage1Action.scala
Scala
apache-2.0
2,718
package org.jetbrains.plugins.scala package lang package parser package parsing package statements import com.intellij.lang.PsiBuilder import expressions.SelfInvocation import lexer.ScalaTokenTypes import builder.ScalaPsiBuilder /** * @author Alexander Podkhalyuzin * Date: 13.02.2008 */ /* * ConstrExpr ::= SelfInvocation * | '{' SelfInvocation {semi BlockStat} '}' */ object ConstrExpr { def parse(builder: ScalaPsiBuilder): Boolean = { val constrExprMarker = builder.mark builder.getTokenType match { case ScalaTokenTypes.tLBRACE => { ConstrBlock parse builder constrExprMarker.drop() return true } case _ => { SelfInvocation parse builder constrExprMarker.done(ScalaElementTypes.CONSTR_EXPR) return true } } //this line for compiler true } }
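/* Illustrative only, not part of the parser: the Scala source shapes this rule
 * accepts as the body of an auxiliary constructor.
 *
 *   class Point(val x: Int, val y: Int) {
 *     def this(x: Int) = this(x, 0)                  // ConstrExpr ::= SelfInvocation
 *     def this() = { this(0, 0); println("origin") } // '{' SelfInvocation {semi BlockStat} '}'
 *   }
 */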
consulo/consulo-scala
src/org/jetbrains/plugins/scala/lang/parser/parsing/statements/ConstrExpr.scala
Scala
apache-2.0
861
/* Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package de.hpi.ingestion.dataimport.wikidata import com.holdenkarau.spark.testing.SharedSparkContext import org.scalatest.{FlatSpec, Matchers} class TagEntitiesTest extends FlatSpec with SharedSparkContext with Matchers { "Shorter paths" should "be added" in { val oldClasses = TestData.oldClassMap() val newClasses = TestData.newClassMap() val classMap = TagEntities.addShorterPaths(newClasses, oldClasses) val expectedMap = TestData.classMap() classMap shouldEqual expectedMap } "Subclass map" should "be built" in { val entries = sc.parallelize(TestData.subclassOfProperties()) val classMap = TagEntities.buildSubclassMap(entries, TestData.classesToTag()) val expectedMap = TestData.classMap() classMap shouldEqual expectedMap } it should "be properly reduced" in { val reducedSubclasses = TestData.reducableClassMaps().reduce(TagEntities.mergeSubclassMaps) val expectedSubclasses = TestData.classMap() reducedSubclasses shouldEqual expectedSubclasses } "Wikidata entity" should "be properly translated into SubclassEntry" in { val entries = TestData.classWikidataEntities() .map(TagEntities.translateToSubclassEntry) val expectedEntries = TestData.subclassEntries() entries shouldEqual expectedEntries } "Instance-of entities" should "be updated" in { val job = new TagEntities val pathKey = job.settings("wikidataPathProperty") val entries = sc.parallelize(TestData.subclassEntries()) val updatedEntries = TagEntities.updateEntities(entries, TestData.classMap(), pathKey).collect.toSet val expectedEntries = TestData.updatedInstanceOfProperties().toSet updatedEntries shouldEqual expectedEntries } it should "be updated correctly" in { val job = new TagEntities val pathKey = job.settings("wikidataPathProperty") val entries = TestData.validInstanceOfProperties() .map(TagEntities.updateInstanceOfProperty(_, TestData.classMap(), pathKey)) val expectedEntries = TestData.updatedInstanceOfProperties() entries shouldEqual expectedEntries } }
bpn1/ingestion
src/test/scala/de/hpi/ingestion/dataimport/wikidata/TagEntitiesTest.scala
Scala
apache-2.0
2,836
/* Copyright (c) 2010 Richard Searle * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @author Richard Searle */ package cognitiveentity.xml.sax.interop import scala.xml.factory.XMLLoader import scala.xml._ import org.xml.sax._ import org.xml.sax.helpers.DefaultHandler class Loader extends DefaultHandler with XMLLoader[Elem]{ val newAdapter = adapter def value = newAdapter.rootElem.asInstanceOf[Elem] override def characters( ch:Array[Char],start:Int,length:Int) { newAdapter.characters(ch,start,length) } override def endDocument() { newAdapter.scopeStack.pop } override def endElement(uri:String,localName:String, qName:String){ newAdapter.endElement(uri,localName,qName) } override def processingInstruction(target:String, data:String){ newAdapter.processingInstruction(target,data) } override def startDocument(){ newAdapter.scopeStack push TopScope } override def startElement(uri:String,localName:String, qName:String,atts:Attributes){ newAdapter.startElement(uri,localName,qName,atts) } }
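// Hedged usage sketch, not part of the library: feed the Loader to a plain JAXP
// SAX parser as its handler, then read back the scala.xml.Elem it accumulated.
// The XML literal is illustrative.
object LoaderSketch {
  def main(args: Array[String]): Unit = {
    val loader = new Loader
    val parser = javax.xml.parsers.SAXParserFactory.newInstance().newSAXParser()
    parser.parse(new InputSource(new java.io.StringReader("<root><child/></root>")), loader)
    val elem: Elem = loader.value
    println(elem \ "child") // NodeSeq containing the single <child/> element
  }
}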
searler/ScalaXML
src/main/scala/cognitiveentity/xml/sax/interop/XMLLoader.scala
Scala
apache-2.0
1,675
package filodb.http import scala.concurrent.duration.FiniteDuration import com.typesafe.config.Config import net.ceedubs.ficus.Ficus._ import filodb.coordinator.FilodbSettings class HttpSettings(config: Config, val filoSettings: FilodbSettings) { lazy val httpServerBindHost = config.getString("filodb.http.bind-host") lazy val httpServerBindPort = config.getInt("filodb.http.bind-port") lazy val httpServerStartTimeout = config.getDuration("filodb.http.start-timeout") lazy val queryDefaultSpread = config.getInt("filodb.spread-default") lazy val querySampleLimit = config.getInt("filodb.query.sample-limit") lazy val queryAskTimeout = config.as[FiniteDuration]("filodb.query.ask-timeout") lazy val queryBinaryJoinCardLimit = config.getInt("filodb.query.join-cardinality-limit") lazy val queryGroupByCardLimit = config.getInt("filodb.query.group-by-cardinality-limit") }
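// Hedged sketch: the exact keys HttpSettings reads, expressed as parseable HOCON.
// The values are illustrative, not FiloDB's shipped defaults.
object HttpSettingsConfigSketch {
  import com.typesafe.config.ConfigFactory

  val example: Config = ConfigFactory.parseString(
    """
      |filodb.http.bind-host = "0.0.0.0"
      |filodb.http.bind-port = 8080
      |filodb.http.start-timeout = 10s
      |filodb.spread-default = 1
      |filodb.query.sample-limit = 1000000
      |filodb.query.ask-timeout = 30s
      |filodb.query.join-cardinality-limit = 25000
      |filodb.query.group-by-cardinality-limit = 100000
      |""".stripMargin)
}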
tuplejump/FiloDB
http/src/main/scala/filodb/http/HttpSettings.scala
Scala
apache-2.0
893
object DrugTest { import probability.probdsl._ sealed abstract class Status object User extends Status { override def toString = "User" } object Clean extends Status { override def toString = "Clean" } sealed abstract class TestResult object Positive extends TestResult { override def toString = "Positive" } object Negative extends TestResult { override def toString = "Negative" } val PosIfUser = 0.99 val PosIfClean = 0.01 val drugTest = normalizedProb[(Status, TestResult)] { val s = flip(0.001, User, Clean) val t = flip(if (s == User) PosIfUser else PosIfClean, Positive, Negative) (s, t) } val drugTest2 = drugTest.filter { ut:(Status, TestResult) => ut._2 == Positive } def main(args:Array[String]) = run def run = { println(drugTest.toString ++ "\\n") println(drugTest2.toString ++ "\\n") } }
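// Hedged sanity check, not part of the original example: Bayes' rule with the
// numbers used above predicts what drugTest2 should report for User.
object DrugTestCheck {
  def main(args: Array[String]): Unit = {
    val pUser = 0.001 // prior used in drugTest
    val pPositive = pUser * DrugTest.PosIfUser + (1 - pUser) * DrugTest.PosIfClean // 0.01098
    val pUserGivenPositive = pUser * DrugTest.PosIfUser / pPositive                // ~ 0.0902
    println(f"P(User | Positive) = $pUserGivenPositive%.4f")
    // Even a positive test leaves roughly a 91% chance that the subject is Clean.
  }
}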
urso/scala_prob
examples/DrugTest.scala
Scala
bsd-3-clause
867
package com.asto.dop.api import com.asto.dop.api.processor.MessageProcessor import com.typesafe.scalalogging.slf4j.LazyLogging import io.vertx.core._ import io.vertx.core.http.{HttpServer, HttpServerOptions} import io.vertx.core.json.JsonObject class Startup extends AbstractVerticle with LazyLogging{ override def start(startFuture: Future[Void]): Unit = { Startup.vertx=vertx Startup.config=config() MessageProcessor.init() val host=config().getJsonObject("http").getString("host") val port=config().getJsonObject("http").getInteger("port") vertx.createHttpServer(new HttpServerOptions().setCompressionSupported(true).setTcpKeepAlive(true)) .requestHandler(new APIHttpRequest).listen(port,host,new Handler[AsyncResult[HttpServer]] { override def handle(event: AsyncResult[HttpServer]): Unit = { if (event.succeeded()) { startFuture.complete() logger.info(s"API app start successful. http://$host:$port/") } else { logger.error("Startup fail .", event.cause()) } } }) } override def stop(stopFuture: Future[Void]): Unit = { logger.info(s"API app stopped , Bye .") stopFuture.complete() } } object Startup{ var vertx:Vertx=_ var config: JsonObject=_ }
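// Hedged sketch, not part of the project: deploying the verticle with the "http"
// config block it reads in start(). StartupSketch, the host and the port are
// illustrative.
object StartupSketch {
  def main(args: Array[String]): Unit = {
    val conf = new JsonObject().put("http",
      new JsonObject().put("host", "0.0.0.0").put("port", Integer.valueOf(8080)))
    Vertx.vertx().deployVerticle(new Startup, new DeploymentOptions().setConfig(conf))
  }
}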
zj-lingxin/dop
source/api/src/main/scala/com/asto/dop/api/Startup.scala
Scala
mit
1,279
/* * Accio is a platform to launch computer science experiments. * Copyright (C) 2016-2018 Vincent Primault <[email protected]> * * Accio is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Accio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Accio. If not, see <http://www.gnu.org/licenses/>. */ package fr.cnrs.liris.lumos.transport import com.twitter.util.{Closable, Future} import fr.cnrs.liris.lumos.domain.Event /** * Transport for the Lumos events. All implementations should be thread-safe. */ trait EventTransport extends Closable { /** * Return the name of this transport, as displayed to the user. */ def name: String /** * Write an event to an endpoint. * * Implementations should not contain any blocking call and return quickly, possibly before the * event is actually written. This method should never throw any exception. * * @param event Event to write. */ def sendEvent(event: Event): Unit }
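// Hedged sketch, not shipped with Accio: the simplest conforming transport, which
// just prints events. It illustrates the contract above: sendEvent returns quickly
// and never throws, and close releases nothing because nothing is buffered.
class ConsoleEventTransport extends EventTransport {
  override def name: String = "Console"

  override def sendEvent(event: Event): Unit = println(s"[lumos] $event")

  override def close(deadline: com.twitter.util.Time): Future[Unit] = Future.Done
}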
privamov/accio
accio/java/fr/cnrs/liris/lumos/transport/EventTransport.scala
Scala
gpl-3.0
1,428
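A minimal sketch of implementing the EventTransport trait above. It assumes com.twitter.util.Closable's close(deadline: Time): Future[Unit] signature and only relies on Event's toString; the ConsoleEventTransport name is hypothetical.

import com.twitter.util.{Future, Time}

import fr.cnrs.liris.lumos.domain.Event
import fr.cnrs.liris.lumos.transport.EventTransport

/** A toy transport that just prints events; purely illustrative. */
class ConsoleEventTransport extends EventTransport {
  override def name: String = "Console"

  // Returns quickly, as the trait's contract asks; println is effectively non-blocking here.
  override def sendEvent(event: Event): Unit = println(s"[lumos] $event")

  // Nothing to release, so closing completes immediately.
  override def close(deadline: Time): Future[Unit] = Future.Done
}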
package org.fuckboi

import org.fuckboi.ast.RootNode

class FuckboiGenerator extends ClassLoader {

  def generate(fuckboiCode: String, filename: String): (Array[Byte], RootNode) = {
    val parser = new FuckboiParser
    val rootNode = parser.parse(fuckboiCode)
    (rootNode.generateByteCode(filename), rootNode)
  }
}
VirenMohindra/Fuckboi
src/main/scala/org/fuckboi/FuckboiGenerator.scala
Scala
mit
322
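A hedged usage sketch for the generator above: call generate and persist the returned bytecode with java.nio. The source program and the meaning of the filename argument are assumptions (they depend on FuckboiParser and RootNode, which are not shown here).

import java.nio.file.{Files, Paths}

import org.fuckboi.FuckboiGenerator

object GenerateExample extends App {
  // Hypothetical source program; the accepted syntax depends on FuckboiParser.
  val source = "++++[>++++<-]>."

  val (bytecode, ast) = new FuckboiGenerator().generate(source, "Program")

  // Persist the generated JVM bytecode so it can be inspected with javap or loaded later.
  Files.write(Paths.get("Program.class"), bytecode)
  println(s"Wrote ${bytecode.length} bytes, AST root: $ast")
}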
package breeze.util

/*
 Copyright 2009 David Hall, Daniel Ramage

 Licensed under the Apache License, Version 2.0 (the "License")
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

import java.io.{IOException, ObjectInputStream, ObjectStreamException}

import scala.jdk.CollectionConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap}
import java.util.Arrays
import java.util
import scala.io.Source

/**
 * Trait that marks an O(1) bidirectional map between Ints (increasing from 0)
 * and T's. This class is used, for example, to efficiently build unique
 * vector space mappings for strings. The methods in this trait do not mutate
 * the underlying index. Use either a MutableIndex or one of the companion
 * object constructor methods to build an index.
 *
 * @author dlwh, dramage
 */
trait Index[T] extends Iterable[T] with (T => Int) with Serializable {

  /** Number of elements in this index. */
  def size: Int

  /**
   * Returns the int id of the given element (0-based) or -1 if not
   * found in the index. This method never changes the index (even
   * in MutableIndex).
   */
  def apply(t: T): Int

  /**
   * Returns Some(t) if this int corresponds to some object,
   * and None otherwise.
   */
  def unapply(i: Int): Option[T]

  /** Returns true if this index contains the element t. */
  def contains(t: T): Boolean = apply(t) >= 0

  /** Returns Some(i) if the object has been indexed, or None. */
  def indexOpt(t: T): Option[Int] = {
    val i = apply(t)
    if (i >= 0) Some(i) else None
  }

  /** Override Iterable's linear-scan indexOf to use our apply method. */
  def indexOf(t: T): Int = apply(t)

  /** Returns the indexed items along with their indices. */
  def pairs: Iterator[(T, Int)]

  /**
   * Returns an object at the given position or throws
   * IndexOutOfBoundsException if it's not found.
   */
  def get(i: Int): T = unapply(i).getOrElse(throw new IndexOutOfBoundsException())

  override def equals(other: Any): Boolean = {
    other match {
      case that: Index[_] if this.size == that.size => this.sameElements(that)
      case _ => false
    }
  }

  protected lazy val defaultHashCode = foldLeft(17)(_ * 41 + _.hashCode())

  override def hashCode = defaultHashCode

  override def toString = {
    iterator.mkString("Index(", ",", ")")
  }

  def |[U](right: Index[U]) = new EitherIndex(this, right)
}

/**
 * An Index that contains an extra method: <em>index</em> that adds the
 * given element (if necessary), returning its (possibly new) position in
 * the index.
 *
 * @author dramage
 */
trait MutableIndex[T] extends Index[T] {

  /**
   * Returns an integer index for the given object, adding it to the
   * index if it is not already present.
   */
  def index(t: T): Int
}

/**
 * Class that builds a 1-to-1 mapping between Ints and T's, which
 * is very useful for efficiency concerns.
 *
 * Two extra views are provided: the index.synchronized view
 * enables threadsafe access and the index.immutable view
 * prevents the (view) from being updated.
 *
 * @author dlwh, dramage
 */
@SerialVersionUID(-7655100457525569617L)
class HashIndex[T] extends MutableIndex[T] with Serializable {

  /** Forward map from int to object */
  private var objects = new ArrayBuffer[T]

  /** Map from object back to int index */
  private var indices = new util.HashMap[T, Int]()

  override def size = indices.size

  override def apply(t: T): Int = Option(indices.get(t)).getOrElse(-1)

  override def unapply(pos: Int): Option[T] =
    if (pos >= 0 && pos < objects.length) Some(objects(pos)) else None

  override def contains(t: T) = indices containsKey t

  override def indexOpt(t: T): Option[Int] = Option(indices.get(t))

  override def get(pos: Int) = objects(pos); // throws IndexOutOfBoundsException as required

  override def iterator = objects.iterator

  /** Returns the position of T, adding it to the index if it's not there. */
  override def index(t: T) = {
    if (!indices.containsKey(t)) {
      val ind = objects.size
      objects += t
      indices.put(t, ind)
      ind
    } else {
      indices.get(t)
    }
  }

  def pairs = indices.asScala.iterator

  @throws(classOf[ObjectStreamException])
  private def writeReplace(): Object = {
    HashIndex.SerializedForm(objects)
  }

  // for backwards compatibility
  @throws(classOf[IOException])
  @throws(classOf[ClassNotFoundException])
  private def readObject(stream: ObjectInputStream): Unit = {
    HashIndex.logError("Deserializing an old-style HashIndex. Taking counter measures")
    val fields = stream.readFields()
    val objects = fields.get("objects", null)
    this.objects = objects.asInstanceOf[ArrayBuffer[T]]
    this.indices = new util.HashMap()
    for ((x, i) <- this.objects.zipWithIndex) {
      indices.put(x, i)
    }
  }
}

object HashIndex extends SerializableLogging {
  @SerialVersionUID(1L)
  private case class SerializedForm[T](objects: scala.collection.IndexedSeq[T]) {
    @throws(classOf[ObjectStreamException])
    private def readResolve(): Object = {
      val ind = new HashIndex[T]()
      objects.foreach(ind.index)
      ind
    }
  }

  private def logError(str: => String) = logger.error(str)
}

/**
 * For use when we need an index, but we already have (densely packed) positive
 * ints and don't want hash overhead.
 *
 * @author dlwh, dramage
 */
@SerialVersionUID(1L)
class DenseIntIndex(beg: Int, end: Int) extends Index[Int] {
  def this(end: Int) = this(0, end)

  require(beg >= 0)
  require(end >= beg)

  override def size = end - beg

  override def apply(t: Int) = if (contains(t)) t - beg else -1

  override def unapply(i: Int) = if (i < size) Some(i + beg) else None

  override def contains(t: Int) = t < end - beg && t >= 0

  override def indexOpt(t: Int) = if (contains(t)) Some(t) else None

  override def get(i: Int) = if (contains(i)) i else throw new IndexOutOfBoundsException()

  override def iterator = (beg until end).iterator

  def pairs = iterator.zip(iterator.map(_ + min))

  override def hashCode = beg + 37 * end
}

/**
 * Utilities for manipulating and creating Index objects.
 */
object Index {

  /** Constructs an empty index. */
  import scala.reflect.ClassTag.{Char => MChar}
  import scala.reflect.OptManifest
  def apply[T](): MutableIndex[T] = new HashIndex[T]

  /** Constructs an Index from some iterator. */
  def apply[T](iterator: Iterator[T]): Index[T] = {
    val index = Index[T]()
    // read through all iterator now -- don't lazily defer evaluation
    for (element <- iterator) {
      index.index(element)
    }
    index
  }

  /** Constructs an Index from some iterable. */
  def apply[T](iterable: Iterable[T]): Index[T] = {
    val index = Index[T]()
    // read through all iterator now -- don't lazily defer evaluation
    for (element <- iterable) {
      index.index(element)
    }
    index
  }

  /**
   * Loads a String index, one line per item with line
   * numbers (starting at 0) as the indices.
   */
  def load(source: Source): Index[String] = {
    apply(source.getLines().map(_.stripLineEnd))
  }
}

/**
 * An Index over two kinds of things. Layout is straightforward:
 * The first left.size entries are from the left index, while the next
 * right.size are from the right index. Values are wrapped in Left/Right.
 *
 * @author dlwh
 */
@SerialVersionUID(1L)
class EitherIndex[L, R](left: Index[L], right: Index[R]) extends Index[Either[L, R]] {
  def apply(t: Either[L, R]) = t match {
    case Left(l) => left(l)
    case Right(r) => right(r) + rightOffset
  }

  /**
   * What you add to the indices from the rightIndex to get indices into this index
   * @return
   */
  def rightOffset = left.size

  def unapply(i: Int) = {
    if (i < 0 || i >= size) None
    else if (i < left.size) Some(Left(left.get(i)))
    else Some(Right(right.get(i - left.size)))
  }

  def pairs =
    left.pairs.map { case (l, i) => Left(l) -> i } ++
      right.pairs.map { case (r, i) => Right(r) -> (i + left.size) }

  def iterator = left.iterator.map { Left(_) } ++ right.map { Right(_) }

  override def size: Int = left.size + right.size
}

/**
 * Lifts an index of T into an index of Option[T]. The last element is None. Everything else is as you expect.
 *
 * @author dlwh
 */
@SerialVersionUID(1L)
class OptionIndex[T](inner: Index[T]) extends Index[Option[T]] {
  def apply(t: Option[T]) = t match {
    case Some(l) => inner(l)
    case None => inner.size
  }

  def unapply(i: Int) = {
    if (i < 0 || i >= size) None
    else if (i < inner.size) Some(Some(inner.get(i))) // sic!
    else Some(None) // sic!
  }

  override def get(i: Int): Option[T] = {
    if (i < 0 || i >= size) throw new IndexOutOfBoundsException()
    else if (i < inner.size) Some(inner.get(i))
    else None
  }

  def pairs = inner.pairs.map { case (l, i) => Some(l) -> i } ++ Iterator(None -> inner.size)

  def iterator = inner.iterator.map { Some(_) } ++ Iterator(None)

  override def size: Int = inner.size + 1
}

/**
 * An Index over N kinds of things. A little type unsafe.
 *
 * @author dlwh
 */
@SerialVersionUID(1L)
final class CompositeIndex[U](indices: Index[_ <: U]*) extends Index[(Int, U)] {
  private val offsets: Array[Int] = indices
    .unfold(0) { (n, i) => n + i.size }
    .toArray

  /**
   * If you know which component, and which index in that component,
   * you can quickly get its mapped value with this function.
   */
  @inline
  def mapIndex(component: Int, uIndex: Int) = {
    if (uIndex < 0) -1
    else offsets(component) + uIndex
  }

  def apply(t: (Int, U)) = {
    if (t._1 >= indices.length || t._1 < 0) -1
    else {
      indices(t._1).asInstanceOf[Index[U]](t._2) + offsets(t._1)
    }
  }

  def unapply(i: Int) = {
    if (i < 0 || i >= size) None
    else {
      val index = {
        val res = Arrays.binarySearch(offsets, i)
        if (res >= 0) res
        else -(res + 2)
      }
      Some(index -> indices(index).get(i - offsets(index)))
    }
  }

  def pairs = indices.iterator.zipWithIndex.flatMap {
    case (index, i) => index.iterator.map { t => (i, t: U) }
  }.zipWithIndex

  def iterator = indices.iterator.zipWithIndex.flatMap {
    case (index, i) => index.iterator.map { t => (i -> t) }
  }

  override def size: Int = offsets(offsets.length - 1)
}

object EnumerationIndex {
  def apply[T <: Enumeration](t: T): Index[t.Value] = new Index[t.Value] {

    /**
     * Returns the int id of the given element (0-based) or -1 if not
     * found in the index. This method never changes the index (even
     * in MutableIndex).
     */
    def apply(x: t.Value): Int = x.id

    /**
     * Returns Some(t) if this int corresponds to some object,
     * and None otherwise.
     */
    def unapply(i: Int): Option[t.Value] = Some[t.Value](t(i))

    /** Returns the indexed items along with their indices. */
    def pairs: Iterator[(t.Value, Int)] = for (v <- t.values.iterator) yield v -> v.id

    def iterator: Iterator[t.Value] = t.values.iterator

    override def size: Int = t.maxId
  }
}
scalanlp/breeze
math/src/main/scala/breeze/util/Index.scala
Scala
apache-2.0
11,536
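A short usage sketch grounded in the Index companion object shown above: build an index from a collection, look elements up, and grow a mutable index on demand. The names idx and mutable are illustrative only.

import breeze.util.Index

object IndexExample extends App {
  // Build an index from a collection, via Index.apply(iterable) above.
  val idx = Index(Seq("cat", "dog", "fish"))

  println(idx("dog"))          // 1: apply returns the element's position
  println(idx.get(2))          // fish: get maps the position back to the element
  println(idx.indexOpt("cat")) // Some(0)

  // A mutable index grows on demand via index(...).
  val mutable = Index[String]()
  println(mutable.index("alpha")) // 0
  println(mutable.index("beta"))  // 1
  println(mutable.index("alpha")) // 0 again: already present, no new entry
}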
/*
 * Copyright 2019 ACINQ SAS
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fr.acinq.eclair.crypto

import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.bitcoin.{ByteVector32, Crypto}
import scodec.bits.ByteVector

object Generators {

  def fixSize(data: ByteVector): ByteVector32 =
    if (data.length < 32) ByteVector32(data padLeft 32) else ByteVector32(data)

  def perCommitSecret(seed: ByteVector32, index: Long): PrivateKey =
    PrivateKey(ShaChain.shaChainFromSeed(seed, 0xFFFFFFFFFFFFL - index))

  def perCommitPoint(seed: ByteVector32, index: Long): PublicKey =
    perCommitSecret(seed, index).publicKey

  def derivePrivKey(secret: PrivateKey, perCommitPoint: PublicKey): PrivateKey = {
    // secretkey = basepoint-secret + SHA256(per-commitment-point || basepoint)
    secret.add(PrivateKey(Crypto.sha256(perCommitPoint.value ++ secret.publicKey.value)))
  }

  def derivePubKey(basePoint: PublicKey, perCommitPoint: PublicKey): PublicKey = {
    // pubkey = basepoint + SHA256(per-commitment-point || basepoint)*G
    val a = PrivateKey(Crypto.sha256(perCommitPoint.value ++ basePoint.value))
    basePoint.add(a.publicKey)
  }

  def revocationPubKey(basePoint: PublicKey, perCommitPoint: PublicKey): PublicKey = {
    val a = PrivateKey(Crypto.sha256(basePoint.value ++ perCommitPoint.value))
    val b = PrivateKey(Crypto.sha256(perCommitPoint.value ++ basePoint.value))
    basePoint.multiply(a).add(perCommitPoint.multiply(b))
  }

  def revocationPrivKey(secret: PrivateKey, perCommitSecret: PrivateKey): PrivateKey = {
    val a = PrivateKey(Crypto.sha256(secret.publicKey.value ++ perCommitSecret.publicKey.value))
    val b = PrivateKey(Crypto.sha256(perCommitSecret.publicKey.value ++ secret.publicKey.value))
    secret.multiply(a).add(perCommitSecret.multiply(b))
  }
}
btcontract/wallet
app/src/main/java/fr/acinq/eclair/crypto/Generators.scala
Scala
apache-2.0
2,332
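A hedged sketch of the relationships encoded in Generators above: the per-commitment point is by definition the public key of the per-commitment secret, and derivePrivKey/derivePubKey apply the same tweak on the private and public side, so matching inputs should yield a matching key pair. The seed construction and equality checks assume the fr.acinq.bitcoin case-class API; the all-ones seed is arbitrary.

import fr.acinq.bitcoin.ByteVector32
import fr.acinq.eclair.crypto.Generators
import scodec.bits.ByteVector

object GeneratorsExample extends App {
  // Hypothetical all-ones seed; any 32-byte value would do.
  val seed = ByteVector32(ByteVector.fill(32)(1))

  // perCommitPoint is defined above as the public key of perCommitSecret.
  val secret = Generators.perCommitSecret(seed, 42L)
  val point = Generators.perCommitPoint(seed, 42L)
  assert(point == secret.publicKey)

  // Same tweak on both sides: derivePrivKey's public key should equal derivePubKey's result.
  val base = Generators.perCommitSecret(seed, 0L) // reused here simply as a basepoint secret
  val derivedPriv = Generators.derivePrivKey(base, point)
  val derivedPub = Generators.derivePubKey(base.publicKey, point)
  assert(derivedPriv.publicKey == derivedPub)
}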
/*
 * Copyright 2011-2021 Asakusa Framework Team.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.asakusafw.spark.extensions.iterativebatch.compiler
package graph

import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner

import java.io.{ DataInput, DataOutput, File }

import scala.concurrent.{ Await, Future }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.{ NullWritable, Writable }
import org.apache.spark.SparkConf

import com.asakusafw.lang.compiler.extension.directio.{ DirectFileIoConstants, DirectFileOutputModel }
import com.asakusafw.lang.compiler.model.description.{ ClassDescription, Descriptions }
import com.asakusafw.lang.compiler.model.graph.{ ExternalOutput, MarkerOperator }
import com.asakusafw.lang.compiler.model.info.ExternalOutputInfo
import com.asakusafw.lang.compiler.planning.PlanMarker
import com.asakusafw.runtime.directio.hadoop.{ HadoopDataSource, SequenceFileFormat }
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.runtime.value.IntOption
import com.asakusafw.spark.compiler.directio.DirectOutputDescription
import com.asakusafw.spark.compiler._
import com.asakusafw.spark.runtime._
import com.asakusafw.spark.runtime.graph.{ CacheOnce, DirectOutputSetup }
import com.asakusafw.spark.extensions.iterativebatch.runtime.graph.DirectOutputSetupForIterative

@RunWith(classOf[JUnitRunner])
class DirectOutputSetupForIterativeClassBuilderSpecTest extends DirectOutputSetupForIterativeClassBuilderSpec

class DirectOutputSetupForIterativeClassBuilderSpec
  extends FlatSpec
  with SparkForAll
  with FlowIdForEach
  with UsingCompilerContext
  with JobContextSugar
  with RoundContextSugar
  with TempDirForAll {

  import DirectOutputSetupForIterativeClassBuilderSpec._

  behavior of classOf[DirectOutputSetupForIterativeClassBuilder].getSimpleName

  private var root: File = _

  override def configure(conf: SparkConf): SparkConf = {
    root = createTempDirectoryForAll("directio-").toFile()
    conf.setHadoopConf("com.asakusafw.directio.test", classOf[HadoopDataSource].getName)
    conf.setHadoopConf("com.asakusafw.directio.test.path", "test")
    conf.setHadoopConf("com.asakusafw.directio.test.fs.path", root.getAbsolutePath)
  }

  def newSetup(outputs: Set[ExternalOutput])(implicit jobContext: JobContext): DirectOutputSetupForIterative = {
    implicit val context = newCompilerContext(flowId)
    val setupType = DirectOutputSetupForIterativeCompiler.compile(outputs)
    context.loadClass(setupType.getClassName)
      .getConstructor(classOf[JobContext])
      .newInstance(jobContext)
  }

  it should "delete simple" in {
    val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
      .attribute(classOf[PlanMarker], PlanMarker.GATHER).build()
    val endMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
      .attribute(classOf[PlanMarker], PlanMarker.END).build()

    val outputOperator = ExternalOutput.builder(
      "out1",
      new ExternalOutputInfo.Basic(
        ClassDescription.of(classOf[DirectFileOutputModel]),
        DirectFileIoConstants.MODULE_NAME,
        ClassDescription.of(classOf[Foo]),
        Descriptions.valueOf(
          new DirectFileOutputModel(
            DirectOutputDescription(
              basePath = "test/out1_${round}",
              resourcePattern = "*.bin",
              order = Seq.empty,
              deletePatterns = Seq("*.bin"),
              formatType = classOf[FooSequenceFileFormat])))))
      .input(ExternalOutput.PORT_NAME, ClassDescription.of(classOf[Foo]), foosMarker.getOutput)
      .output("end", ClassDescription.of(classOf[Foo]))
      .build()
    outputOperator.findOutput("end").connect(endMarker.getInput)

    val rounds = 0 to 1
    val files = rounds.map { round =>
      val file = new File(root, s"out1_${round}/testing.bin")
      file.getParentFile.mkdirs()
      file.createNewFile()
      file
    }

    implicit val jobContext = newJobContext(sc)

    val setup = newSetup(Set(outputOperator))

    val origin = newRoundContext()
    val rcs = rounds.map { round =>
      newRoundContext(
        stageId = s"round_${round}",
        batchArguments = Map("round" -> round.toString))
    }

    assert(files.forall(_.exists()) === true)

    Await.result(setup.perform(origin, rcs), Duration.Inf)

    assert(files.exists(_.exists()) === false)
  }

  it should "not delete out of scope" in {
    val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
      .attribute(classOf[PlanMarker], PlanMarker.GATHER).build()
    val endMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
      .attribute(classOf[PlanMarker], PlanMarker.END).build()

    val outputOperator = ExternalOutput.builder(
      "out2",
      new ExternalOutputInfo.Basic(
        ClassDescription.of(classOf[DirectFileOutputModel]),
        DirectFileIoConstants.MODULE_NAME,
        ClassDescription.of(classOf[Foo]),
        Descriptions.valueOf(
          new DirectFileOutputModel(
            DirectOutputDescription(
              basePath = "test/out2",
              resourcePattern = "*.bin",
              order = Seq.empty,
              deletePatterns = Seq("*.txt"),
              formatType = classOf[FooSequenceFileFormat])))))
      .input(ExternalOutput.PORT_NAME, ClassDescription.of(classOf[Foo]), foosMarker.getOutput)
      .output("end", ClassDescription.of(classOf[Foo]))
      .build()
    outputOperator.findOutput("end").connect(endMarker.getInput)

    val rounds = 0 to 1
    val files = rounds.map { round =>
      val file = new File(root, s"out2_${round}/testing.bin")
      file.getParentFile.mkdirs()
      file.createNewFile()
      file
    }

    implicit val jobContext = newJobContext(sc)

    val setup = newSetup(Set(outputOperator))

    val origin = newRoundContext()
    val rcs = rounds.map { round =>
      newRoundContext(
        stageId = s"round_${round}",
        batchArguments = Map("round" -> round.toString))
    }

    assert(files.forall(_.exists()) === true)

    Await.result(setup.perform(origin, rcs), Duration.Inf)

    assert(files.forall(_.exists()) === true)
  }
}

object DirectOutputSetupForIterativeClassBuilderSpec {

  class Foo extends DataModel[Foo] with Writable {

    val id = new IntOption()
    val group = new IntOption()

    override def reset(): Unit = {
      id.setNull()
      group.setNull()
    }

    override def copyFrom(other: Foo): Unit = {
      id.copyFrom(other.id)
      group.copyFrom(other.group)
    }

    override def readFields(in: DataInput): Unit = {
      id.readFields(in)
      group.readFields(in)
    }

    override def write(out: DataOutput): Unit = {
      id.write(out)
      group.write(out)
    }

    override def toString: String = s"Foo(id=${id}, group=${group})"
  }

  class FooSequenceFileFormat extends SequenceFileFormat[NullWritable, Foo, Foo] {

    override def getSupportedType(): Class[Foo] = classOf[Foo]

    override def createKeyObject(): NullWritable = NullWritable.get()

    override def createValueObject(): Foo = new Foo()

    override def copyToModel(key: NullWritable, value: Foo, model: Foo): Unit = {
      model.copyFrom(value)
    }

    override def copyFromModel(model: Foo, key: NullWritable, value: Foo): Unit = {
      value.copyFrom(model)
    }
  }
}
asakusafw/asakusafw-spark
extensions/iterativebatch/compiler/core/src/test/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/DirectOutputSetupForIterativeClassBuilderSpec.scala
Scala
apache-2.0
7,989
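The spec above follows ScalaTest's FlatSpec style with JUnit integration. A minimal, self-contained sketch of that testing pattern is below; the DeletePatternSpec class and its toy matches helper are hypothetical stand-ins, not the framework's actual Direct I/O glob matching.

import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class DeletePatternSpec extends FlatSpec {

  behavior of "a delete pattern"

  it should "match files with the expected extension" in {
    // Toy stand-in for extension-based matching like the "*.bin" / "*.txt" patterns above.
    def matches(pattern: String, name: String): Boolean =
      pattern == "*" + name.dropWhile(_ != '.')

    assert(matches("*.bin", "testing.bin") === true)
    assert(matches("*.txt", "testing.bin") === false)
  }
}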
package demo

import org.apache.log4j.Logger
import org.apache.spark.{SparkContext, SparkConf}

/**
 * Base trait for launching Spark batch applications.
 */
trait SparkBatchApplication {

  /** Logger */
  @transient lazy val log = Logger.getLogger(getClass.getName)

  /** Maximum processing time in ms (0 for unlimited; default: 0 ms) */
  def lifetime(): Long = 0L

  /** Adjust the configuration before processing */
  def configure(conf: SparkConf, args: Array[String]): SparkConf = conf

  /**
   * Entry point.
   *
   * @param args launch arguments
   */
  def main(args: Array[String]): Unit = {
    val appName = getClass.getName.split("\\$").last
    val conf = configure(new SparkConf().setAppName(appName), args)
    val sc = new SparkContext(conf)
    log.info("Application '" + appName + "' start.")
    process(sc, args)
    sc.stop()
    log.info("Application '" + appName + "' stopped.")
  }

  /**
   * Main processing of the Spark application.
   *
   * @param sc   Spark context
   * @param args launch arguments
   */
  def process(sc: SparkContext, args: Array[String])
}
h-mochizuki/rts-sample
spark-sample/src/main/scala/demo/SparkBatchApplication.scala
Scala
apache-2.0
1,124
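A hypothetical job built on the trait above: only process needs to be implemented, and the inherited main is what spark-submit would invoke.

package demo

import org.apache.spark.SparkContext

/** Counts the even numbers in a small RDD; purely illustrative. */
object EvenCountDemo extends SparkBatchApplication {

  override def process(sc: SparkContext, args: Array[String]): Unit = {
    val evens = sc.parallelize(1 to 100).filter(_ % 2 == 0).count()
    log.info(s"Found $evens even numbers")
  }
}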