code (string, 5-1M) | repo_name (string, 5-109) | path (string, 6-208) | language (1 value) | license (15 values) | size (int64, 5-1M)
---|---|---|---|---|---
package com.gizwits.rabbitmq
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import scala.reflect.ClassTag
object RabbitMQUtils {
/**
* Create an input stream that receives messages from a RabbitMQ queue.
* @param ssc StreamingContext object
* @param rabbitMQHost Hostname of the remote RabbitMQ server
* @param rabbitMQPort Port of remote RabbitMQ server
* @param rabbitMQQueueName Queue to subscribe to
* @param storageLevel RDD storage level. Defaults to StorageLevel.MEMORY_AND_DISK_SER_2.
*/
def createStreamFromAQueue(ssc: StreamingContext,
rabbitMQHost: String,
rabbitMQPort: Int,
virtualhost: String,
username: String,
password: String,
rabbitMQQueueName: String,
ack: Boolean,
autoDelete: Boolean,
prefetchCount: Int,
streamingtime: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = {
new RabbitMQInputDStream(
ssc,
Some(rabbitMQQueueName),
rabbitMQHost,
rabbitMQPort,
virtualhost,
username,
password,
None,
Seq(),
None,
ack,
autoDelete,
prefetchCount,
streamingtime,
storageLevel)
}
/**
* Create an input stream that receives messages from a RabbitMQ queue.
* @param jssc JavaStreamingContext object
* @param rabbitMQHost Hostname of the remote RabbitMQ server
* @param rabbitMQPort Port of remote RabbitMQ server
* @param rabbitMQQueueName Queue to subscribe to
* @param storageLevel RDD storage level. Defaults to StorageLevel.MEMORY_AND_DISK_SER_2.
*/
def createJavaStreamFromAQueue(jssc: JavaStreamingContext,
rabbitMQHost: String,
rabbitMQPort: Int,
virtualhost: String,
username: String,
password: String,
rabbitMQQueueName: String,
ack: Boolean,
autoDelete: Boolean,
prefetchCount: Int,
streamingtime: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): JavaReceiverInputDStream[String] = {
// Materialize a ClassTag[String] for the implicit conversion to JavaReceiverInputDStream.
implicit val cm: ClassTag[String] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
createStreamFromAQueue(
jssc.ssc,
rabbitMQHost,
rabbitMQPort,
virtualhost,
username,
password,
rabbitMQQueueName,
ack,
autoDelete,
prefetchCount,
streamingtime,
storageLevel)
}
/**
* Create an input stream that receives messages from a RabbitMQ exchange, bound via routing keys.
* @param ssc StreamingContext object
* @param rabbitMQHost Hostname of the remote RabbitMQ server
* @param rabbitMQPort Port of remote RabbitMQ server
* @param exchangeName Exchange name to subscribe to
* @param routingKeys Routing keys to subscribe to
* @param storageLevel RDD storage level. Defaults to StorageLevel.MEMORY_AND_DISK_SER_2.
*/
def createStreamFromRoutingKeys(ssc: StreamingContext,
rabbitMQQueueName: Option[String],
rabbitMQHost: String,
rabbitMQPort: Int,
virtualhost: String,
username: String,
password: String,
exchangeName: String,
routingKeys: Seq[String],
DirectExchangeType: Option[String],
ack: Boolean,
autoDelete: Boolean,
prefetchCount: Int,
streamingtime: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = {
new RabbitMQInputDStream(
ssc,
rabbitMQQueueName,
rabbitMQHost,
rabbitMQPort,
virtualhost,
username,
password,
Some(exchangeName),
routingKeys,
DirectExchangeType,
ack,
autoDelete,
prefetchCount,
streamingtime,
storageLevel)
}
/**
* Create an input stream that receives messages from a RabbitMQ exchange, bound via routing keys.
* @param jssc JavaStreamingContext object
* @param rabbitMQHost Hostname of the remote RabbitMQ server
* @param rabbitMQPort Port of remote RabbitMQ server
* @param exchangeName Exchange name to subscribe to
* @param routingKeys Routing keys to subscribe to
* @param storageLevel RDD storage level. Defaults to StorageLevel.MEMORY_AND_DISK_SER_2.
*/
def createJavaStreamFromRoutingKeys(jssc: JavaStreamingContext,
rabbitMQQueueName: String,
rabbitMQHost: String,
rabbitMQPort: Int,
virtualhost: String,
username: String,
password: String,
exchangeName: String,
routingKeys: java.util.List[String],
DirectExchangeType: String,
ack: Boolean,
autoDelete: Boolean,
prefetchCount: Int,
streamingtime: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): JavaReceiverInputDStream[String] = {
// Materialize a ClassTag[String] for the implicit conversion to JavaReceiverInputDStream.
implicit val cm: ClassTag[String] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
createStreamFromRoutingKeys(jssc.ssc, Option(rabbitMQQueueName), rabbitMQHost, rabbitMQPort,
virtualhost,
username,
password,
exchangeName,
scala.collection.JavaConversions
.asScalaBuffer(routingKeys),
Option(DirectExchangeType),
ack,
autoDelete,
prefetchCount,
streamingtime,
storageLevel)
}
}
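A minimal usage sketch for the queue-based API above. All connection values (host, credentials, queue name, batch interval) are illustrative placeholders, not part of the library:

import com.gizwits.rabbitmq.RabbitMQUtils
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object RabbitMQStreamExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("rabbitmq-stream-example").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(5))
    // Hypothetical local broker and queue; adjust to your environment.
    val stream = RabbitMQUtils.createStreamFromAQueue(
      ssc,
      rabbitMQHost = "localhost",
      rabbitMQPort = 5672,
      virtualhost = "/",
      username = "guest",
      password = "guest",
      rabbitMQQueueName = "events",
      ack = true,
      autoDelete = false,
      prefetchCount = 100,
      streamingtime = 5)
    stream.count().print()
    ssc.start()
    ssc.awaitTermination()
  }
}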
| Bestfeel/spark-rabbitmq-stream | src/main/scala/com/gizwits/rabbitmq/RabbitMQUtils.scala | Scala | apache-2.0 | 6,948 |
package org.machine.engine.encoder.json
import org.scalatest._
import org.scalatest.mock._
import org.machine.engine.graph.nodes.{Association}
import org.machine.engine.graph.commands.{CommandScopes, EngineCmdResult, QueryCmdResult, UpdateCmdResult, DeleteCmdResult, InsertCmdResult}
class OutboundJSONSerializerSpec extends FunSpec with Matchers with EasyMockSugar{
describe("Serializing Outbound JSON Messages"){
it ("should serialze a QueryCmdResult"){
val elements = Seq(Association("1", "a1", Map.empty[String,Any], "start","stop","yesterday", "today"),
Association("2", "a2", Map.empty[String,Any], "start","stop", "yesterday", "today"),
Association("3", "a3", Map.empty[String,Any], "start","stop", "yesterday", "today"))
val json = OutboundJSONSerializer.serialize(QueryCmdResult[Association](elements))
val expected = """
|{
| "status": "OK",
| "Associations":[
| {
| "id":"1",
| "associationType":"a1",
| "startingElementId":"start",
| "endingElementId":"stop",
| "creationTime":"yesterday",
| "lastModifiedTime":"today",
| "fields":[]
| },
| {
| "id":"2",
| "associationType":"a2",
| "startingElementId":"start",
| "endingElementId":"stop",
| "creationTime":"yesterday",
| "lastModifiedTime":"today",
| "fields":[]
| },
| {
| "id":"3",
| "associationType":"a3",
| "startingElementId":"start",
| "endingElementId":"stop",
| "creationTime":"yesterday",
| "lastModifiedTime":"today",
| "fields":[]
| }
| ]
|}
"""
strip(json) should equal(strip(expected))
}
it ("should serialize a UpdateCmdResult"){
val json = OutboundJSONSerializer.serialize(UpdateCmdResult[String]("identifier"))
val expected = """
|{
| "status": "OK",
| "id": "identifier"
|}
"""
strip(json) should equal(strip(expected))
}
it ("should serialize a DeleteCmdResult"){
val json = OutboundJSONSerializer.serialize(DeleteCmdResult[String]("identifier"))
val expected = """
|{
| "status": "OK",
| "id": "identifier"
|}
"""
strip(json) should equal(strip(expected))
}
it ("should serialize a InsertCmdResult"){
val json = OutboundJSONSerializer.serialize(InsertCmdResult[String]("identifier"))
val expected = """
|{
| "status": "OK",
| "id": "identifier"
|}
"""
strip(json) should equal(strip(expected))
}
}
def strip(str: String):String = str.stripMargin.replaceAll("\\t","").replaceAll(" ","").replaceAll("\\n","")
}
| sholloway/graph-engine | src/test/scala/org/machine/engine/encoder/json/OutboundJSONSerializerSpec.scala | Scala | mit | 2,844 |
package org.vitrivr.adampro.data.index.structures.va
import org.apache.spark.sql.{DataFrame, Row}
import org.vitrivr.adampro.data.datatypes.vector.Vector
import org.vitrivr.adampro.data.datatypes.vector.Vector._
import org.vitrivr.adampro.query.tracker.QueryTracker
import org.vitrivr.adampro.data.index.Index._
import org.vitrivr.adampro.data.index.structures.IndexTypes
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.query.distance.DistanceFunction
import org.apache.spark.ml.linalg.{DenseVector, Vectors}
/**
* ADAMpro
*
* Ivan Giangreco
* September 2016
*
* see H. Ferhatosmanoglu, E. Tuncel, D. Agrawal, A. El Abbadi (2006): High dimensional nearest neighbor searching. Information Systems.
*/
class VAPlusIndex(override val indexname: IndexName)(@transient override implicit val ac: SharedComponentContext) extends VAIndex(indexname)(ac) {
override lazy val indextypename: IndexTypeName = IndexTypes.VAPLUSINDEX
override lazy val lossy: Boolean = meta.asInstanceOf[VAPlusIndexMetaData].approximate
override lazy val confidence: Float = if (meta.asInstanceOf[VAPlusIndexMetaData].approximate) {
0.9.toFloat
} else {
1.0.toFloat
}
override lazy val score: Float = if (meta.asInstanceOf[VAPlusIndexMetaData].approximate) {
0.9.toFloat
} else {
1.0.toFloat
}
override def scan(data: DataFrame, q: MathVector, distance: DistanceFunction, options: Map[String, String], k: Int)(tracker : QueryTracker): DataFrame = {
val queries = ac.spark.createDataFrame(Seq(Tuple1(Vectors.dense(q.toArray.map(_.toDouble))))).toDF("queries")
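// Project the query vector into the PCA space that was used when building the index (see VAPlusIndexMetaData.pca).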
val adjustedQuery: Array[Row] = meta.asInstanceOf[VAPlusIndexMetaData].pca.setInputCol("queries").setOutputCol("pcaQueries").transform(queries).collect()
super.scan(data, new DenseMathVector(adjustedQuery.head.getAs[DenseVector]("pcaQueries").values.map(Vector.conv_double2vb(_))), distance, options, k)(tracker)
}
}
| dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/data/index/structures/va/VAPlusIndex.scala | Scala | mit | 1,949 |
package slick.codegen
import java.net.URI
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration.Duration
import slick.basic.DatabaseConfig
import slick.{model => m}
import slick.jdbc.JdbcProfile
import slick.model.Model
import slick.util.ConfigExtensionMethods.configExtensionMethods
/**
* A customizable code generator for working with Slick.
*
* For usage information please see the corresponding part of the Slick documentation.
*
* The implementation is structured into a small hierarchy of sub-generators responsible
* for different fragments of the complete output. The implementation of each
* sub-generator can be swapped out for a customized one by overriding the corresponding
* factory method. SourceCodeGenerator contains a factory method Table, which it uses to
* generate a sub-generator for each table. The sub-generator Table in turn contains
* sub-generators for Table classes, entity case classes, columns, key, indices, etc.
* Custom sub-generators can easily be added as well.
*
* Within the sub-generators the relevant part of the Slick data `model` can
* be accessed to drive the code generation.
*
* Of course it makes sense to integrate this into your build process.
* @param model Slick data model for which code should be generated.
*/
class SourceCodeGenerator(model: m.Model)
extends AbstractSourceCodeGenerator(model) with OutputHelpers{
// "Tying the knot": making virtual classes concrete
type Table = TableDef
def Table = new TableDef(_)
class TableDef(model: m.Table) extends super.TableDef(model){
// Using defs instead of (caching) lazy vals here to provide a consistent interface to the user.
// Performance should really not be critical in the code generator. Models shouldn't be huge.
// Also lazy vals don't inherit docs from defs
type EntityType = EntityTypeDef
def EntityType = new EntityType{}
type PlainSqlMapper = PlainSqlMapperDef
def PlainSqlMapper = new PlainSqlMapper{}
type TableClass = TableClassDef
def TableClass = new TableClass{}
type TableValue = TableValueDef
def TableValue = new TableValue{}
type Column = ColumnDef
def Column = new Column(_)
type PrimaryKey = PrimaryKeyDef
def PrimaryKey = new PrimaryKey(_)
type ForeignKey = ForeignKeyDef
def ForeignKey = new ForeignKey(_)
type Index = IndexDef
def Index = new Index(_)
}
}
/** A runnable class to execute the code generator without further setup */
object SourceCodeGenerator {
def run(profile: String, jdbcDriver: String, url: String, outputDir: String, pkg: String, user: Option[String], password: Option[String], ignoreInvalidDefaults: Boolean, outputToMultipleFiles: Boolean): Unit =
run(profile, jdbcDriver, url, outputDir, pkg, user, password, ignoreInvalidDefaults, None, outputToMultipleFiles)
def run(profile: String, jdbcDriver: String, url: String, outputDir: String, pkg: String, user: Option[String], password: Option[String], ignoreInvalidDefaults: Boolean, codeGeneratorClass: Option[String], outputToMultipleFiles: Boolean): Unit = {
val profileInstance: JdbcProfile =
Class.forName(profile + "$").getField("MODULE$").get(null).asInstanceOf[JdbcProfile]
val dbFactory = profileInstance.api.Database
val db = dbFactory.forURL(url, driver = jdbcDriver,
user = user.getOrElse(null), password = password.getOrElse(null), keepAliveConnection = true)
try {
val m = Await.result(db.run(profileInstance.createModel(None, ignoreInvalidDefaults)(ExecutionContext.global).withPinnedSession), Duration.Inf)
val codeGenerator = codeGeneratorClass.getOrElse("slick.codegen.SourceCodeGenerator")
val sourceGeneratorClass = Class.forName(codeGenerator).asInstanceOf[Class[_ <: SourceCodeGenerator]]
val generatorInstance = sourceGeneratorClass.getConstructor(classOf[Model]).newInstance(m)
if(outputToMultipleFiles)
generatorInstance.writeToMultipleFiles(profile, outputDir, pkg)
else
generatorInstance.writeToFile(profile, outputDir, pkg)
} finally db.close
}
def run(uri: URI, outputDir: Option[String], ignoreInvalidDefaults: Boolean = true, outputToMultipleFiles: Boolean = false): Unit = {
val dc = DatabaseConfig.forURI[JdbcProfile](uri)
val pkg = dc.config.getString("codegen.package")
val out = outputDir.getOrElse(dc.config.getStringOr("codegen.outputDir", "."))
val profile = if(dc.profileIsObject) dc.profileName else "new " + dc.profileName
try {
val m = Await.result(dc.db.run(dc.profile.createModel(None, ignoreInvalidDefaults)(ExecutionContext.global).withPinnedSession), Duration.Inf)
val generator = new SourceCodeGenerator(m)
if(outputToMultipleFiles)
generator.writeToMultipleFiles(profile, out, pkg)
else
generator.writeToFile(profile, out, pkg)
} finally dc.db.close
}
def main(args: Array[String]): Unit = {
args.toList match {
case uri :: Nil =>
run(new URI(uri), None)
case uri :: outputDir :: Nil =>
run(new URI(uri), Some(outputDir))
case profile :: jdbcDriver :: url :: outputDir :: pkg :: Nil =>
run(profile, jdbcDriver, url, outputDir, pkg, None, None, true, None, false)
case profile :: jdbcDriver :: url :: outputDir :: pkg :: user :: password :: Nil =>
run(profile, jdbcDriver, url, outputDir, pkg, Some(user), Some(password), true, None, false)
case profile:: jdbcDriver :: url :: outputDir :: pkg :: user :: password :: ignoreInvalidDefaults :: Nil =>
run(profile, jdbcDriver, url, outputDir, pkg, Some(user), Some(password), ignoreInvalidDefaults.toBoolean, None, false)
case profile:: jdbcDriver :: url :: outputDir :: pkg :: user :: password :: ignoreInvalidDefaults :: codeGeneratorClass :: outputToMultipleFiles:: Nil =>
run(profile, jdbcDriver, url, outputDir, pkg, Some(user), Some(password), ignoreInvalidDefaults.toBoolean, Some(codeGeneratorClass), outputToMultipleFiles.toBoolean)
case _ => {
println("""
|Usage:
| SourceCodeGenerator configURI [outputDir]
| SourceCodeGenerator profile jdbcDriver url outputDir pkg [user password]
|
|Options:
| configURI: A URL pointing to a standard database config file (a fragment is
| resolved as a path in the config), or just a fragment used as a path in
| application.conf on the class path
| profile: Fully qualified name of Slick profile class, e.g. "slick.jdbc.H2Profile"
| jdbcDriver: Fully qualified name of jdbc driver class, e.g. "org.h2.Driver"
| url: JDBC URL, e.g. "jdbc:postgresql://localhost/test"
| outputDir: Place where the package folder structure should be put
| pkg: Scala package the generated code should be placed in
| user: database connection user name
| password: database connection password
|
|When using a config file, in addition to the standard config parameters from
|slick.basic.DatabaseConfig you can set "codegen.package" and
|"codegen.outputDir". The latter can be overridden on the command line.
""".stripMargin.trim)
System.exit(1)
}
}
}
}
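A hedged sketch of the factory-override customization described in the class scaladoc above; the column and type names are illustrative only, not part of Slick:

import slick.{model => m}

// Swap the generated Scala type for one hypothetical column by overriding the
// Table and Column sub-generator factories, as the scaladoc describes.
class CustomizedCodeGenerator(model: m.Model) extends SourceCodeGenerator(model) {
  override def Table = new Table(_) {
    override def Column = new Column(_) {
      // `model` here is the column's Slick model, shadowing the outer one.
      override def rawType =
        if (model.name == "SOME_SPECIAL_COLUMN") "MyCustomType" else super.rawType
    }
  }
}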
| nafg/slick | slick-codegen/src/main/scala/slick/codegen/SourceCodeGenerator.scala | Scala | bsd-2-clause | 7,492 |
package smtlib
package theories
package experimental
import trees.Terms._
import Operations._
object Strings {
/** The strings and format are taken from here:
* http://cvc4.cs.nyu.edu/wiki/Strings#Examples
*/
private val StringConcat = "str.++"
private val StringLength = "str.len"
private val StringAt = "str.at"
private val StringSubstring = "str.substr"
private val StringInRegex = "str.in.re"
private val StringToRegex = "str.to.re"
private val StringContains = "str.contains"
private val StringIndexOf = "str.indexof"
private val StringReplace = "str.replace"
private val StringPrefixOf = "str.prefixof"
private val StringSuffixOf = "str.suffixof"
private val StringStringToInt = "str.to.int"
private val StringIntToString = "int.to.str"
private val RegexConcat = "re.++"
private val RegexUnion = "re.union"
private val RegexInter = "re.inter"
private val RegexKleeneStar = "re.*"
private val RegexKleeneCross = "re.+"
private val RegexKleeneOpt = "re.opt"
private val RegexRange = "re.range"
private val RegexLoop = "re.loop"
private val RegexLoop2 = "re.loop2"
private val RegexEmpty = "re.nostr"
private val RegexAllChar = "re.allchar"
object StringSort {
def apply(): Sort = {
Sort(Identifier(SSymbol("String")))
}
def unapply(sort: Sort): Boolean = sort match {
case Sort(Identifier(SSymbol("String"), Seq()), Seq()) => true
case _ => false
}
}
object StringLit {
def apply(value: String): Term = SString(value)
def unapply(term: Term): Option[String] = term match {
case SString(value) => Some(value)
case _ => None
}
}
/** Length of string. */
object Length extends Operation1 { override val name = StringLength }
/** String concatenation takes at least 2 arguments. */
object Concat extends OperationN2 { override val name = StringConcat }
/** Character at a given index in a string. The first argument is a string term and the second is a natural number. Indexing starts from 0. */
object At extends Operation2 { override val name = StringAt }
/** Substring given string, start and length/offset */
object Substring extends Operation3 { override val name = StringSubstring }
/** Membership Constraint where first arg is a string term and second a regular expression. */
object InRegex extends Operation2 { override val name = StringInRegex }
/** String to Regular Expression Conversion.
* It returns a regular expression that contains only the string s.
*/
object ToRegex extends Operation1 { override val name = StringToRegex }
object Regex {
/** Membership constraint. See [InRegex]. */
val In = InRegex
/** Membership constraint. See [ToRegex]. */
val To = ToRegex
lazy val Star = KleeneStar
lazy val * = KleeneStar
lazy val Cross = KleeneCross
lazy val Plus = KleeneCross
lazy val + = KleeneCross
lazy val ? = Opt
lazy val NoStr = Empty
/** Regular Expression Concatenation. */
object Concat extends OperationN2 { override val name = RegexConcat }
/** Regular Expression Alternation. */
object Union extends OperationN2 { override val name = RegexUnion }
/** Regular Expression Intersection. */
object Inter extends OperationN2 { override val name = RegexInter }
/** Regular Expression Kleene-Star (equivalent to Loop(r, 0)) */
object KleeneStar extends Operation1 { override val name = RegexKleeneStar }
/** Regular Expression Kleene-Cross (equivalent to Loop(r, 1)) */
object KleeneCross extends Operation1 { override val name = RegexKleeneCross }
/** Regular Expression Option marker (equivalent to Loop(r, 0, 1)) */
object Opt extends Operation1 { override val name = RegexKleeneOpt }
/** Regular Expression Range where arguments s, t are single characters in double quotes, e.g. "a", "b". It returns a regular expression that contains any character between s and t.*/
object Range extends Operation2 { override val name = RegexRange }
/** Regular Expression Loop with arguments (r, l, u) where r is a regular expression, l is a non-negative constant integer, and u is an optional non-negative constant integer. It returns a regular expression that contains at least l repetitions of r and at most u repetitions of r. If l >= u, it returns exactly l repetitions of r.*/
object Loop {
def apply(r: Term, minRepetitions: Term, maxRepetitions: Term): Term =
FunctionApplication(
QualifiedIdentifier(Identifier(SSymbol(RegexLoop))),
Seq(r, minRepetitions, maxRepetitions)
)
def apply(r: Term, minRepetitions: Term): Term =
FunctionApplication(
QualifiedIdentifier(Identifier(SSymbol(RegexLoop))),
Seq(r, minRepetitions)
)
def unapplySeq(term: Term): Option[Seq[Term]] = term match {
case FunctionApplication(
QualifiedIdentifier(
Identifier(SSymbol(RegexLoop), Seq()),
None
), seqTerm) if seqTerm.length == 2 || seqTerm.length == 3 => Some(seqTerm)
case _ => None
}
}
/** Empty Regular Expression */
object Empty extends Operation0 { override val name = RegexEmpty }
/** All characters Regular Expression */
object AllChar extends Operation0 { override val name = RegexAllChar }
}
/**
The following functions are under the --strings-exp option. They are under active refinement. Once they are stable, we will move them to the default mode. Please let us know if you have any suggestions.
*/
object Experimental {
/** String Contain. Arguments (s,t) where s and t are string terms. It returns true if the string s contains the string t. This function determines whether the string t can be found within the string s, returning true or false as appropriate. */
object Contains extends Operation2 { override val name = StringContains }
/** String IndexOf. Arguments (s, t, i) where s is a string, t is a non-empty string and i is a non-negative integer. This function returns the position of the first occurrence of the specified value t in the string s after the index i. It returns -1 if the value to search for never occurs. */
object IndexOf extends Operation3 { override val name = StringIndexOf }
/** String Replacement. Arguments (s, t1, t2) where s, t1 and t2 are string terms, t1 is non-empty. This function searches the string s for the specified value t1, and returns a new string where the first occurrence of the specified value t1 is replaced by the string t2. */
object Replace extends Operation3 { override val name = StringReplace }
/** String PrefixOf. Arguments (s, t) where s and t are string terms. It returns true if the string s is a prefix of the string t. */
object PrefixOf extends Operation2 { override val name = StringPrefixOf }
/** String SuffixOf. Arguments (s, t) where s and t are string terms. It returns true if the string s is a suffix of the string t. */
object SuffixOf extends Operation2 { override val name = StringSuffixOf }
/** String To Integer Conversion. Argument s where s is a string term. It returns the corresponding natural number if s is valid; otherwise, it returns -1. */
object StringToInt extends Operation1 { override val name = StringStringToInt }
/** Integer To String Conversion. Argument s where i is an integer term. It returns the corresponding string if i is a natural number; otherwise, it returns an empty string. */
object IntToString extends Operation1 { override val name = StringIntToString }
}
}
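A small, hedged sketch of composing these constructors into SMT-LIB terms; the variable name "x" and the string literal are illustrative:

import smtlib.trees.Terms._
import smtlib.theories.experimental.Strings._

object StringsExample extends App {
  // Builds the term (str.len (str.++ "foo" x)).
  val x: Term = QualifiedIdentifier(Identifier(SSymbol("x")))
  val lengthOfConcat = Length(Concat(StringLit("foo"), x))
  println(lengthOfConcat)
}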
| regb/scala-smtlib | src/main/scala/smtlib/theories/experimental/Strings.scala | Scala | mit | 7,774 |
import scala.reflect.io._
import java.net.URLClassLoader
object Test extends App {
val jarsOrDirectories = Set("partest.lib", "partest.reflect", "partest.comp") map sys.props
object AllowedMissingClass {
// Some classes in scala-compiler.jar have references to jline / ant classes, which seem to be
// not on the classpath. We just skip over those classes.
// PENDING: for now we also allow missing $anonfun classes: the optimizer may eliminate some closures
// that are referred to in EnclosingClass attributes. SI-9136
val allowedMissingPackages = Set("jline", "org.apache.tools.ant", "$anonfun")
def ok(t: Throwable) = {
allowedMissingPackages.exists(p => t.getMessage.replace('/', '.').contains(p))
}
def unapply(t: Throwable): Option[Throwable] = t match {
case _: NoClassDefFoundError | _: ClassNotFoundException | _: TypeNotPresentException if ok(t) => Some(t)
case _ => None
}
}
jarsOrDirectories foreach testClasses
def testClasses(jarOrDirectory: String): Unit = {
val classPath = AbstractFile.getDirectory(new java.io.File(jarOrDirectory))
val basePath = classPath.path + "/"
def flatten(f: AbstractFile, s: String): Iterator[(AbstractFile, String)] =
if (f.isClassContainer) f.iterator.map(ch => (ch, (if(s.isEmpty) "" else s + "/") + ch.name)).flatMap((flatten _).tupled)
else Iterator((f, s))
val classFullNames = flatten(classPath, "").filter(_._1.hasExtension("class")).map(_._2.replace("/", ".").replaceAll(".class$", ""))
// it seems that Class objects can only be GC'd together with their class loader
// (http://stackoverflow.com/questions/2433261/when-and-how-are-classes-garbage-collected-in-java)
// if we just use the same class loader for the entire test (Class.forName), we run out of PermGen
// even with that, we still need a PermGen of 90M or so; the default 64 is not enough. I tried
// using one class loader per 100 classes, but that didn't help, the classes didn't get GC'd.
val classLoader = new URLClassLoader(Array(classPath.toURL))
val faulty = new collection.mutable.ListBuffer[(String, Throwable)]
def tryGetClass(name: String) = try {
Some[Class[_]](classLoader.loadClass(name))
} catch {
case AllowedMissingClass(_) => None
}
for (name <- classFullNames; cls <- tryGetClass(name)) {
try {
cls.getEnclosingMethod
cls.getEnclosingClass
cls.getEnclosingConstructor
cls.getDeclaredClasses
} catch {
case AllowedMissingClass(_) =>
case t: Throwable => faulty += ((name, t))
}
}
if (faulty.nonEmpty)
println(faulty.toList mkString "\\n")
}
}
| felixmulder/scala | test/files/jvm/innerClassEnclMethodJavaReflection.scala | Scala | bsd-3-clause | 2,722 |
package SMART
import Chisel._
// Implementing the XY Routing Unit
class RoutingUnit() extends Module {
val io = new Bundle {
val xHops = UInt(INPUT, width = X_HOP_WIDTH)
val yHops = UInt(INPUT, width = Y_HOP_WIDTH)
val xDir = UInt(INPUT, width = 1)
val yDir = UInt(INPUT, width = 1)
val outport = UInt(OUTPUT, width = NUM_OF_DIRS)
val xHopsNext = UInt(OUTPUT, width = X_HOP_WIDTH)
val yHopsNext = UInt(OUTPUT, width = Y_HOP_WIDTH)
val xDirNext = UInt(OUTPUT, width = 1)
val yDirNext = UInt(OUTPUT, width = 1)
}
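// Dimension-ordered (XY) routing: consume X hops first, then Y hops, then eject at the local port.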
when (io.xHops =/= UInt(0)) {
io.xHopsNext := io.xHops - UInt(1)
io.yHopsNext := io.yHops
when (io.xDir === UInt(1)) {
io.outport := EAST_OH
} .otherwise {
io.outport := WEST_OH
}
} .elsewhen (io.yHops =/= UInt(0)) {
io.xHopsNext := io.xHops
io.yHopsNext := io.yHops - UInt(1)
when (io.yDir === UInt(1)) {
io.outport := NORTH_OH
} .otherwise {
io.outport := SOUTH_OH
}
} .otherwise {
io.outport := LOCAL_OH
io.xHopsNext := io.xHops
io.yHopsNext := io.yHops
}
io.yDirNext := io.yDir
io.xDirNext := io.xDir
}
class RoutingUnitTests(c: RoutingUnit) extends Tester(c) {
poke(c.io.xHops, 3)
poke(c.io.yHops, 3)
poke(c.io.xDir, 0)
poke(c.io.yDir, 1)
expect(c.io.xHopsNext, 2)
expect(c.io.yHopsNext, 3)
expect(c.io.outport, WEST_OH.litValue())
step(1)
poke(c.io.xHops, 2)
poke(c.io.yHops, 2)
poke(c.io.xDir, 1)
poke(c.io.yDir, 1)
peek(c.io)
step(1)
poke(c.io.xHops, 0)
poke(c.io.yHops, 2)
poke(c.io.xDir, 1)
poke(c.io.yDir, 1)
peek(c.io)
step(1)
}
// class RoutingUnit() extends Module {
// val io = new Bundle {
// val curCoord = new Coordinate().asInput
// val destCoord = new Coordinate().asInput
// val outDir = UInt(dir = OUTPUT, width = NUM_OF_DIRS)
// }
// when (io.curCoord.x != io.destCoord.x) {
// when (io.curCoord.x > io.destCoord.x) {
// io.outDir := WEST_OH
// } .otherwise {
// io.outDir := EAST_OH
// }
// } .elsewhen (io.curCoord.y != io.destCoord.y) {
// when (io.curCoord.y > io.destCoord.y) {
// io.outDir := SOUTH_OH
// } .otherwise {
// io.outDir := NORTH_OH
// }
// } .otherwise {
// io.outDir := LOCAL_OH
// }
// }
// class RoutingUnitTests(c: RoutingUnit) extends Tester(c) {
// poke(c.io.curCoord.x, 0)
// poke(c.io.curCoord.y, 0)
// poke(c.io.destCoord.x, 0)
// poke(c.io.destCoord.y, 0)
// expect(c.io.outDir, 1)
// poke(c.io.curCoord.x, 0)
// poke(c.io.curCoord.y, 0)
// poke(c.io.destCoord.x, 0)
// poke(c.io.destCoord.y, 1)
// peek(c.io.outDir)
// poke(c.io.curCoord.x, 0)
// poke(c.io.curCoord.y, 0)
// poke(c.io.destCoord.x, 1)
// poke(c.io.destCoord.y, 0)
// peek(c.io.outDir)
// poke(c.io.curCoord.x, 0)
// poke(c.io.curCoord.y, 0)
// poke(c.io.destCoord.x, 1)
// poke(c.io.destCoord.y, 1)
// peek(c.io.outDir)
// poke(c.io.curCoord.x, 1)
// poke(c.io.curCoord.y, 1)
// poke(c.io.destCoord.x, 0)
// poke(c.io.destCoord.y, 0)
// peek(c.io.outDir)
// }
// }
| hyoukjun/OpenSMART | Backend/Chisel/RoutingUnit.scala | Scala | mit | 3,512 |
package controllers
import javax.inject.Inject
import play.api.mvc._
/**
* This controller creates an `Action` to handle HTTP requests to the
* application's home page.
*/
class HomeController @Inject()(cc: ControllerComponents) extends AbstractController(cc) {
/**
* Create an Action to render an HTML page with a welcome message.
* The configuration in the `routes` file means that this method
* will be called when the application receives a `GET` request with
* a path of `/`.
*/
def index = Action { request =>
Ok("Got request [" + request + "]")
}
def sum = Action {
Ok(views.html.index("Your new xxx is ready."))
}
def action = Action {
request => Ok("ok"+request.uri)
}
}
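For reference, a hypothetical conf/routes mapping consistent with the scaladoc above (illustrative, not taken from this project):

GET     /        controllers.HomeController.index
GET     /sum     controllers.HomeController.sum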
| rafajpet/iot-pood | iot-pood-web/app/controllers/HomeController.scala | Scala | mit | 733 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.carbondata.restructure
import java.io.File
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.apache.spark.sql.test.TestQueryExecutor
import org.apache.spark.util.AlterTableUtil
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.metadata.CarbonMetadata
class AlterTableRevertTestCase extends Spark2QueryTest with BeforeAndAfterAll {
override def beforeAll() {
sql("drop table if exists reverttest")
sql(
"CREATE TABLE reverttest(intField int,stringField string,timestampField timestamp," +
"decimalField decimal(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data4.csv' INTO TABLE reverttest " +
s"options('FILEHEADER'='intField,stringField,timestampField,decimalField')")
}
test("test to revert new added columns on failure") {
intercept[RuntimeException] {
hiveClient.runSqlHive("set hive.security.authorization.enabled=true")
sql(
"Alter table reverttest add columns(newField string) TBLPROPERTIES" +
"('DEFAULT.VALUE.newField'='def')")
hiveClient.runSqlHive("set hive.security.authorization.enabled=false")
intercept[AnalysisException] {
sql("select newField from reverttest")
}
}
}
test("test to revert table name on failure") {
intercept[RuntimeException] {
new File(TestQueryExecutor.warehouse + "/reverttest_fail").mkdir()
sql("alter table reverttest rename to reverttest_fail")
new File(TestQueryExecutor.warehouse + "/reverttest_fail").delete()
}
val result = sql("select * from reverttest").count()
assert(result.equals(1L))
}
test("test to revert drop columns on failure") {
intercept[Exception] {
hiveClient.runSqlHive("set hive.security.authorization.enabled=true")
sql("Alter table reverttest drop columns(decimalField)")
hiveClient.runSqlHive("set hive.security.authorization.enabled=false")
}
assert(sql("select decimalField from reverttest").count().equals(1L))
}
test("test to revert changed datatype on failure") {
intercept[Exception] {
hiveClient.runSqlHive("set hive.security.authorization.enabled=true")
sql("Alter table reverttest change intField intfield bigint")
hiveClient.runSqlHive("set hive.security.authorization.enabled=false")
}
assert(
sql("select intfield from reverttest").schema.fields.apply(0).dataType.simpleString == "int")
}
test("test to check if dictionary files are deleted for new column if query fails") {
intercept[RuntimeException] {
hiveClient.runSqlHive("set hive.security.authorization.enabled=true")
sql(
"Alter table reverttest add columns(newField string) TBLPROPERTIES" +
"('DEFAULT.VALUE.newField'='def')")
hiveClient.runSqlHive("set hive.security.authorization.enabled=false")
intercept[AnalysisException] {
sql("select newField from reverttest")
}
val carbonTable = CarbonMetadata.getInstance.getCarbonTable("default_reverttest")
assert(new File(carbonTable.getMetaDataFilepath).listFiles().length < 6)
}
}
test("test to check if exception during rename table does not throws table not found exception") {
val locks = AlterTableUtil
.validateTableAndAcquireLock("default", "reverttest", List("meta.lock"))(sqlContext
.sparkSession)
val exception = intercept[RuntimeException] {
sql("alter table reverttest rename to revert")
}
AlterTableUtil.releaseLocks(locks)
assert(exception.getMessage == "Alter table rename table operation failed: Table is locked for updation. Please try after some time")
}
override def afterAll() {
hiveClient.runSqlHive("set hive.security.authorization.enabled=false")
sql("drop table if exists reverttest")
}
}
| shivangi1015/incubator-carbondata | integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/AlterTableRevertTestCase.scala | Scala | apache-2.0 | 4,710 |
package actors
import models.Recipe
import akka.actor._
import play.api.libs.json._
class RecipesActor extends Actor {
def receive = {
case ListRecipes() =>
sender() ! Json.toJson(Recipe.list)
}
implicit val recipeWrites = new Writes[Recipe] {
def writes(recipe: Recipe) = Json.obj(
"id" -> recipe.id,
"name" -> recipe.name,
"category" -> recipe.category,
"ingredients" -> recipe.ingredients,
"directions" -> recipe.directions,
"rating" -> recipe.rating,
"imgUrl" -> recipe.imgUrl
)
}
}
case class ListRecipes()
| MatthieuNICOLAS/play-with-dart | app/actors/RecipesActor.scala | Scala | gpl-3.0 | 620 |
package com.sksamuel.elastic4s.requests.get
import java.net.URLEncoder
import com.fasterxml.jackson.databind.JsonNode
import com.sksamuel.elastic4s.requests.common.FetchSourceContextQueryParameterFn
import com.sksamuel.elastic4s.requests.indexes.VersionTypeHttpString
import com.sksamuel.elastic4s.{ElasticError, ElasticRequest, Handler, HitReader, HttpEntity, HttpResponse, ResponseHandler}
import com.sksamuel.exts.Logging
import scala.util.Try
case class MultiGetResponse(docs: Seq[GetResponse]) {
def items: Seq[GetResponse] = docs
def size: Int = docs.size
def to[T: HitReader]: IndexedSeq[T] = docs.map(_.to[T]).toIndexedSeq
def safeTo[T: HitReader]: IndexedSeq[Try[T]] = docs.map(_.safeTo[T]).toIndexedSeq
}
trait GetHandlers {
implicit object MultiGetHandler extends Handler[MultiGetRequest, MultiGetResponse] with Logging {
override def responseHandler: ResponseHandler[MultiGetResponse] = new ResponseHandler[MultiGetResponse] {
override def handle(response: HttpResponse): Either[ElasticError, MultiGetResponse] = response.statusCode match {
case 404 | 500 => sys.error(response.toString)
case _ => Right(ResponseHandler.fromResponse[MultiGetResponse](response))
}
}
override def build(request: MultiGetRequest): ElasticRequest = {
val body = MultiGetBodyBuilder(request).string()
val entity = HttpEntity(body, "application/json")
val params = scala.collection.mutable.Map.empty[String, String]
request.preference.foreach(params.put("preference", _))
request.refresh.map(_.toString).foreach(params.put("refresh", _))
request.realtime.map(_.toString).foreach(params.put("realtime", _))
ElasticRequest("GET", "/_mget", params.toMap, entity)
}
}
implicit object GetHandler extends Handler[GetRequest, GetResponse] with Logging {
override def responseHandler: ResponseHandler[GetResponse] = new ResponseHandler[GetResponse] {
override def handle(response: HttpResponse): Either[ElasticError, GetResponse] = {
def bad(status: Int): Left[ElasticError, GetResponse] = {
val node = ResponseHandler.fromResponse[JsonNode](response)
if (node.has("error") && node.get("error").isObject)
Left(ElasticError.parse(response))
else
Left(ElasticError(response.entity.get.content, response.entity.get.content, None, None, None, Nil, None))
}
def good = Right(ResponseHandler.fromResponse[GetResponse](response))
response.statusCode match {
case 200 => good
// 404s are odd, can be different document types
case 404 =>
val node = ResponseHandler.fromResponse[JsonNode](response)
if (node.has("error")) bad(404) else good
case other => bad(other)
}
}
}
override def build(request: GetRequest): ElasticRequest = {
val endpoint =
s"/${URLEncoder.encode(request.index.index, "UTF-8")}/_doc/${URLEncoder.encode(request.id, "UTF-8")}"
val params = scala.collection.mutable.Map.empty[String, String]
request.fetchSource.foreach { context =>
FetchSourceContextQueryParameterFn(context).foreach { case (key, value) => params.put(key, value) }
}
if (request.storedFields.nonEmpty)
params.put("stored_fields", request.storedFields.mkString(","))
request.parent.foreach(params.put("parent", _))
request.routing.foreach(params.put("routing", _))
request.preference.foreach(params.put("preference", _))
request.refresh.map(_.toString).foreach(params.put("refresh", _))
request.realtime.map(_.toString).foreach(params.put("realtime", _))
request.version.map(_.toString).foreach(params.put("version", _))
request.versionType.map(VersionTypeHttpString.apply).foreach(params.put("version_type", _))
ElasticRequest("GET", endpoint, params.toMap)
}
}
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/get/GetHandlers.scala | Scala | apache-2.0 | 3,975 |
/*
Copyright 2012 Georgia Tech Research Institute
Author: [email protected]
This file is part of org.gtri.util.scala library.
org.gtri.util.scala library is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
org.gtri.util.scala library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with org.gtri.util.scala library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.gtri.util.scala.exelog.sideeffects
import org.apache.log4j
import org.apache.log4j.Level
import org.apache.log4j.Level._
import org.gtri.util.scala.exelog
final class Log4jLog(
val name : String,
val parentName : String
) {
val fqcn = parentName + "." + name
val log = log4j.Logger.getLogger(fqcn)
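// Messages are passed by name (message: => String), so they are only built when the level is enabled.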
@inline def tryLog(level : Level, message: => String) {
if(log.isEnabledFor(level)) {
log.log(fqcn, level, message, null)
}
}
@inline def tryLog(level : Level, message: => String, cause : Throwable) {
if(log.isEnabledFor(level)) {
log.log(fqcn, level, message, cause)
}
}
@inline def trace(message: => String) = tryLog(TRACE, message)
@inline def debug(message: => String) = tryLog(DEBUG, message)
@inline def info(message: => String) = tryLog(INFO, message)
@inline def warn(message: => String) = tryLog(WARN, message)
@inline def warn(cause: Throwable) = tryLog(WARN, cause.getMessage, cause)
@inline def warn(message: => String, cause: Throwable) = tryLog(WARN, message, cause)
@inline def error(message: => String) = tryLog(ERROR, message)
@inline def error(cause: Throwable) = tryLog(ERROR, cause.getMessage, cause)
@inline def error(message: => String, cause: Throwable) = tryLog(ERROR, message, cause)
@inline def fatal(message: => String) = tryLog(FATAL,message)
@inline def fatal(cause: Throwable) = tryLog(FATAL, cause.getMessage, cause)
@inline def fatal(message: => String, cause: Throwable) = tryLog(FATAL, message, cause)
}
| gtri-iead/org.gtri.util.scala | exelog/src/main/scala/org/gtri/util/scala/exelog/sideeffects/Log4jLog.scala | Scala | gpl-3.0 | 2,385 |
package BIDMach.networks.layers
import BIDMat.{Mat,SBMat,CMat,DMat,FMat,IMat,LMat,HMat,GMat,GDMat,GIMat,GLMat,GSMat,GSDMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMach.datasources._
import BIDMach.updaters._
import BIDMach.mixins._
import BIDMach.models._
import BIDMach._
import edu.berkeley.bid.CPUMACH
import edu.berkeley.bid.CUMACH
import scala.util.hashing.MurmurHash3;
import java.util.HashMap;
import BIDMach.networks._
/**
* Rectifying Linear Unit layer.
*/
class RectLayer(override val net:Net, override val opts:RectNodeOpts = new RectNode) extends Layer(net, opts) {
override def forward = {
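// ReLU forward pass: output = max(input, 0), applied elementwise.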
val start = toc;
createOutput;
output.asMat <-- max(inputData.asMat, 0f);
clearDeriv;
forwardtime += toc - start;
}
override def backward = {
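// ReLU backward pass: accumulate the incoming derivative only where the input was positive.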
val start = toc;
if (inputDeriv.asInstanceOf[AnyRef] != null) inputDeriv ~ inputDeriv + (deriv ∘ (inputData > 0f));
backwardtime += toc - start;
}
override def toString = {
"rect@"+Integer.toHexString(hashCode % 0x10000).toString
}
}
trait RectNodeOpts extends NodeOpts {
}
class RectNode extends Node with RectNodeOpts {
def copyTo(opts:RectNode):RectNode = {
super.copyTo(opts);
opts;
}
override def clone:RectNode = {
copyTo(new RectNode);
}
override def create(net:Net):RectLayer = {
RectLayer(net, this);
}
override def toString = {
"rect@"+Integer.toHexString(hashCode % 0x10000).toString
}
}
object RectLayer {
def apply(net:Net) = new RectLayer(net, new RectNode);
def apply(net:Net, opts:RectNodeOpts) = new RectLayer(net, opts);
}
| jamesjia94/BIDMach | src/main/scala/BIDMach/networks/layers/RectLayer.scala | Scala | bsd-3-clause | 1,713 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, Properties}
import scala.collection.JavaConverters._
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.spark.Partition
import org.apache.spark.annotation.Stable
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.csv.{CSVHeaderChecker, CSVOptions, UnivocityParser}
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, FailureSafeParser}
import org.apache.spark.sql.connector.catalog.{CatalogV2Util, SupportsCatalogOptions, SupportsRead}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.csv._
import org.apache.spark.sql.execution.datasources.jdbc._
import org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2Utils}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.unsafe.types.UTF8String
/**
* Interface used to load a [[Dataset]] from external storage systems (e.g. file systems,
* key-value stores, etc). Use `SparkSession.read` to access this.
*
* @since 1.4.0
*/
@Stable
class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Specifies the input data source format.
*
* @since 1.4.0
*/
def format(source: String): DataFrameReader = {
this.source = source
this
}
/**
* Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
* automatically from data. By specifying the schema here, the underlying data source can
* skip the schema inference step, and thus speed up data loading.
*
* @since 1.4.0
*/
def schema(schema: StructType): DataFrameReader = {
this.userSpecifiedSchema = Option(schema)
this
}
/**
* Specifies the schema by using the input DDL-formatted string. Some data sources (e.g. JSON) can
* infer the input schema automatically from data. By specifying the schema here, the underlying
* data source can skip the schema inference step, and thus speed up data loading.
*
* {{{
* spark.read.schema("a INT, b STRING, c DOUBLE").csv("test.csv")
* }}}
*
* @since 2.3.0
*/
def schema(schemaString: String): DataFrameReader = {
this.userSpecifiedSchema = Option(StructType.fromDDL(schemaString))
this
}
/**
* Adds an input option for the underlying data source.
*
* All options are maintained in a case-insensitive way in terms of key names.
* If a new option has the same key case-insensitively, it will override the existing option.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a time zone ID
* to be used to parse timestamps in the JSON/CSV datasources or partition values. The following
* formats of `timeZone` are supported:
* <ul>
* <li> Region-based zone ID: It should have the form 'area/city', such as
* 'America/Los_Angeles'.</li>
* <li> Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00'
* or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</li>
* </ul>
* Other short names like 'CST' are not recommended to use because they can be ambiguous.
* If it isn't set, the current value of the SQL config `spark.sql.session.timeZone` is
* used by default.
* </li>
* </ul>
*
* @since 1.4.0
*/
def option(key: String, value: String): DataFrameReader = {
this.extraOptions = this.extraOptions + (key -> value)
this
}
/**
* Adds an input option for the underlying data source.
*
* All options are maintained in a case-insensitive way in terms of key names.
* If a new option has the same key case-insensitively, it will override the existing option.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* All options are maintained in a case-insensitive way in terms of key names.
* If a new option has the same key case-insensitively, it will override the existing option.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* All options are maintained in a case-insensitive way in terms of key names.
* If a new option has the same key case-insensitively, it will override the existing option.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataFrameReader = option(key, value.toString)
/**
* (Scala-specific) Adds input options for the underlying data source.
*
* All options are maintained in a case-insensitive way in terms of key names.
* If a new option has the same key case-insensitively, it will override the existing option.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a time zone ID
* to be used to parse timestamps in the JSON/CSV datasources or partition values. The following
* formats of `timeZone` are supported:
* <ul>
* <li> Region-based zone ID: It should have the form 'area/city', such as
* 'America/Los_Angeles'.</li>
* <li> Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00'
* or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</li>
* </ul>
* Other short names like 'CST' are not recommended to use because they can be ambiguous.
* If it isn't set, the current value of the SQL config `spark.sql.session.timeZone` is
* used by default.
* </li>
* </ul>
*
* @since 1.4.0
*/
def options(options: scala.collection.Map[String, String]): DataFrameReader = {
this.extraOptions ++= options
this
}
/**
* Adds input options for the underlying data source.
*
* All options are maintained in a case-insensitive way in terms of key names.
* If a new option has the same key case-insensitively, it will override the existing option.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a time zone ID
* to be used to parse timestamps in the JSON/CSV datasources or partition values. The following
* formats of `timeZone` are supported:
* <ul>
* <li> Region-based zone ID: It should have the form 'area/city', such as
* 'America/Los_Angeles'.</li>
* <li> Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00'
* or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</li>
* </ul>
* Other short names like 'CST' are not recommended to use because they can be ambiguous.
* If it isn't set, the current value of the SQL config `spark.sql.session.timeZone` is
* used by default.
* </li>
* </ul>
*
* @since 1.4.0
*/
def options(options: java.util.Map[String, String]): DataFrameReader = {
this.options(options.asScala)
this
}
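// A brief, hedged illustration of the option plumbing described above; the path and
// option values are placeholders:
//
//   spark.read
//     .format("json")
//     .option("timeZone", "America/Los_Angeles")
//     .options(Map("multiLine" -> "true"))
//     .load("/path/to/input")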
/**
* Loads input in as a `DataFrame`, for data sources that don't require a path (e.g. external
* key-value stores).
*
* @since 1.4.0
*/
def load(): DataFrame = {
load(Seq.empty: _*) // force invocation of `load(...varargs...)`
}
/**
* Loads input in as a `DataFrame`, for data sources that require a path (e.g. data backed by
* a local or distributed file system).
*
* @since 1.4.0
*/
def load(path: String): DataFrame = {
// force invocation of `load(...varargs...)`
if (sparkSession.sessionState.conf.legacyPathOptionBehavior) {
option("path", path).load(Seq.empty: _*)
} else {
load(Seq(path): _*)
}
}
/**
* Loads input in as a `DataFrame`, for data sources that support multiple paths.
* Only works if the source is a HadoopFsRelationProvider.
*
* @since 1.6.0
*/
@scala.annotation.varargs
def load(paths: String*): DataFrame = {
if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw new AnalysisException("Hive data source can only be used with tables, you can not " +
"read files of Hive data source directly.")
}
val legacyPathOptionBehavior = sparkSession.sessionState.conf.legacyPathOptionBehavior
if (!legacyPathOptionBehavior &&
(extraOptions.contains("path") || extraOptions.contains("paths")) && paths.nonEmpty) {
throw new AnalysisException("There is a 'path' or 'paths' option set and load() is called " +
"with path parameters. Either remove the path option if it's the same as the path " +
"parameter, or add it to the load() parameter if you do want to read multiple paths. " +
s"To ignore this check, set '${SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key}' to 'true'.")
}
DataSource.lookupDataSourceV2(source, sparkSession.sessionState.conf).map { provider =>
val catalogManager = sparkSession.sessionState.catalogManager
val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
source = provider, conf = sparkSession.sessionState.conf)
val optionsWithPath = if (paths.isEmpty) {
extraOptions
} else if (paths.length == 1) {
extraOptions + ("path" -> paths.head)
} else {
val objectMapper = new ObjectMapper()
extraOptions + ("paths" -> objectMapper.writeValueAsString(paths.toArray))
}
val finalOptions = sessionOptions.filterKeys(!optionsWithPath.contains(_)).toMap ++
optionsWithPath.originalMap
val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
val (table, catalog, ident) = provider match {
case _: SupportsCatalogOptions if userSpecifiedSchema.nonEmpty =>
throw new IllegalArgumentException(
s"$source does not support user specified schema. Please don't specify the schema.")
case hasCatalog: SupportsCatalogOptions =>
val ident = hasCatalog.extractIdentifier(dsOptions)
val catalog = CatalogV2Util.getTableProviderCatalog(
hasCatalog,
catalogManager,
dsOptions)
(catalog.loadTable(ident), Some(catalog), Some(ident))
case _ =>
// TODO: Non-catalog paths for DSV2 are currently not well defined.
val tbl = DataSourceV2Utils.getTableFromProvider(provider, dsOptions, userSpecifiedSchema)
(tbl, None, None)
}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._
table match {
case _: SupportsRead if table.supports(BATCH_READ) =>
Dataset.ofRows(
sparkSession,
DataSourceV2Relation.create(table, catalog, ident, dsOptions))
case _ => loadV1Source(paths: _*)
}
}.getOrElse(loadV1Source(paths: _*))
}
private def loadV1Source(paths: String*) = {
val legacyPathOptionBehavior = sparkSession.sessionState.conf.legacyPathOptionBehavior
val (finalPaths, finalOptions) = if (!legacyPathOptionBehavior && paths.length == 1) {
(Nil, extraOptions + ("path" -> paths.head))
} else {
(paths, extraOptions)
}
// Code path for data source v1.
sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = finalPaths,
userSpecifiedSchema = userSpecifiedSchema,
className = source,
options = finalOptions.originalMap).resolveRelation())
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table and connection properties.
*
* @since 1.4.0
*/
def jdbc(url: String, table: String, properties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// properties should override settings in extraOptions.
this.extraOptions ++= properties.asScala
// explicit url and dbtable should override all
this.extraOptions ++= Seq(JDBCOptions.JDBC_URL -> url, JDBCOptions.JDBC_TABLE_NAME -> table)
format("jdbc").load()
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table. Partitions of the table will be retrieved in parallel based on the parameters
* passed to this function.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`.
* @param table Name of the table in the external database.
* @param columnName the name of a column of numeric, date, or timestamp type
* that will be used for partitioning.
* @param lowerBound the minimum value of `columnName` used to decide partition stride.
* @param upperBound the maximum value of `columnName` used to decide partition stride.
* @param numPartitions the number of partitions. This, along with `lowerBound` (inclusive),
* `upperBound` (exclusive), form partition strides for generated WHERE
* clause expressions used to split the column `columnName` evenly. When
* the input is less than 1, the number is set to 1.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch and "queryTimeout" can be used to wait
* for a Statement object to execute to the given number of seconds.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
columnName: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int,
connectionProperties: Properties): DataFrame = {
// columnName, lowerBound, upperBound and numPartitions override settings in extraOptions.
this.extraOptions ++= Map(
JDBCOptions.JDBC_PARTITION_COLUMN -> columnName,
JDBCOptions.JDBC_LOWER_BOUND -> lowerBound.toString,
JDBCOptions.JDBC_UPPER_BOUND -> upperBound.toString,
JDBCOptions.JDBC_NUM_PARTITIONS -> numPartitions.toString)
jdbc(url, table, connectionProperties)
}
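// Hedged usage sketch of the partitioned JDBC read above; the URL, table, column and
// bounds are placeholders:
//
//   val props = new java.util.Properties()
//   props.setProperty("user", "sa")
//   props.setProperty("password", "")
//   spark.read.jdbc(
//     "jdbc:postgresql://localhost/test", "people", "id",
//     lowerBound = 0L, upperBound = 10000L, numPartitions = 8, connectionProperties = props)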
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table using connection properties. The `predicates` parameter gives a list
* expressions suitable for inclusion in WHERE clauses; each one defines one partition
* of the `DataFrame`.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`
* @param table Name of the table in the external database.
* @param predicates Condition in the where clause for each partition.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
*                             tag/value pairs. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// connectionProperties should override settings in extraOptions.
val params = extraOptions ++ connectionProperties.asScala
val options = new JDBCOptions(url, table, params)
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
JDBCPartition(part, i) : Partition
}
val relation = JDBCRelation(parts, options)(sparkSession)
sparkSession.baseRelationToDataFrame(relation)
}
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
* See the documentation on the overloaded `json()` method with varargs for more details.
*
* @since 1.4.0
*/
def json(path: String): DataFrame = {
// This method ensures that calls that explicitly need a single argument work, see SPARK-16009
json(Seq(path): _*)
}
/**
* Loads JSON files and returns the results as a `DataFrame`.
*
* <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
* default. For JSON (one record per file), set the `multiLine` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
*
* You can set the following JSON-specific options to deal with non-standard JSON files:
* <ul>
* <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
* <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal
* type. If the values do not fit in decimal, then it infers them as doubles.</li>
* <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
* <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
* <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
* </li>
* <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
* (e.g. 00012)</li>
* <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
* characters using the backslash quoting mechanism</li>
* <li>`allowUnquotedControlChars` (default `false`): allows JSON Strings to contain unquoted
* control characters (ASCII characters with value less than 32, including tab and line feed
* characters) or not.</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets malformed fields to `null`. To
* keep corrupt records, a user can set a string type field named
* `columnNameOfCorruptRecord` in a user-defined schema. If a schema does not have the
* field, it drops corrupt records during parsing. When inferring a schema, it implicitly
* adds a `columnNameOfCorruptRecord` field in an output schema.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at
* <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>.
* This applies to date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>.
* This applies to timestamp type.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines,
* per file</li>
* <li>`encoding` (by default it is not set): allows forcibly setting one of the standard basic
* or extended encodings for the JSON files, for example UTF-16BE or UTF-32LE. If the encoding
* is not specified and `multiLine` is set to `true`, it will be detected automatically.</li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of input JSON objects used
* for schema inferring.</li>
* <li>`dropFieldIfAllNull` (default `false`): whether to ignore columns of all null values or
* empty arrays/structs during schema inference.</li>
* <li>`locale` (default is `en-US`): sets a locale as language tag in IETF BCP 47 format.
* For instance, this is used while parsing dates and timestamps.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* <li>`modifiedBefore` (batch only): an optional timestamp to only include files with
* modification times occurring before the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`modifiedAfter` (batch only): an optional timestamp to only include files with
* modification times occurring after the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`recursiveFileLookup`: recursively scan a directory for files. Using this option
* disables partition discovery</li>
* <li>`allowNonNumericNumbers` (default `true`): allows JSON parser to recognize set of
* "Not-a-Number" (NaN) tokens as legal floating number values:
* <ul>
* <li>`+INF` for positive infinity, as well as alias of `+Infinity` and `Infinity`.
* <li>`-INF` for negative infinity, as well as alias of `-Infinity`.
* <li>`NaN` for other not-a-numbers, like the result of division by zero.
* </ul>
* </li>
* </ul>
*
* @since 2.0.0
*/
@scala.annotation.varargs
def json(paths: String*): DataFrame = format("json").load(paths : _*)
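// Usage sketch (editor's addition): reading multi-line JSON with an explicit schema so the extra
// schema-inference pass is skipped. The path and field names are hypothetical.
//
//   import org.apache.spark.sql.types._
//   val jsonSchema = new StructType().add("id", LongType).add("name", StringType)
//   val eventsDF = spark.read
//     .schema(jsonSchema)
//     .option("multiLine", "true")
//     .option("mode", "PERMISSIVE")
//     .json("/data/events/*.json")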
/**
* Loads a `JavaRDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON
* Lines text format or newline-delimited JSON</a>) and returns the result as
* a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: JavaRDD[String]): DataFrame = json(jsonRDD.rdd)
/**
* Loads an `RDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: RDD[String]): DataFrame = {
json(sparkSession.createDataset(jsonRDD)(Encoders.STRING))
}
/**
* Loads a `Dataset[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonDataset input Dataset with one JSON object per record
* @since 2.2.0
*/
def json(jsonDataset: Dataset[String]): DataFrame = {
val parsedOptions = new JSONOptions(
extraOptions.toMap,
sparkSession.sessionState.conf.sessionLocalTimeZone,
sparkSession.sessionState.conf.columnNameOfCorruptRecord)
val schema = userSpecifiedSchema.getOrElse {
TextInputJsonDataSource.inferFromDataset(jsonDataset, parsedOptions)
}
ExprUtils.verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
val actualSchema =
StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
val createParser = CreateJacksonParser.string _
val parsed = jsonDataset.rdd.mapPartitions { iter =>
val rawParser = new JacksonParser(actualSchema, parsedOptions, allowArrayAsStructs = true)
val parser = new FailureSafeParser[String](
input => rawParser.parse(input, createParser, UTF8String.fromString),
parsedOptions.parseMode,
schema,
parsedOptions.columnNameOfCorruptRecord)
iter.flatMap(parser.parse)
}
sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = jsonDataset.isStreaming)
}
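// Usage sketch (editor's addition): parsing an in-memory Dataset[String] of JSON Lines records.
//
//   import spark.implicits._
//   val jsonDS = Seq("""{"id":1,"name":"a"}""", """{"id":2,"name":"b"}""").toDS()
//   val df = spark.read.json(jsonDS)  // schema inferred from the dataset itself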
/**
* Loads a CSV file and returns the result as a `DataFrame`. See the documentation on the
* other overloaded `csv()` method for more details.
*
* @since 2.0.0
*/
def csv(path: String): DataFrame = {
// This method ensures that calls that explicitly need a single argument work, see SPARK-16009
csv(Seq(path): _*)
}
/**
* Loads a `Dataset[String]` storing CSV rows and returns the result as a `DataFrame`.
*
* If the schema is not specified using `schema` function and `inferSchema` option is enabled,
* this function goes through the input once to determine the input schema.
*
* If the schema is not specified using `schema` function and `inferSchema` option is disabled,
* it determines the columns as string types and it reads only the first line to determine the
* names and the number of fields.
*
* If `enforceSchema` is set to `false`, only the CSV header in the first line is checked
* to conform to the specified or inferred schema.
*
* @note if the `header` option is set to `true` when calling this API, all lines identical to
* the header will be removed, if any exist.
*
* @param csvDataset input Dataset with one CSV row per record
* @since 2.2.0
*/
def csv(csvDataset: Dataset[String]): DataFrame = {
val parsedOptions: CSVOptions = new CSVOptions(
extraOptions.toMap,
sparkSession.sessionState.conf.csvColumnPruning,
sparkSession.sessionState.conf.sessionLocalTimeZone)
val filteredLines: Dataset[String] =
CSVUtils.filterCommentAndEmpty(csvDataset, parsedOptions)
// For performance, short-circuit the collection of the first line when it won't be used:
// - TextInputCSVDataSource - Only uses firstLine to infer an unspecified schema
// - CSVHeaderChecker - Only uses firstLine to check header, when headerFlag is true
// - CSVUtils - Only uses firstLine to filter headers, when headerFlag is true
// (If the downstream logic grows more complicated, consider refactoring to an approach that
// delegates this decision to the constituent consumers themselves.)
val maybeFirstLine: Option[String] =
if (userSpecifiedSchema.isEmpty || parsedOptions.headerFlag) {
filteredLines.take(1).headOption
} else {
None
}
val schema = userSpecifiedSchema.getOrElse {
TextInputCSVDataSource.inferFromDataset(
sparkSession,
csvDataset,
maybeFirstLine,
parsedOptions)
}
ExprUtils.verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
val actualSchema =
StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
val linesWithoutHeader: RDD[String] = maybeFirstLine.map { firstLine =>
val headerChecker = new CSVHeaderChecker(
actualSchema,
parsedOptions,
source = s"CSV source: $csvDataset")
headerChecker.checkHeaderColumnNames(firstLine)
filteredLines.rdd.mapPartitions(CSVUtils.filterHeaderLine(_, firstLine, parsedOptions))
}.getOrElse(filteredLines.rdd)
val parsed = linesWithoutHeader.mapPartitions { iter =>
val rawParser = new UnivocityParser(actualSchema, parsedOptions)
val parser = new FailureSafeParser[String](
input => rawParser.parse(input),
parsedOptions.parseMode,
schema,
parsedOptions.columnNameOfCorruptRecord)
iter.flatMap(parser.parse)
}
sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = csvDataset.isStreaming)
}
/**
* Loads CSV files and returns the result as a `DataFrame`.
*
* This function will go through the input once to determine the input schema if `inferSchema`
* is enabled. To avoid going through the entire data once, disable `inferSchema` option or
* specify the schema explicitly using `schema`.
*
* You can set the following CSV-specific options to deal with CSV files:
* <ul>
* <li>`sep` (default `,`): sets a separator for each field and value. This separator can be one
* or more characters.</li>
* <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding
* type.</li>
* <li>`quote` (default `"`): sets a single character used for escaping quoted values where
* the separator can be part of the value. If you would like to turn off quotations, set this to
* an empty string rather than `null`. This behaviour is different from
* `com.databricks.spark.csv`.</li>
* <li>`escape` (default `\\`): sets a single character used for escaping quotes inside
* an already quoted value.</li>
* <li>`charToEscapeQuoteEscaping` (default `escape` or `\\0`): sets a single character used for
* escaping the escape for the quote character. The default value is escape character when escape
* and quote characters are different, `\\0` otherwise.</li>
* <li>`comment` (default empty string): sets a single character used for skipping lines
* beginning with this character. By default, it is disabled.</li>
* <li>`header` (default `false`): uses the first line as names of columns.</li>
* <li>`enforceSchema` (default `true`): If it is set to `true`, the specified or inferred schema
* will be forcibly applied to datasource files, and headers in CSV files will be ignored.
* If the option is set to `false`, the schema will be validated against all headers in CSV files
* in the case when the `header` option is set to `true`. Field names in the schema
* and column names in CSV headers are checked by their positions taking into account
* `spark.sql.caseSensitive`. Though the default value is true, it is recommended to disable
* the `enforceSchema` option to avoid incorrect results.</li>
* <li>`inferSchema` (default `false`): infers the input schema automatically from data. It
* requires one extra pass over the data.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of rows used for schema inferring.</li>
* <li>`ignoreLeadingWhiteSpace` (default `false`): a flag indicating whether or not leading
* whitespaces from values being read should be skipped.</li>
* <li>`ignoreTrailingWhiteSpace` (default `false`): a flag indicating whether or not trailing
* whitespaces from values being read should be skipped.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value. Since
* 2.0.1, this applies to all supported types including the string type.</li>
* <li>`emptyValue` (default empty string): sets the string representation of an empty value.</li>
* <li>`nanValue` (default `NaN`): sets the string representation of a non-number value.</li>
* <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity
* value.</li>
* <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
* value.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at
* <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>.
* This applies to date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>.
* This applies to timestamp type.</li>
* <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
* a record can have.</li>
* <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
* for any given value being read. By default, it is -1 meaning unlimited length</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing. It supports the following case-insensitive modes. Note that Spark tries
* to parse only required columns in CSV under column pruning. Therefore, corrupt records
* can be different based on required set of fields. This behavior can be controlled by
* `spark.sql.csv.parser.columnPruning.enabled` (enabled by default).
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets malformed fields to `null`.
* To keep corrupt records, a user can set a string type field named
* `columnNameOfCorruptRecord` in a user-defined schema. If a schema does not have
* the field, it drops corrupt records during parsing. A record with fewer or more tokens
* than the schema is not considered corrupted by CSV. When it meets a record having fewer
* tokens than the length of the schema, it sets the missing fields to `null`. When the record
* has more tokens than the length of the schema, it drops the extra tokens.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines.</li>
* <li>`locale` (default is `en-US`): sets a locale as language tag in IETF BCP 47 format.
* For instance, this is used while parsing dates and timestamps.</li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing. Maximum length is 1 character.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* <li>`modifiedBefore` (batch only): an optional timestamp to only include files with
* modification times occurring before the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`modifiedAfter` (batch only): an optional timestamp to only include files with
* modification times occurring after the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`recursiveFileLookup`: recursively scan a directory for files. Using this option
* disables partition discovery</li>
* </ul>
*
* @since 2.0.0
*/
@scala.annotation.varargs
def csv(paths: String*): DataFrame = format("csv").load(paths : _*)
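// Usage sketch (editor's addition): a typical file-based CSV read; the path and options are
// illustrative only.
//
//   val df = spark.read
//     .option("header", "true")
//     .option("sep", ";")
//     .option("inferSchema", "true")
//     .csv("/data/input/*.csv")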
/**
* Loads a Parquet file, returning the result as a `DataFrame`. See the documentation
* on the other overloaded `parquet()` method for more details.
*
* @since 2.0.0
*/
def parquet(path: String): DataFrame = {
// This method ensures that calls that explicitly need a single argument work, see SPARK-16009
parquet(Seq(path): _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`.
*
* You can set the following Parquet-specific option(s) for reading Parquet files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
* whether we should merge schemas collected from all Parquet part-files. This will override
* `spark.sql.parquet.mergeSchema`.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* <li>`modifiedBefore` (batch only): an optional timestamp to only include files with
* modification times occurring before the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`modifiedAfter` (batch only): an optional timestamp to only include files with
* modification times occurring after the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`recursiveFileLookup`: recursively scan a directory for files. Using this option
* disables partition discovery</li>
* </ul>
*
* @since 1.4.0
*/
@scala.annotation.varargs
def parquet(paths: String*): DataFrame = {
format("parquet").load(paths: _*)
}
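// Usage sketch (editor's addition): reading Parquet with schema merging and a glob filter.
// The warehouse path is a hypothetical placeholder.
//
//   val df = spark.read
//     .option("mergeSchema", "true")
//     .option("pathGlobFilter", "*.parquet")
//     .parquet("/warehouse/events/")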
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param path input path
* @since 1.5.0
*/
def orc(path: String): DataFrame = {
// This method ensures that calls that explicitly need a single argument work, see SPARK-16009
orc(Seq(path): _*)
}
/**
* Loads ORC files and returns the result as a `DataFrame`.
*
* You can set the following ORC-specific option(s) for reading ORC files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.orc.mergeSchema`): sets whether
* we should merge schemas collected from all ORC part-files. This will override
* `spark.sql.orc.mergeSchema`.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* <li>`modifiedBefore` (batch only): an optional timestamp to only include files with
* modification times occurring before the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`modifiedAfter` (batch only): an optional timestamp to only include files with
* modification times occurring after the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`recursiveFileLookup`: recursively scan a directory for files. Using this option
* disables partition discovery</li>
* </ul>
*
* @param paths input paths
* @since 2.0.0
*/
@scala.annotation.varargs
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
/**
* Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch
* reading and the returned DataFrame is the batch scan query plan of this table. If it's a view,
* the returned DataFrame is simply the query plan of the view, which can either be a batch or
* streaming query plan.
*
* @param tableName is either a qualified or unqualified name that designates a table or view.
* If a database is specified, it identifies the table/view from the database.
* Otherwise, it first attempts to find a temporary view with the given name
* and then matches the table/view from the current database.
* Note that, the global temporary view database is also valid here.
* @since 1.4.0
*/
def table(tableName: String): DataFrame = {
assertNoSpecifiedSchema("table")
val multipartIdentifier =
sparkSession.sessionState.sqlParser.parseMultipartIdentifier(tableName)
Dataset.ofRows(sparkSession, UnresolvedRelation(multipartIdentifier,
new CaseInsensitiveStringMap(extraOptions.toMap.asJava)))
}
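// Usage sketch (editor's addition): reading a catalog table or view by qualified name; the
// table name is hypothetical.
//
//   val ordersDF = spark.read.table("sales_db.orders")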
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any. See the documentation on
* the other overloaded `text()` method for more details.
*
* @since 2.0.0
*/
def text(path: String): DataFrame = {
// This method ensures that calls that explicitly need a single argument work, see SPARK-16009
text(Seq(path): _*)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any.
* The text files must be encoded as UTF-8.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.text("/path/to/spark/README.md")
*
* // Java:
* spark.read().text("/path/to/spark/README.md")
* }}}
*
* You can set the following text-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\\n".
* </li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* <li>`modifiedBefore` (batch only): an optional timestamp to only include files with
* modification times occurring before the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`modifiedAfter` (batch only): an optional timestamp to only include files with
* modification times occurring after the specified Time. The provided timestamp
* must be in the following form: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</li>
* <li>`recursiveFileLookup`: recursively scan a directory for files. Using this option
* disables partition discovery</li>
* </ul>
*
* @param paths input paths
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = format("text").load(paths : _*)
/**
* Loads text files and returns a [[Dataset]] of String. See the documentation on the
* other overloaded `textFile()` method for more details.
* @since 2.0.0
*/
def textFile(path: String): Dataset[String] = {
// This method ensures that calls that explicitly need a single argument work, see SPARK-16009
textFile(Seq(path): _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
* contains a single string column named "value".
* The text files must be encoded as UTF-8.
*
* If the directory structure of the text files contains partitioning information, those are
* ignored in the resulting Dataset. To include partitioning information as columns, use `text`.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.textFile("/path/to/spark/README.md")
*
* // Java:
* spark.read().textFile("/path/to/spark/README.md")
* }}}
*
* You can set the text-specific options as specified in `DataFrameReader.text`.
*
* @param paths input path
* @since 2.0.0
*/
@scala.annotation.varargs
def textFile(paths: String*): Dataset[String] = {
assertNoSpecifiedSchema("textFile")
text(paths : _*).select("value").as[String](sparkSession.implicits.newStringEncoder)
}
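// Usage sketch (editor's addition): line-oriented versus whole-file text reads; the log path is
// a hypothetical placeholder.
//
//   val lines: Dataset[String] = spark.read.textFile("/data/logs/app.log")
//   val whole: DataFrame = spark.read.option("wholetext", "true").text("/data/logs/app.log")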
/**
* A convenient function for schema validation in APIs.
*/
private def assertNoSpecifiedSchema(operation: String): Unit = {
if (userSpecifiedSchema.nonEmpty) {
throw new AnalysisException(s"User specified schema not supported with `$operation`")
}
}
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
private var source: String = sparkSession.sessionState.conf.defaultDataSourceName
private var userSpecifiedSchema: Option[StructType] = None
private var extraOptions = CaseInsensitiveMap[String](Map.empty)
}
| shuangshuangwang/spark | sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | Scala | apache-2.0 | 46,356 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessactivities
import connectors.DataCacheConnector
import controllers.actions.SuccessfulAuthAction
import models.businessactivities.{BusinessActivities, IdentifySuspiciousActivity}
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import play.api.i18n.Messages
import play.api.libs.json.Json
import play.api.test.Helpers._
import uk.gov.hmrc.http.cache.client.CacheMap
import utils.AmlsSpec
import views.html.businessactivities.identify_suspicious_activity
import scala.concurrent.Future
class IdentifiySuspiciousActivityControllerSpec extends AmlsSpec with MockitoSugar with ScalaFutures{
trait Fixture {
self =>
val request = addToken(authRequest)
lazy val view = app.injector.instanceOf[identify_suspicious_activity]
val controller = new IdentifySuspiciousActivityController (
dataCacheConnector = mock[DataCacheConnector],
SuccessfulAuthAction,
ds = commonDependencies,
cc = mockMcc,
identify_suspicious_activity = view)
}
"IdentifySuspiciousActivityController" when {
"get is called" must {
"display the Identify suspicious activity page with an empty form" in new Fixture {
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(None))
val result = controller.get()(request)
status(result) must be(OK)
val page = Jsoup.parse(contentAsString(result))
page.select("input[type=radio][name=hasWrittenGuidance][value=true]").hasAttr("checked") must be(false)
page.select("input[type=radio][name=hasWrittenGuidance][value=false]").hasAttr("checked") must be(false)
}
"display the identify suspicious activity page with pre populated data" in new Fixture {
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(Some(BusinessActivities(
identifySuspiciousActivity = Some(IdentifySuspiciousActivity(true))
))))
val result = controller.get()(request)
status(result) must be(OK)
val page = Jsoup.parse(contentAsString(result))
page.select("input[type=radio][name=hasWrittenGuidance][value=true]").hasAttr("checked") must be(true)
}
}
"post is called" must {
"on post with valid data" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"hasWrittenGuidance" -> "true"
)
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(None))
when(controller.dataCacheConnector.save[BusinessActivities](any(), any(), any())(any(), any()))
.thenReturn(Future.successful(CacheMap(BusinessActivities.key, Map("" -> Json.obj()))))
val result = controller.post()(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.NCARegisteredController.get(false).url))
}
"on post with invalid data" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"hasWrittenGuidance" -> "grrrrr"
)
val result = controller.post()(newRequest)
status(result) must be(BAD_REQUEST)
val document: Document = Jsoup.parse(contentAsString(result))
document.select("span").html() must include(Messages("error.required.ba.suspicious.activity"))
}
"on post with valid data in edit mode" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"hasWrittenGuidance" -> "true"
)
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(None))
when(controller.dataCacheConnector.save[BusinessActivities](any(), any(), any())(any(), any()))
.thenReturn(Future.successful(CacheMap(BusinessActivities.key, Map("" -> Json.obj()))))
val result = controller.post(true)(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.SummaryController.get.url))
}
}
}
}
| hmrc/amls-frontend | test/controllers/businessactivities/IdentifiySuspiciousActivityControllerSpec.scala | Scala | apache-2.0 | 4,949 |
package alternating
import scala.collection.mutable.ListBuffer
class AlternatingAlgorithm extends Serializable {
def generateInitialEdge(item : (Long, Iterable[Long])) : Iterable[(Long, Set[Long])] =
{
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
val it = item._2.toSet.iterator
while (it.hasNext) {
val next = it.next
outputList.prepend((item._1, Set(next)))
}
outputList.toIterable
}
def smallStarMap(item : (Long, Set[Long])) : Iterable[(Long, Set[Long])] =
{
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
val it2 = item._2.iterator
while (it2.hasNext) {
val next = it2.next
if(next <= item._1)
{
outputList.prepend((item._1, Set(next)))
} else
{
outputList.prepend((next, Set(item._1)))
}
}
outputList.toIterable
}
def smallStarReduce(item : (Long, Set[Long])) : Iterable[(Long, Set[Long])] =
{
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
var min = Math.min( item._1, item._2.min)
val it2 = item._2.iterator
// var valueList : Set[Long] = Set()
//
// while (it.hasNext) {
// val next = it.next
// valueList = valueList + next
// if (next < min) {
// min = next
// }
// }
// val it2 = valueList.iterator
while (it2.hasNext) {
val next = it2.next
outputList.prepend((next, Set(min)))
}
outputList.prepend((item._1, Set(min)))
outputList.toIterable
}
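// Worked example (editor's addition; the edge values are illustrative): one small-star round
// over a toy adjacency list, driven by the helpers above on a Spark RDD.
//
//   val alg = new AlternatingAlgorithm
//   val edges = sc.parallelize(Seq((5L, Iterable(2L, 7L, 9L))))
//   val afterSmallStar = edges
//     .flatMap(alg.generateInitialEdge)       // (5,{2}), (5,{7}), (5,{9})
//     .flatMap(alg.smallStarMap)              // (5,{2}), (7,{5}), (9,{5})
//     .reduceByKey(alg.reduceMessageByKey)    // group neighbours per node
//     .flatMap(alg.smallStarReduce)           // every node now points at its local minimum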
def largeStarMapOptimized(item: (Long, Set[Long]), limit : Int) : Iterable[(Long, Set[Long])] =
{
val sizeNeighborhood = item._2.size
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
// if(info.isDefined && info.get.isMarkedAsRootNode)
// {
// outputList.prepend((Option(item._2, item._1), Option.empty))
// }
// else
val it = item._2.iterator
if(item._1 == item._2.min)
{
while(it.hasNext)
{
val next = it.next
outputList.prepend((next, Set(item._1)))
}
}
else if(sizeNeighborhood > limit && item._1 % limit == 0)
{
while(it.hasNext)
{
val next = it.next
val hash = item._1 + (next % (limit-1)) + 1
outputList.prepend((item._1, Set(hash)))
outputList.prepend((hash, Set(next)))
}
}
else
{
while(it.hasNext)
{
val next = it.next
outputList.prepend((item._1, Set(next)))
outputList.prepend((next, Set(item._1)))
}
}
outputList.toIterable
}
def reduceMessageByKey(a : Set[Long], b : Set[Long]) : Set[Long] =
{
a++b
}
def largeStarReduceOptimized(item: (Long, Set[Long])) : Iterable[(Long, Set[Long])] =
{
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
var min = Math.min(item._1, item._2.min)
val it2 = item._2.iterator
var valueList : Set[Long] = Set()
// while (it.hasNext) {
// val next = it.next
// valueList = valueList + next
// if (next < min) {
// min = next
// }
// }
//
// val it2 = valueList.iterator
while (it2.hasNext) {
val next = it2.next
if (next > item._1) {
outputList.prepend((next, Set(min)))
}
}
outputList.prepend((item._1, Set(min)))
// outputList.prepend((Option.empty, Option(item._1, new AlternatingMessage(item._1 == min))))
outputList.toIterable
}
def largeStarMap(item: (Long, Set[Long])) : Iterable[(Long, Set[Long])] =
{
val sizeNeighborhood = item._2.toSet.size
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
// if(info.isDefined && info.get.isMarkedAsRootNode)
// {
// outputList.prepend((Option(item._2, item._1), Option.empty))
// }
// else
val it = item._2.iterator
while(it.hasNext)
{
val next = it.next
outputList.prepend((item._1, Set(next)))
outputList.prepend((next, Set(item._1)))
}
outputList.toIterable
}
def largeStarReduce(item : (Long, Set[Long])) : Iterable[(Long, Set[Long])] =
{
var outputList : ListBuffer[(Long, Set[Long])] = new ListBuffer
var min = item._1
val it = item._2.iterator
var valueList : Set[Long] = Set()
while (it.hasNext) {
val next = it.next
valueList = valueList + next
if (next < min) {
min = next
}
}
val it2 = valueList.iterator
while (it2.hasNext) {
val next = it2.next
if (next > item._1) {
outputList.prepend((next, Set(min)))
}
}
outputList.prepend((item._1, Set(min)))
outputList.toIterable
}
}
| hpclab/cracker | src/alternating/AlternatingAlgorithm.scala | Scala | mit | 4,484 |
package org.template.recommendation
import io.prediction.controller.PAlgorithm
import io.prediction.controller.Params
import io.prediction.data.storage.BiMap
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
case class ALSAlgorithmParams(rank: Int, numIterations: Int, lambda: Double,
seed: Option[Long]) extends Params
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends PAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
def train(data: PreparedData): ALSModel = {
require(!data.ratings.take(1).isEmpty,
s"viewEvents in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.items.take(1).isEmpty,
s"items in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
// create item's String ID to integer index BiMap
val itemStringIntMap = BiMap.stringInt(data.items.keys)
val userStringIntMap = BiMap.stringInt(data.ratings.map(_.user))
// HOWTO: collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) ⇒
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val mllibRatings = data.ratings.map { r =>
// Convert user and item String IDs to Int index for MLlib
val iindex = itemStringIntMap.getOrElse(r.item, -1)
val uindex = userStringIntMap.getOrElse(r.user, -1)
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
(uindex -> iindex) -> 1
}.filter { case ((u, i), v) => (i != -1) && (u != -1) }
.reduceByKey(_ + _) // aggregate all view events of same item
.map { case ((u, i), v) => MLlibRating(u, i, v) }
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.trainImplicit(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
alpha = 1.0,
seed = seed)
new ALSModel(productFeatures = m.productFeatures,
itemStringIntMap = itemStringIntMap, items = items)
}
def predict(model: ALSModel, query: Query): PredictedResult = {
val queryFeatures =
model.items.keys.flatMap(model.productFeatures.lookup(_).headOption)
val indexScores = if (queryFeatures.isEmpty) {
logger.info(s"No productFeatures found for query ${query}.")
Array[(Int, Double)]()
} else {
model.productFeatures.mapValues { f ⇒
queryFeatures.map(cosine(_, f)).reduce(_ + _)
}.filter(_._2 > 0) // keep items with score > 0
.collect()
}
// HOWTO: filter predicted results by query.
val filteredScores = filterItems(indexScores, model.items, query)
implicit val ord = Ordering.by[(Int, Double), Double](_._2)
val topScores = getTopN(filteredScores, query.num).toArray
val itemScores = topScores.map { case (i, s) ⇒
new ItemScore(item = model.itemIntStringMap(i), score = s,
creationYear = model.items(i).creationYear)
}
new PredictedResult(itemScores)
}
private def getTopN[T](s: Seq[T], n: Int)
(implicit ord: Ordering[T]): Iterable[T] = {
var result = List.empty[T]
for (x <- s) {
if (result.size < n)
result = x :: result
else {
val min = result.min
// keep the n largest under `ord`: replace the current minimum only when x ranks above it
if (ord.compare(x, min) > 0) {
result = x :: result.filter(_ != min)
}
}
}
result.sorted.reverse
}
private def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
val n1n2 = (math.sqrt(n1) * math.sqrt(n2))
if (n1n2 == 0) 0 else (d / n1n2)
}
// HOWTO: actual filter of predicted movie results.
// filter selects all movies
// that were made after the year specified in the query
private def filterItems(selectedScores: Array[(Int, Double)],
items: Map[Int, Item],
query: Query) =
selectedScores.view.filter { case (iId, _) ⇒
items(iId).creationYear.map(icr ⇒ query.creationYear.forall(icr >= _))
.getOrElse(true)
}
}
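// Parameter sketch (editor's addition): the kind of values an engine.json would typically bind
// to ALSAlgorithmParams; the numbers are illustrative, not tuned recommendations.
//
//   val params = ALSAlgorithmParams(rank = 10, numIterations = 20, lambda = 0.01, seed = Some(3L))
//   val algorithm = new ALSAlgorithm(params)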
| wenaz/PredictionIO | examples/scala-parallel-recommendation/custom-query/src/main/scala/ALSAlgorithm.scala | Scala | apache-2.0 | 4,962 |
package rml.args.conversions.map
import rml.args.arg.input.ListArg
import rml.args.arg.input.PositionalArg
import rml.args.arg.input.SingleArg
import rml.args.exceptions.IllegalArgException
import rml.args.arg.restriction.SetRestriction
import rml.args.arg.InputArg
import rml.args.arg.input.JoinArg
import rml.args.arg.input.ListArg0
import rml.args.arg.restriction.FixRestriction
import rml.args.arg.restriction.Restricted
import rml.args.arg.InputCmdMapper
trait ToEnum[E <: Enumeration] {
val enum: E
def mapToType(value: String): E#Value = try {
enum.withName(value)
} catch {
case nsee: NoSuchElementException => throw new IllegalArgException("", value, (0 until enum.maxId).toList.map(enum(_).toString))
}
lazy val getRestriction = FixRestriction(enum.values.map(_.toString))
lazy val baseType: String = "Enumeration " + enum.getClass().getName()
override def toString = enum.values.mkString("[", ", ", "]")
}
object AnEnum{ def apply[E <: Enumeration](key: String, e: E) = InputArg(key, new SingleArg[E#Value] with ToEnum[E] { val enum = e } ) }
object JEnum { def apply[E <: Enumeration](key: String, e: E) = InputArg(key, new JoinArg[E#Value] with ToEnum[E] { val enum = e } ) }
object Enums { def apply[E <: Enumeration](key: String, e: E) = InputArg(key, new ListArg[E#Value] with ToEnum[E] { val enum = e } ) }
object Enums0{ def apply[E <: Enumeration](key: String, e: E) = InputArg(key, new ListArg0[E#Value] with ToEnum[E] { val enum = e } ) }
object PEnum { def apply[E <: Enumeration](pos: Int, e: E) = InputArg("-", new ToEnum[E] with PositionalArg[E#Value]{ val enum = e ; val position = pos }) }
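// Usage sketch (editor's addition): wiring a hypothetical Enumeration through these factories.
//
//   object Color extends Enumeration { val Red, Green, Blue = Value }
//   val single = AnEnum("color", Color)   // parses a single value such as "Green"
//   val many   = Enums("colors", Color)   // parses a list of values
//   // An unknown name raises IllegalArgException, listing the allowed values Red, Green, Blue.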
| rml/scala_args | src/main/scala/rml/args/conversions/map/ToEnum.scala | Scala | gpl-3.0 | 1,671 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl.hyperkernels
import cogx.compiler.codegenerator.opencl.fragments.{AddressingMode, HyperKernel}
import cogx.platform.types.{VirtualFieldRegister, FieldType}
import cogx.compiler.codegenerator.opencl.OpcodeToFunction
import cogx.compiler.codegenerator.common.FieldPolicies._
import cogx.compiler.parser.op.ComplexBinaryRealConstOp
/** Combines both real and imaginary components of each complex element of a
* complex field with a real constant.
*
* @author Greg Snider and Dick Carter
*
* @param in The input virtual field register driving this kernel.
* @param op The opcode for this op.
* @param resultType The FieldType of the result of this kernel.
* @param addressMode The addressing mode of this kernel.
*/
private[cogx]
class ComplexBinaryRealConstHyperKernel private (in: Array[VirtualFieldRegister],
op: ComplexBinaryRealConstOp,
resultType: FieldType,
addressMode: AddressingMode)
extends HyperKernel(op, in, resultType, addressMode)
{
addCode(" @out0 = " + OpcodeToFunction(op) + "(read(@in0), " +
op.const + "f);")
// debugCompile
}
/** Factory object for creating kernels of this type.
*/
private[cogx]
object ComplexBinaryRealConstHyperKernel extends HyperHelper {
/**
* Create a HyperKernel that combines both real and imaginary components of
* each complex element of a complex field with a real constant.
*
* @param in The input virtual field register driving this kernel.
* @param operation The opcode for this operation.
* @param resultType The FieldType of the result of this kernel.
* @return Synthesized hyperkernel for the operation.
*/
def apply(in: Array[VirtualFieldRegister], operation: ComplexBinaryRealConstOp, resultType: FieldType): HyperKernel = {
require(in.length == 1)
val in0Type = in(0).fieldType
require(isComplexField(in0Type))
require(resultType == in0Type)
val addressing = bestAddressMode(in, resultType)
new ComplexBinaryRealConstHyperKernel(in, operation, resultType, addressing)
}
}
| hpe-cct/cct-core | src/main/scala/cogx/compiler/codegenerator/opencl/hyperkernels/ComplexBinaryRealConstHyperKernel.scala | Scala | apache-2.0 | 2,802 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import javax.annotation.concurrent.GuardedBy
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.util.control.NonFatal
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.AMRMClient
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.yarn.ResourceRequestHelper._
import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Python._
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.resource.ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID
import org.apache.spark.rpc.{RpcCallContext, RpcEndpointRef}
import org.apache.spark.scheduler.{ExecutorExited, ExecutorLossReason}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RemoveExecutor
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RetrieveLastAllocatedExecutorId
import org.apache.spark.scheduler.cluster.SchedulerBackendUtils
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils}
/**
* YarnAllocator is charged with requesting containers from the YARN ResourceManager and deciding
* what to do with containers when YARN fulfills these requests.
*
* This class makes use of YARN's AMRMClient APIs. We interact with the AMRMClient in three ways:
* * Making our resource needs known, which updates local bookkeeping about containers requested.
* * Calling "allocate", which syncs our local container requests with the RM, and returns any
* containers that YARN has granted to us. This also functions as a heartbeat.
* * Processing the containers granted to us to possibly launch executors inside of them.
*
* The public methods of this class are thread-safe. All methods that mutate state are
* synchronized.
*/
private[yarn] class YarnAllocator(
driverUrl: String,
driverRef: RpcEndpointRef,
conf: YarnConfiguration,
sparkConf: SparkConf,
amClient: AMRMClient[ContainerRequest],
appAttemptId: ApplicationAttemptId,
securityMgr: SecurityManager,
localResources: Map[String, LocalResource],
resolver: SparkRackResolver,
clock: Clock = new SystemClock)
extends Logging {
import YarnAllocator._
// Visible for testing.
@GuardedBy("this")
val allocatedHostToContainersMapPerRPId =
new HashMap[Int, HashMap[String, collection.mutable.Set[ContainerId]]]
@GuardedBy("this")
val allocatedContainerToHostMap = new HashMap[ContainerId, String]
// Containers that we no longer care about. We've either already told the RM to release them or
// will on the next heartbeat. Containers get removed from this map after the RM tells us they've
// completed.
@GuardedBy("this")
private val releasedContainers = collection.mutable.HashSet[ContainerId]()
@GuardedBy("this")
private val runningExecutorsPerResourceProfileId = new HashMap[Int, mutable.Set[String]]()
@GuardedBy("this")
private val numExecutorsStartingPerResourceProfileId = new HashMap[Int, AtomicInteger]
@GuardedBy("this")
private val targetNumExecutorsPerResourceProfileId = new mutable.HashMap[Int, Int]
// Executor loss reason requests that are pending - maps from executor ID for inquiry to a
// list of requesters that should be responded to once we find out why the given executor
// was lost.
@GuardedBy("this")
private val pendingLossReasonRequests = new HashMap[String, mutable.Buffer[RpcCallContext]]
// Maintain loss reasons for already released executors; a reason is added when it is obtained
// from the AM-RM call and removed once it has been queried.
@GuardedBy("this")
private val releasedExecutorLossReasons = new HashMap[String, ExecutorLossReason]
// Keep track of which container is running which executor to remove the executors later
// Visible for testing.
@GuardedBy("this")
private[yarn] val executorIdToContainer = new HashMap[String, Container]
@GuardedBy("this")
private var numUnexpectedContainerRelease = 0L
@GuardedBy("this")
private val containerIdToExecutorIdAndResourceProfileId = new HashMap[ContainerId, (String, Int)]
// Use a ConcurrentHashMap because this is used in matchContainerToRequest, which is called
// from the rack resolver thread, where synchronizing on `this` would cause a deadlock.
@GuardedBy("ConcurrentHashMap")
private[yarn] val rpIdToYarnResource = new ConcurrentHashMap[Int, Resource]()
// note currently we don't remove ResourceProfiles
@GuardedBy("this")
private[yarn] val rpIdToResourceProfile = new mutable.HashMap[Int, ResourceProfile]
// A map of ResourceProfile id to a map of preferred hostname and possible
// task numbers running on it.
@GuardedBy("this")
private var hostToLocalTaskCountPerResourceProfileId: Map[Int, Map[String, Int]] =
Map(DEFAULT_RESOURCE_PROFILE_ID -> Map.empty)
// ResourceProfile Id to number of tasks that have locality preferences in active stages
@GuardedBy("this")
private[yarn] var numLocalityAwareTasksPerResourceProfileId: Map[Int, Int] =
Map(DEFAULT_RESOURCE_PROFILE_ID -> 0)
/**
* Used to generate a unique ID per executor
*
* Init `executorIdCounter`. When the AM restarts, `executorIdCounter` is reset to 0, so the
* IDs of new executors would start from 1 again and conflict with executors created before
* the restart. To avoid this, we initialize `executorIdCounter` with the max executor ID
* obtained from the driver.
*
* This executor-ID conflict can only happen in YARN client mode. For more details, see the
* JIRA ticket below.
*
* @see SPARK-12864
*/
@GuardedBy("this")
private var executorIdCounter: Int =
driverRef.askSync[Int](RetrieveLastAllocatedExecutorId)
private[spark] val failureTracker = new FailureTracker(sparkConf, clock)
private val allocatorBlacklistTracker =
new YarnAllocatorBlacklistTracker(sparkConf, amClient, failureTracker)
// Executor memory in MiB.
protected val executorMemory = sparkConf.get(EXECUTOR_MEMORY).toInt
// Executor offHeap memory in MiB.
protected val executorOffHeapMemory = YarnSparkHadoopUtil.executorOffHeapMemorySizeAsMb(sparkConf)
// Additional memory overhead.
protected val memoryOverhead: Int = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN)).toInt
protected val pysparkWorkerMemory: Int = if (sparkConf.get(IS_PYTHON_APP)) {
sparkConf.get(PYSPARK_EXECUTOR_MEMORY).map(_.toInt).getOrElse(0)
} else {
0
}
// Number of cores per executor for the default profile
protected val defaultExecutorCores = sparkConf.get(EXECUTOR_CORES)
private val executorResourceRequests =
getYarnResourcesAndAmounts(sparkConf, config.YARN_EXECUTOR_RESOURCE_TYPES_PREFIX) ++
getYarnResourcesFromSparkResources(SPARK_EXECUTOR_PREFIX, sparkConf)
// Resource capability requested for each executor for the default profile
private[yarn] val defaultResource: Resource = {
val resource: Resource = Resource.newInstance(
executorMemory + executorOffHeapMemory + memoryOverhead + pysparkWorkerMemory,
defaultExecutorCores)
ResourceRequestHelper.setResourceRequests(executorResourceRequests, resource)
logDebug(s"Created resource capability: $resource")
resource
}
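// Worked example (editor's addition; numbers are illustrative): with executorMemory = 4096 MiB,
// no off-heap or PySpark memory, and the usual overhead of max(0.10 * 4096, 384) = 409 MiB,
// the default container request asks YARN for 4096 + 0 + 409 + 0 = 4505 MiB plus
// `defaultExecutorCores` vcores and any custom resources (e.g. GPUs) from the configuration.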
private val launcherPool = ThreadUtils.newDaemonCachedThreadPool(
"ContainerLauncher", sparkConf.get(CONTAINER_LAUNCH_MAX_THREADS))
// For testing
private val launchContainers = sparkConf.getBoolean("spark.yarn.launchContainers", true)
private val labelExpression = sparkConf.get(EXECUTOR_NODE_LABEL_EXPRESSION)
// A container placement strategy based on pending tasks' locality preference
private[yarn] val containerPlacementStrategy =
new LocalityPreferredContainerPlacementStrategy(sparkConf, conf, resolver)
// The default profile is always present, so we need to initialize the data structures keyed by
// ResourceProfile id to ensure it is present if things start running before a request for
// executors could add it. This approach is easier than special-casing it everywhere.
private def initDefaultProfile(): Unit = synchronized {
allocatedHostToContainersMapPerRPId(DEFAULT_RESOURCE_PROFILE_ID) =
new HashMap[String, mutable.Set[ContainerId]]()
runningExecutorsPerResourceProfileId.put(DEFAULT_RESOURCE_PROFILE_ID, mutable.HashSet[String]())
numExecutorsStartingPerResourceProfileId(DEFAULT_RESOURCE_PROFILE_ID) = new AtomicInteger(0)
targetNumExecutorsPerResourceProfileId(DEFAULT_RESOURCE_PROFILE_ID) =
SchedulerBackendUtils.getInitialTargetExecutorNumber(sparkConf)
rpIdToYarnResource.put(DEFAULT_RESOURCE_PROFILE_ID, defaultResource)
rpIdToResourceProfile(DEFAULT_RESOURCE_PROFILE_ID) =
ResourceProfile.getOrCreateDefaultProfile(sparkConf)
}
initDefaultProfile()
def getNumExecutorsRunning: Int = synchronized {
runningExecutorsPerResourceProfileId.values.map(_.size).sum
}
def getNumLocalityAwareTasks: Int = synchronized {
numLocalityAwareTasksPerResourceProfileId.values.sum
}
def getNumExecutorsStarting: Int = synchronized {
numExecutorsStartingPerResourceProfileId.values.map(_.get()).sum
}
def getNumReleasedContainers: Int = synchronized {
releasedContainers.size
}
def getNumExecutorsFailed: Int = failureTracker.numFailedExecutors
def isAllNodeBlacklisted: Boolean = allocatorBlacklistTracker.isAllNodeBlacklisted
/**
* A sequence of pending container requests that have not yet been fulfilled.
* ResourceProfile id -> pendingAllocate container request
*/
def getPendingAllocate: Map[Int, Seq[ContainerRequest]] = getPendingAtLocation(ANY_HOST)
def getNumContainersPendingAllocate: Int = synchronized {
getPendingAllocate.values.flatten.size
}
// YARN priorities are such that lower number is higher priority.
// We need to allocate a different priority for each ResourceProfile because YARN
// won't allow different container resource requirements within a Priority.
// We could allocate per Stage to make sure earlier stages get priority but Spark
// always finishes a stage before starting a later one and if we have 2 running in parallel
// the priority doesn't matter.
// We are using the ResourceProfile id as the priority.
private def getContainerPriority(rpId: Int): Priority = {
Priority.newInstance(rpId)
}
// The ResourceProfile id is the priority
private def getResourceProfileIdFromPriority(priority: Priority): Int = {
priority.getPriority()
}
private def getOrUpdateAllocatedHostToContainersMapForRPId(
rpId: Int): HashMap[String, collection.mutable.Set[ContainerId]] = synchronized {
allocatedHostToContainersMapPerRPId.getOrElseUpdate(rpId,
new HashMap[String, mutable.Set[ContainerId]]())
}
private def getOrUpdateRunningExecutorForRPId(rpId: Int): mutable.Set[String] = synchronized {
runningExecutorsPerResourceProfileId.getOrElseUpdate(rpId, mutable.HashSet[String]())
}
private def getOrUpdateNumExecutorsStartingForRPId(rpId: Int): AtomicInteger = synchronized {
numExecutorsStartingPerResourceProfileId.getOrElseUpdate(rpId, new AtomicInteger(0))
}
private def getOrUpdateTargetNumExecutorsForRPId(rpId: Int): Int = synchronized {
targetNumExecutorsPerResourceProfileId.getOrElseUpdate(rpId,
SchedulerBackendUtils.getInitialTargetExecutorNumber(sparkConf))
}
/**
* A sequence of pending container requests at the given location for each ResourceProfile id
* that have not yet been fulfilled.
*/
private def getPendingAtLocation(
location: String): Map[Int, Seq[ContainerRequest]] = synchronized {
val allContainerRequests = new mutable.HashMap[Int, Seq[ContainerRequest]]
rpIdToResourceProfile.keys.map { id =>
val profResource = rpIdToYarnResource.get(id)
val result = amClient.getMatchingRequests(getContainerPriority(id), location, profResource)
.asScala.flatMap(_.asScala)
allContainerRequests(id) = result
}
allContainerRequests.toMap
}
// if a ResourceProfile hasn't been seen yet, create the corresponding YARN Resource for it
private def createYarnResourceForResourceProfile(
resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Unit = synchronized {
resourceProfileToTotalExecs.foreach { case (rp, num) =>
if (!rpIdToYarnResource.contains(rp.id)) {
// Start with the application or default settings
var heapMem = executorMemory.toLong
// Note we currently don't support off heap memory in ResourceProfile - SPARK-30794
var offHeapMem = executorOffHeapMemory.toLong
var overheadMem = memoryOverhead.toLong
var pysparkMem = pysparkWorkerMemory.toLong
var cores = defaultExecutorCores
val customResources = new mutable.HashMap[String, String]
// track the resource profile if not already there
getOrUpdateRunningExecutorForRPId(rp.id)
logInfo(s"Resource profile ${rp.id} doesn't exist, adding it")
val execResources = rp.executorResources
execResources.foreach { case (r, execReq) =>
r match {
case ResourceProfile.MEMORY =>
heapMem = execReq.amount
case ResourceProfile.OVERHEAD_MEM =>
overheadMem = execReq.amount
case ResourceProfile.PYSPARK_MEM =>
pysparkMem = execReq.amount
case ResourceProfile.CORES =>
cores = execReq.amount.toInt
case "gpu" =>
customResources(YARN_GPU_RESOURCE_CONFIG) = execReq.amount.toString
case "fpga" =>
customResources(YARN_FPGA_RESOURCE_CONFIG) = execReq.amount.toString
case rName =>
customResources(rName) = execReq.amount.toString
}
}
val totalMem = (heapMem + offHeapMem + overheadMem + pysparkMem).toInt
val resource = Resource.newInstance(totalMem, cores)
ResourceRequestHelper.setResourceRequests(customResources.toMap, resource)
logDebug(s"Created resource capability: $resource")
rpIdToYarnResource.putIfAbsent(rp.id, resource)
rpIdToResourceProfile(rp.id) = rp
}
}
}
/**
* Request as many executors from the ResourceManager as needed to reach the desired total. If
* the requested total is smaller than the current number of running executors, no executors will
* be killed.
* @param resourceProfileToTotalExecs total number of containers requested for each
* ResourceProfile
* @param numLocalityAwareTasksPerResourceProfileId number of locality aware tasks for each
* ResourceProfile id to be used as container
* placement hint.
   * @param hostToLocalTaskCountPerResourceProfileId a map of preferred hostname to possible task
   *                                                 counts for each ResourceProfile id to be used
   *                                                 as container placement hint.
* @param nodeBlacklist blacklisted nodes, which is passed in to avoid allocating new containers
* on them. It will be used to update the application master's blacklist.
* @return Whether the new requested total is different than the old value.
*/
def requestTotalExecutorsWithPreferredLocalities(
resourceProfileToTotalExecs: Map[ResourceProfile, Int],
numLocalityAwareTasksPerResourceProfileId: Map[Int, Int],
hostToLocalTaskCountPerResourceProfileId: Map[Int, Map[String, Int]],
nodeBlacklist: Set[String]): Boolean = synchronized {
this.numLocalityAwareTasksPerResourceProfileId = numLocalityAwareTasksPerResourceProfileId
this.hostToLocalTaskCountPerResourceProfileId = hostToLocalTaskCountPerResourceProfileId
createYarnResourceForResourceProfile(resourceProfileToTotalExecs)
val res = resourceProfileToTotalExecs.map { case (rp, numExecs) =>
if (numExecs != getOrUpdateTargetNumExecutorsForRPId(rp.id)) {
logInfo(s"Driver requested a total number of $numExecs executor(s) " +
s"for resource profile id: ${rp.id}.")
targetNumExecutorsPerResourceProfileId(rp.id) = numExecs
allocatorBlacklistTracker.setSchedulerBlacklistedNodes(nodeBlacklist)
true
} else {
false
}
}
res.exists(_ == true)
}
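  // A hypothetical call sketch (values are illustrative; `defaultProfile` stands for
  // ResourceProfile.getOrCreateDefaultProfile(sparkConf), as used in initDefaultProfile above):
  //   allocator.requestTotalExecutorsWithPreferredLocalities(
  //     resourceProfileToTotalExecs = Map(defaultProfile -> 5),
  //     numLocalityAwareTasksPerResourceProfileId = Map(DEFAULT_RESOURCE_PROFILE_ID -> 3),
  //     hostToLocalTaskCountPerResourceProfileId =
  //       Map(DEFAULT_RESOURCE_PROFILE_ID -> Map("host1" -> 2, "host2" -> 1)),
  //     nodeBlacklist = Set.empty)
  //   // returns true whenever the requested target (here 5) differs from the previous target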
/**
* Request that the ResourceManager release the container running the specified executor.
*/
def killExecutor(executorId: String): Unit = synchronized {
executorIdToContainer.get(executorId) match {
case Some(container) if !releasedContainers.contains(container.getId) =>
val (_, rpId) = containerIdToExecutorIdAndResourceProfileId(container.getId)
internalReleaseContainer(container)
getOrUpdateRunningExecutorForRPId(rpId).remove(executorId)
case _ => logWarning(s"Attempted to kill unknown executor $executorId!")
}
}
/**
* Request resources such that, if YARN gives us all we ask for, we'll have a number of containers
* equal to maxExecutors.
*
* Deal with any containers YARN has granted to us by possibly launching executors in them.
*
* This must be synchronized because variables read in this method are mutated by other methods.
*/
def allocateResources(): Unit = synchronized {
updateResourceRequests()
val progressIndicator = 0.1f
// Poll the ResourceManager. This doubles as a heartbeat if there are no pending container
// requests.
val allocateResponse = amClient.allocate(progressIndicator)
val allocatedContainers = allocateResponse.getAllocatedContainers()
allocatorBlacklistTracker.setNumClusterNodes(allocateResponse.getNumClusterNodes)
if (allocatedContainers.size > 0) {
logDebug(("Allocated containers: %d. Current executor count: %d. " +
"Launching executor count: %d. Cluster resources: %s.")
.format(
allocatedContainers.size,
getNumExecutorsRunning,
getNumExecutorsStarting,
allocateResponse.getAvailableResources))
handleAllocatedContainers(allocatedContainers.asScala)
}
val completedContainers = allocateResponse.getCompletedContainersStatuses()
if (completedContainers.size > 0) {
logDebug("Completed %d containers".format(completedContainers.size))
processCompletedContainers(completedContainers.asScala)
logDebug("Finished processing %d completed containers. Current running executor count: %d."
.format(completedContainers.size, getNumExecutorsRunning))
}
}
/**
* Update the set of container requests that we will sync with the RM based on the number of
* executors we have currently running and our target number of executors for each
* ResourceProfile.
*
* Visible for testing.
*/
def updateResourceRequests(): Unit = synchronized {
val pendingAllocatePerResourceProfileId = getPendingAllocate
val missingPerProfile = targetNumExecutorsPerResourceProfileId.map { case (rpId, targetNum) =>
val starting = getOrUpdateNumExecutorsStartingForRPId(rpId).get
val pending = pendingAllocatePerResourceProfileId.getOrElse(rpId, Seq.empty).size
val running = getOrUpdateRunningExecutorForRPId(rpId).size
logDebug(s"Updating resource requests for ResourceProfile id: $rpId, target: " +
s"$targetNum, pending: $pending, running: $running, executorsStarting: $starting")
(rpId, targetNum - pending - running - starting)
}.toMap
missingPerProfile.foreach { case (rpId, missing) =>
val hostToLocalTaskCount =
hostToLocalTaskCountPerResourceProfileId.getOrElse(rpId, Map.empty)
val pendingAllocate = pendingAllocatePerResourceProfileId.getOrElse(rpId, Seq.empty)
val numPendingAllocate = pendingAllocate.size
      // Split the pending container requests into three groups: locality matched, locality
      // unmatched, and no locality preference. The locality matched requests are taken into
      // account for container placement and treated as already allocated containers.
      // Locality unmatched and locality free requests are cancelled, since the required
      // locality preferences have changed; they are recalculated using the container
      // placement strategy.
val (localRequests, staleRequests, anyHostRequests) = splitPendingAllocationsByLocality(
hostToLocalTaskCount, pendingAllocate)
if (missing > 0) {
val resource = rpIdToYarnResource.get(rpId)
if (log.isInfoEnabled()) {
          var requestContainerMessage = s"Will request $missing executor container(s) for " +
            s"ResourceProfile Id: $rpId, each with " +
s"${resource.getVirtualCores} core(s) and " +
s"${resource.getMemory} MB memory (including $memoryOverhead MB of overhead)"
if (ResourceRequestHelper.isYarnResourceTypesAvailable() &&
ResourceRequestHelper.isYarnCustomResourcesNonEmpty(resource)) {
requestContainerMessage ++= s" with custom resources: " + resource.toString
}
logInfo(requestContainerMessage)
}
// cancel "stale" requests for locations that are no longer needed
staleRequests.foreach { stale =>
amClient.removeContainerRequest(stale)
}
val cancelledContainers = staleRequests.size
if (cancelledContainers > 0) {
logInfo(s"Canceled $cancelledContainers container request(s) (locality no longer needed)")
}
// consider the number of new containers and cancelled stale containers available
val availableContainers = missing + cancelledContainers
// to maximize locality, include requests with no locality preference that can be cancelled
val potentialContainers = availableContainers + anyHostRequests.size
val allocatedHostToContainer = getOrUpdateAllocatedHostToContainersMapForRPId(rpId)
val numLocalityAwareTasks = numLocalityAwareTasksPerResourceProfileId.getOrElse(rpId, 0)
val containerLocalityPreferences = containerPlacementStrategy.localityOfRequestedContainers(
potentialContainers, numLocalityAwareTasks, hostToLocalTaskCount,
allocatedHostToContainer, localRequests, rpIdToResourceProfile(rpId))
val newLocalityRequests = new mutable.ArrayBuffer[ContainerRequest]
containerLocalityPreferences.foreach {
case ContainerLocalityPreferences(nodes, racks) if nodes != null =>
newLocalityRequests += createContainerRequest(resource, nodes, racks, rpId)
case _ =>
}
if (availableContainers >= newLocalityRequests.size) {
// more containers are available than needed for locality, fill in requests for any host
for (i <- 0 until (availableContainers - newLocalityRequests.size)) {
newLocalityRequests += createContainerRequest(resource, null, null, rpId)
}
} else {
val numToCancel = newLocalityRequests.size - availableContainers
// cancel some requests without locality preferences to schedule more local containers
anyHostRequests.slice(0, numToCancel).foreach { nonLocal =>
amClient.removeContainerRequest(nonLocal)
}
if (numToCancel > 0) {
logInfo(s"Canceled $numToCancel unlocalized container requests to " +
s"resubmit with locality")
}
}
newLocalityRequests.foreach { request =>
amClient.addContainerRequest(request)
}
if (log.isInfoEnabled()) {
val (localized, anyHost) = newLocalityRequests.partition(_.getNodes() != null)
if (anyHost.nonEmpty) {
logInfo(s"Submitted ${anyHost.size} unlocalized container requests.")
}
localized.foreach { request =>
logInfo(s"Submitted container request for host ${hostStr(request)}.")
}
}
} else if (numPendingAllocate > 0 && missing < 0) {
val numToCancel = math.min(numPendingAllocate, -missing)
logInfo(s"Canceling requests for $numToCancel executor container(s) to have a new " +
s"desired total ${getOrUpdateTargetNumExecutorsForRPId(rpId)} executors.")
// cancel pending allocate requests by taking locality preference into account
val cancelRequests = (staleRequests ++ anyHostRequests ++ localRequests).take(numToCancel)
cancelRequests.foreach(amClient.removeContainerRequest)
}
}
}
def stop(): Unit = {
// Forcefully shut down the launcher pool, in case this is being called in the middle of
// container allocation. This will prevent queued executors from being started - and
// potentially interrupt active ExecutorRunnable instances too.
launcherPool.shutdownNow()
}
private def hostStr(request: ContainerRequest): String = {
Option(request.getNodes) match {
case Some(nodes) => nodes.asScala.mkString(",")
case None => "Any"
}
}
/**
   * Creates a YARN container request with the given resource, locality preferences, and the
   * priority derived from the ResourceProfile id, applying the configured node label expression.
*/
private def createContainerRequest(
resource: Resource,
nodes: Array[String],
racks: Array[String],
rpId: Int): ContainerRequest = {
new ContainerRequest(resource, nodes, racks, getContainerPriority(rpId),
true, labelExpression.orNull)
}
/**
* Handle containers granted by the RM by launching executors on them.
*
* Due to the way the YARN allocation protocol works, certain healthy race conditions can result
* in YARN granting containers that we no longer need. In this case, we release them.
*
* Visible for testing.
*/
def handleAllocatedContainers(allocatedContainers: Seq[Container]): Unit = {
val containersToUse = new ArrayBuffer[Container](allocatedContainers.size)
// Match incoming requests by host
val remainingAfterHostMatches = new ArrayBuffer[Container]
for (allocatedContainer <- allocatedContainers) {
matchContainerToRequest(allocatedContainer, allocatedContainer.getNodeId.getHost,
containersToUse, remainingAfterHostMatches)
}
// Match remaining by rack. Because YARN's RackResolver swallows thread interrupts
// (see SPARK-27094), which can cause this code to miss interrupts from the AM, use
// a separate thread to perform the operation.
val remainingAfterRackMatches = new ArrayBuffer[Container]
if (remainingAfterHostMatches.nonEmpty) {
var exception: Option[Throwable] = None
val thread = new Thread("spark-rack-resolver") {
override def run(): Unit = {
try {
for (allocatedContainer <- remainingAfterHostMatches) {
val rack = resolver.resolve(allocatedContainer.getNodeId.getHost)
matchContainerToRequest(allocatedContainer, rack, containersToUse,
remainingAfterRackMatches)
}
} catch {
case e: Throwable =>
exception = Some(e)
}
}
}
thread.setDaemon(true)
thread.start()
try {
thread.join()
} catch {
case e: InterruptedException =>
thread.interrupt()
throw e
}
if (exception.isDefined) {
throw exception.get
}
}
// Assign remaining that are neither node-local nor rack-local
val remainingAfterOffRackMatches = new ArrayBuffer[Container]
for (allocatedContainer <- remainingAfterRackMatches) {
matchContainerToRequest(allocatedContainer, ANY_HOST, containersToUse,
remainingAfterOffRackMatches)
}
if (remainingAfterOffRackMatches.nonEmpty) {
logDebug(s"Releasing ${remainingAfterOffRackMatches.size} unneeded containers that were " +
s"allocated to us")
for (container <- remainingAfterOffRackMatches) {
internalReleaseContainer(container)
}
}
runAllocatedContainers(containersToUse)
logInfo("Received %d containers from YARN, launching executors on %d of them."
.format(allocatedContainers.size, containersToUse.size))
}
/**
* Looks for requests for the given location that match the given container allocation. If it
* finds one, removes the request so that it won't be submitted again. Places the container into
* containersToUse or remaining.
*
* @param allocatedContainer container that was given to us by YARN
* @param location resource name, either a node, rack, or *
* @param containersToUse list of containers that will be used
* @param remaining list of containers that will not be used
*/
private def matchContainerToRequest(
allocatedContainer: Container,
location: String,
containersToUse: ArrayBuffer[Container],
remaining: ArrayBuffer[Container]): Unit = {
    // Match on the exact resource we requested so there shouldn't be a mismatch; we are relying
    // on YARN to return a container with resources no less than we requested. If we change this,
    // or start validating the container, be sure the logic covers SPARK-6050.
val rpId = getResourceProfileIdFromPriority(allocatedContainer.getPriority)
val resourceForRP = rpIdToYarnResource.get(rpId)
logDebug(s"Calling amClient.getMatchingRequests with parameters: " +
s"priority: ${allocatedContainer.getPriority}, " +
s"location: $location, resource: $resourceForRP")
val matchingRequests = amClient.getMatchingRequests(allocatedContainer.getPriority, location,
resourceForRP)
// Match the allocation to a request
if (!matchingRequests.isEmpty) {
val containerRequest = matchingRequests.get(0).iterator.next
logDebug(s"Removing container request via AM client: $containerRequest")
amClient.removeContainerRequest(containerRequest)
containersToUse += allocatedContainer
} else {
remaining += allocatedContainer
}
}
/**
* Launches executors in the allocated containers.
*/
private def runAllocatedContainers(containersToUse: ArrayBuffer[Container]): Unit = synchronized {
for (container <- containersToUse) {
val rpId = getResourceProfileIdFromPriority(container.getPriority)
executorIdCounter += 1
val executorHostname = container.getNodeId.getHost
val containerId = container.getId
val executorId = executorIdCounter.toString
val yarnResourceForRpId = rpIdToYarnResource.get(rpId)
assert(container.getResource.getMemory >= yarnResourceForRpId.getMemory)
logInfo(s"Launching container $containerId on host $executorHostname " +
s"for executor with ID $executorId for ResourceProfile Id $rpId")
def updateInternalState(): Unit = synchronized {
getOrUpdateRunningExecutorForRPId(rpId).add(executorId)
getOrUpdateNumExecutorsStartingForRPId(rpId).decrementAndGet()
executorIdToContainer(executorId) = container
containerIdToExecutorIdAndResourceProfileId(container.getId) = (executorId, rpId)
val localallocatedHostToContainersMap = getOrUpdateAllocatedHostToContainersMapForRPId(rpId)
val containerSet = localallocatedHostToContainersMap.getOrElseUpdate(executorHostname,
new HashSet[ContainerId])
containerSet += containerId
allocatedContainerToHostMap.put(containerId, executorHostname)
}
val rp = rpIdToResourceProfile(rpId)
val containerMem = rp.executorResources.get(ResourceProfile.MEMORY).
map(_.amount.toInt).getOrElse(executorMemory)
val containerCores = rp.getExecutorCores.getOrElse(defaultExecutorCores)
val rpRunningExecs = getOrUpdateRunningExecutorForRPId(rpId).size
if (rpRunningExecs < getOrUpdateTargetNumExecutorsForRPId(rpId)) {
getOrUpdateNumExecutorsStartingForRPId(rpId).incrementAndGet()
if (launchContainers) {
launcherPool.execute(() => {
try {
new ExecutorRunnable(
Some(container),
conf,
sparkConf,
driverUrl,
executorId,
executorHostname,
containerMem,
containerCores,
appAttemptId.getApplicationId.toString,
securityMgr,
localResources,
rp.id
).run()
updateInternalState()
} catch {
case e: Throwable =>
getOrUpdateNumExecutorsStartingForRPId(rpId).decrementAndGet()
if (NonFatal(e)) {
logError(s"Failed to launch executor $executorId on container $containerId", e)
// Assigned container should be released immediately
// to avoid unnecessary resource occupation.
amClient.releaseAssignedContainer(containerId)
} else {
throw e
}
}
})
} else {
// For test only
updateInternalState()
}
} else {
logInfo(("Skip launching executorRunnable as running executors count: %d " +
"reached target executors count: %d.").format(rpRunningExecs,
getOrUpdateTargetNumExecutorsForRPId(rpId)))
}
}
}
// Visible for testing.
private[yarn] def processCompletedContainers(
completedContainers: Seq[ContainerStatus]): Unit = synchronized {
for (completedContainer <- completedContainers) {
val containerId = completedContainer.getContainerId
val (_, rpId) = containerIdToExecutorIdAndResourceProfileId.getOrElse(containerId,
("", DEFAULT_RESOURCE_PROFILE_ID))
val alreadyReleased = releasedContainers.remove(containerId)
val hostOpt = allocatedContainerToHostMap.get(containerId)
val onHostStr = hostOpt.map(host => s" on host: $host").getOrElse("")
val exitReason = if (!alreadyReleased) {
// Decrement the number of executors running. The next iteration of
// the ApplicationMaster's reporting thread will take care of allocating.
containerIdToExecutorIdAndResourceProfileId.get(containerId) match {
case Some((executorId, _)) =>
getOrUpdateRunningExecutorForRPId(rpId).remove(executorId)
case None => logWarning(s"Cannot find executorId for container: ${containerId.toString}")
}
logInfo("Completed container %s%s (state: %s, exit status: %s)".format(
containerId,
onHostStr,
completedContainer.getState,
completedContainer.getExitStatus))
        // Hadoop 2.2.X added a ContainerExitStatus we should switch to use.
        // There are some exit statuses we shouldn't necessarily count against us, but for
        // now I think it's ok as none of the containers are expected to exit.
val exitStatus = completedContainer.getExitStatus
val (exitCausedByApp, containerExitReason) = exitStatus match {
case ContainerExitStatus.SUCCESS =>
(false, s"Executor for container $containerId exited because of a YARN event (e.g., " +
"pre-emption) and not because of an error in the running job.")
case ContainerExitStatus.PREEMPTED =>
// Preemption is not the fault of the running tasks, since YARN preempts containers
// merely to do resource sharing, and tasks that fail due to preempted executors could
// just as easily finish on any other executor. See SPARK-8167.
(false, s"Container ${containerId}${onHostStr} was preempted.")
// Should probably still count memory exceeded exit codes towards task failures
case VMEM_EXCEEDED_EXIT_CODE =>
val vmemExceededPattern = raw"$MEM_REGEX of $MEM_REGEX virtual memory used".r
val diag = vmemExceededPattern.findFirstIn(completedContainer.getDiagnostics)
.map(_.concat(".")).getOrElse("")
val message = "Container killed by YARN for exceeding virtual memory limits. " +
s"$diag Consider boosting ${EXECUTOR_MEMORY_OVERHEAD.key} or boosting " +
s"${YarnConfiguration.NM_VMEM_PMEM_RATIO} or disabling " +
s"${YarnConfiguration.NM_VMEM_CHECK_ENABLED} because of YARN-4714."
(true, message)
case PMEM_EXCEEDED_EXIT_CODE =>
val pmemExceededPattern = raw"$MEM_REGEX of $MEM_REGEX physical memory used".r
val diag = pmemExceededPattern.findFirstIn(completedContainer.getDiagnostics)
.map(_.concat(".")).getOrElse("")
val message = "Container killed by YARN for exceeding physical memory limits. " +
s"$diag Consider boosting ${EXECUTOR_MEMORY_OVERHEAD.key}."
(true, message)
case other_exit_status =>
            // SPARK-26269: follow YARN's blacklisting behaviour (see
            // https://github.com/apache/hadoop/blob/228156cfd1b474988bc4fedfbf7edddc87db41e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java#L273
            // for details)
if (NOT_APP_AND_SYSTEM_FAULT_EXIT_STATUS.contains(other_exit_status)) {
(false, s"Container marked as failed: $containerId$onHostStr" +
s". Exit status: ${completedContainer.getExitStatus}" +
s". Diagnostics: ${completedContainer.getDiagnostics}.")
} else {
// completed container from a bad node
allocatorBlacklistTracker.handleResourceAllocationFailure(hostOpt)
(true, s"Container from a bad node: $containerId$onHostStr" +
s". Exit status: ${completedContainer.getExitStatus}" +
s". Diagnostics: ${completedContainer.getDiagnostics}.")
}
}
if (exitCausedByApp) {
logWarning(containerExitReason)
} else {
logInfo(containerExitReason)
}
ExecutorExited(exitStatus, exitCausedByApp, containerExitReason)
} else {
// If we have already released this container, then it must mean
// that the driver has explicitly requested it to be killed
ExecutorExited(completedContainer.getExitStatus, exitCausedByApp = false,
s"Container $containerId exited from explicit termination request.")
}
for {
host <- hostOpt
containerSet <- getOrUpdateAllocatedHostToContainersMapForRPId(rpId).get(host)
} {
containerSet.remove(containerId)
if (containerSet.isEmpty) {
getOrUpdateAllocatedHostToContainersMapForRPId(rpId).remove(host)
} else {
getOrUpdateAllocatedHostToContainersMapForRPId(rpId).update(host, containerSet)
}
allocatedContainerToHostMap.remove(containerId)
}
containerIdToExecutorIdAndResourceProfileId.remove(containerId).foreach { case (eid, _) =>
executorIdToContainer.remove(eid)
pendingLossReasonRequests.remove(eid) match {
case Some(pendingRequests) =>
// Notify application of executor loss reasons so it can decide whether it should abort
pendingRequests.foreach(_.reply(exitReason))
case None =>
            // We cannot find the executor among the pending loss reason requests. This happens
            // because the completed container is processed before the pending result is queried,
            // so we store the reason for a later query. This usually occurs when a container is
            // explicitly killed: the result is returned in one AM-RM communication, so the query
            // RPC arrives after this completed container has been processed.
releasedExecutorLossReasons.put(eid, exitReason)
}
if (!alreadyReleased) {
// The executor could have gone away (like no route to host, node failure, etc)
// Notify backend about the failure of the executor
numUnexpectedContainerRelease += 1
driverRef.send(RemoveExecutor(eid, exitReason))
}
}
}
}
/**
* Register that some RpcCallContext has asked the AM why the executor was lost. Note that
* we can only find the loss reason to send back in the next call to allocateResources().
*/
private[yarn] def enqueueGetLossReasonRequest(
eid: String,
context: RpcCallContext): Unit = synchronized {
if (executorIdToContainer.contains(eid)) {
pendingLossReasonRequests
.getOrElseUpdate(eid, new ArrayBuffer[RpcCallContext]) += context
} else if (releasedExecutorLossReasons.contains(eid)) {
      // Executor is already released explicitly before getting the loss reason, so directly send
      // the pre-stored loss reason
context.reply(releasedExecutorLossReasons.remove(eid).get)
} else {
logWarning(s"Tried to get the loss reason for non-existent executor $eid")
context.sendFailure(
new SparkException(s"Fail to find loss reason for non-existent executor $eid"))
}
}
private def internalReleaseContainer(container: Container): Unit = synchronized {
releasedContainers.add(container.getId())
amClient.releaseAssignedContainer(container.getId())
}
private[yarn] def getNumUnexpectedContainerRelease: Long = synchronized {
numUnexpectedContainerRelease
}
private[yarn] def getNumPendingLossReasonRequests: Int = synchronized {
pendingLossReasonRequests.size
}
/**
* Split the pending container requests into 3 groups based on current localities of pending
* tasks.
* @param hostToLocalTaskCount a map of preferred hostname to possible task counts to be used as
* container placement hint.
   * @param pendingAllocations A sequence of pending allocation container requests.
* @return A tuple of 3 sequences, first is a sequence of locality matched container
* requests, second is a sequence of locality unmatched container requests, and third is a
* sequence of locality free container requests.
*/
private def splitPendingAllocationsByLocality(
hostToLocalTaskCount: Map[String, Int],
pendingAllocations: Seq[ContainerRequest]
): (Seq[ContainerRequest], Seq[ContainerRequest], Seq[ContainerRequest]) = {
val localityMatched = ArrayBuffer[ContainerRequest]()
val localityUnMatched = ArrayBuffer[ContainerRequest]()
val localityFree = ArrayBuffer[ContainerRequest]()
val preferredHosts = hostToLocalTaskCount.keySet
pendingAllocations.foreach { cr =>
val nodes = cr.getNodes
if (nodes == null) {
localityFree += cr
} else if (nodes.asScala.toSet.intersect(preferredHosts).nonEmpty) {
localityMatched += cr
} else {
localityUnMatched += cr
}
}
(localityMatched, localityUnMatched, localityFree)
}
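  // Worked example: with hostToLocalTaskCount = Map("host1" -> 2), a pending request whose nodes
  // include "host1" ends up in localityMatched, a request whose nodes name only other hosts ends
  // up in localityUnMatched, and a request with nodes == null ends up in localityFree.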
}
private object YarnAllocator {
val MEM_REGEX = "[0-9.]+ [KMG]B"
val VMEM_EXCEEDED_EXIT_CODE = -103
val PMEM_EXCEEDED_EXIT_CODE = -104
val NOT_APP_AND_SYSTEM_FAULT_EXIT_STATUS = Set(
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
ContainerExitStatus.KILLED_BY_APPMASTER,
ContainerExitStatus.KILLED_AFTER_APP_COMPLETION,
ContainerExitStatus.ABORTED,
ContainerExitStatus.DISKS_FAILED
)
}
|
goldmedal/spark
|
resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
|
Scala
|
apache-2.0
| 44,765 |
package sigmastate
import org.scalatest.{PropSpecLike, Tag}
import org.scalactic.source.Position
import sigmastate.eval.Profiler
import spire.syntax.all.cfor
import scala.util.DynamicVariable
trait CrossVersionProps extends PropSpecLike with TestsBase {
val printVersions: Boolean = false
/** Number of times each test property is warmed up (i.e. executed before final execution). */
def perTestWarmUpIters: Int = 0
private[sigmastate] val _warmupProfiler = new DynamicVariable[Option[Profiler]](None)
def warmupProfiler: Option[Profiler] = _warmupProfiler.value
protected def testFun_Run(testName: String, testFun: => Any): Unit = {
def msg = s"""property("$testName")(ActivatedVersion = $activatedVersionInTests; ErgoTree version = $ergoTreeVersionInTests)"""
if (printVersions) println(msg)
try testFun
catch {
case t: Throwable =>
if (!printVersions) {
// wasn't printed, print it now
println(msg)
}
throw t
}
}
override protected def property(testName: String, testTags: Tag*)
(testFun: => Any)
(implicit pos: Position): Unit = {
super.property(testName, testTags:_*) {
// do warmup if necessary
if (perTestWarmUpIters > 0) {
_warmupProfiler.withValue(Some(new Profiler)) {
cfor(0)(_ < perTestWarmUpIters, _ + 1) { _ =>
testFun_Run(testName, testFun)
}
}
System.gc()
Thread.sleep(100) // give it some time to finish warm-up
}
forEachScriptAndErgoTreeVersion(activatedVersions, ergoTreeVersions) {
testFun_Run(testName, testFun)
}
if (okRunTestsWithoutMCLowering) {
_lowerMethodCalls.withValue(false) {
testFun_Run(testName, testFun)
}
}
}
}
}
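// A minimal usage sketch (suite and property names are hypothetical, and a concrete suite may need
// whatever additional mixins TestsBase requires): each property below is executed once per
// activated/ErgoTree version combination, optionally preceded by warm-up iterations.
//
//   class MySpec extends CrossVersionProps {
//     override def perTestWarmUpIters: Int = 2
//     property("serialization roundtrip") {
//       // assertions here are re-run for every version pair
//     }
//   }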
|
ScorexFoundation/sigmastate-interpreter
|
sigmastate/src/test/scala/sigmastate/CrossVersionProps.scala
|
Scala
|
mit
| 1,860 |
package regolic.lp
import regolic.asts.core.Trees._
import regolic.asts.core.Manip._
import regolic.asts.theories.real.Trees._
import regolic.asts.theories.real.Manip._
import regolic.asts.theories.real.Eval
import regolic.algebra.Rational
import regolic.algebra.Vector
import regolic.algebra.Matrix
import scala.collection.mutable.HashMap
import scala.collection.mutable.ListBuffer
// A standard linear program is a canonical form of a linear program where we try to minimize
// a function c.x under some constraints that can be written Ax = b with x >= 0.
// c, b and A are the standard names in that situation, so we make use of them.
class StandardLinearProgram(val c: Vector[Rational], val A: Matrix[Rational], val b: Vector[Rational]) {
val nbConstraints = b.size
val nbVariables = c.size
require(A.nbRow == nbConstraints && A.nbCol == nbVariables)
override def toString = {
"min: " + c.toArray.zipWithIndex.map{ case (r, i) => r.toString + "*x_" + i }.mkString(" + ") + "\\nsubject to:\\n" +
A.toRows.zipWithIndex.map{ case (row, index) => row.toArray.zipWithIndex.map{ case (r, i) => r.toString + "*x_" + i }.mkString(" + ") + " = " + b(index) }.mkString("\\n")
}
}
object StandardLinearProgram {
private def coefVar(v: Variable, t: Term): Rational = if(!contains(t, v)) Rational.zero else polynomialForm(t, v) match {
case Add(Mul(coeff :: Pow(v2, Num(r)) :: Nil) :: rest :: Nil) if v2==v && r.isOne => Eval(coeff, Map())
case Add(Mul(coeff :: Pow(v2, Num(r)) :: Nil) :: Nil) if v2==v && r.isZero => Rational.zero
case _ => throw new IllegalArgumentException("not a linear expression: " + t)
}
  /*
   * This takes a relaxed format with an objective function written as a Term with variables and some constraints written as:
   * ti <= bi,
   * ti >= bi or
   * ti = bi
   * and transforms this linear program into standard form (see the usage sketch after this method).
   *
   * The returned array indicates the corresponding vector of variables and the Map gives a mapping from the original variables
   * that have been transformed into an expression of two fresh variables. If we have (v -> (v1, v2)) in the mapping, this means
   * that v has been replaced by v1 - v2 in the linear program.
   *
   */
def fromMin(objectiveFunction: Term, constraints: List[PredicateApplication]): (StandardLinearProgram, Array[Variable], Map[Variable, (Variable, Variable)]) = {
val initialVars: Set[Variable] = (vars(objectiveFunction) ++ constraints.map(vars).flatten)
val (boundedVarsEq, restConstraints) = constraints.partition{
case GreaterEqual(Var(_), Zero()) => true
case LessEqual(Zero(), Var(_)) => true
case _ => false
}
val boundedVars = boundedVarsEq.map{
case GreaterEqual(v@Var(_), _)=> v
case LessEqual(_, v@Var(_)) => v
}
val allVars: ListBuffer[Variable] = new ListBuffer
allVars ++= boundedVars
val varMapping: HashMap[Variable, (Variable, Variable)] = new HashMap
val unboundedVars = initialVars.filterNot(v => boundedVars.contains(v))
unboundedVars.foreach(v => {
val v1 = freshVariable(v)
val v2 = freshVariable(v)
varMapping.put(v, (v1, v2))
allVars += v1
allVars += v2
})
val var2Terms = varMapping.map{case (v, (v1, v2)) => (v, Sub(v1, v2))}.toMap
val boundedConstraints = restConstraints.map(c => substitute(c, var2Terms))
val boundedObjectiveFunction = substitute(objectiveFunction, var2Terms)
val finalConstraints = boundedConstraints.map{
case LessEqual(t, n@Num(_)) => {
val slackVar = freshVar()
allVars += slackVar
Equals(Add(t, slackVar), n)
}
case GreaterEqual(t, Num(r)) => {
val slackVar = freshVar()
allVars += slackVar
Equals(Add(Neg(t), slackVar), Num(-r))
}
case eq@Equals(_, Num(_)) => eq
case _ => throw new IllegalArgumentException
}
val (coefTerms, constraintsTerms) = finalConstraints.map{ case Equals(t, Num(r)) => (t, r) }.unzip
val allVarsArray = allVars.toArray
val matrixCoef: Matrix[Rational] = Matrix(coefTerms.map(t => allVarsArray.map(v => coefVar(v, t))).toArray)
val vectorConstraints: Vector[Rational] = Vector(constraintsTerms.toArray)
val vectorObjective: Vector[Rational] = Vector(allVarsArray.map(v => coefVar(v, boundedObjectiveFunction)))
(new StandardLinearProgram(vectorObjective, matrixCoef, vectorConstraints), allVarsArray, varMapping.toMap)
}
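  /*
   * A hedged usage sketch (x, y are Variables and b is a Rational obtained elsewhere; the
   * Add/LessEqual/GreaterEqual/Num/Zero constructors are assumed to be available from the
   * Trees imports above, mirroring the patterns matched in this file):
   *
   *   // minimize x + y subject to x + y <= b, with explicit bounds x >= 0 and y >= 0
   *   val (std, varOrder, splits) = fromMin(
   *     Add(x, y),
   *     List(LessEqual(Add(x, y), Num(b)), GreaterEqual(x, Zero()), GreaterEqual(y, Zero())))
   *   // varOrder gives the column order of std.c and std.A; splits maps every variable that
   *   // lacked an explicit nonnegativity bound to the (vPlus, vMinus) pair that replaced it.
   */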
def fromMax(objectiveFunction: Term, constraints: List[PredicateApplication]): (StandardLinearProgram, Array[Variable], Map[Variable, (Variable, Variable)]) = {
val minObjective = Neg(objectiveFunction)
fromMin(minObjective, constraints)
}
}
|
regb/scabolic
|
src/main/scala/regolic/lp/StandardLinearProgram.scala
|
Scala
|
mit
| 4,695 |
package org.littlewings.javaee7.rest
import javax.ws.rs.core.{Context, MediaType, Response, UriInfo}
import javax.ws.rs._
import io.swagger.annotations.{Api, ApiOperation}
import scala.collection.JavaConverters._
object BookResource {
private[rest] val books: scala.collection.mutable.Map[String, Book] =
new java.util.concurrent.ConcurrentHashMap[String, Book]().asScala
}
@Path("book")
@Api("book")
class BookResource {
@GET
@Produces(Array(MediaType.APPLICATION_JSON))
@ApiOperation(value = "find all books", response = classOf[Seq[Book]])
  def findAll: Seq[Book] =
BookResource.books.values.toVector
@GET
@Path("{isbn}")
@Produces(Array(MediaType.APPLICATION_JSON))
@ApiOperation(value = "find book", response = classOf[Book])
def find(@PathParam("isbn") isbn: String): Book =
BookResource.books.get(isbn).orNull
@PUT
@Path("{isbn}")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MediaType.APPLICATION_JSON))
@ApiOperation("register book")
def register(book: Book, @Context uriInfo: UriInfo): Response = {
BookResource.books.put(book.isbn, book)
Response.created(uriInfo.getRequestUriBuilder.build(book.isbn)).build
}
}
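// A hypothetical client-side summary of the endpoints declared above (paths are relative to the
// JAX-RS application root):
//   PUT /book/{isbn}  with a JSON Book body registers the book and returns 201 Created
//   GET /book         returns all registered books as JSON
//   GET /book/{isbn}  returns the matching book, or no entity when the ISBN is unknown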
|
kazuhira-r/javaee7-scala-examples
|
resteasy-swagger/src/main/scala/org/littlewings/javaee7/rest/BookResource.scala
|
Scala
|
mit
| 1,199 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.examples.scala
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
/**
* Simple example for demonstrating the use of SQL on a Stream Table in Scala.
*
* <p>Usage: <code>StreamSQLExample --planner <blink|flink></code><br>
*
* <p>This example shows how to:
* - Convert DataStreams to Tables
* - Register a Table under a name
* - Run a StreamSQL query on the registered Table
*
*/
object StreamSQLExample {
// *************************************************************************
// PROGRAM
// *************************************************************************
def main(args: Array[String]): Unit = {
val params = ParameterTool.fromArgs(args)
val planner = if (params.has("planner")) params.get("planner") else "blink"
// set up execution environment
val env = StreamExecutionEnvironment.getExecutionEnvironment
val tEnv = if (planner == "blink") { // use blink planner in streaming mode
val settings = EnvironmentSettings.newInstance()
.useBlinkPlanner()
.inStreamingMode()
.build()
StreamTableEnvironment.create(env, settings)
} else if (planner == "flink") { // use flink planner in streaming mode
val settings = EnvironmentSettings.newInstance()
.useOldPlanner()
.inStreamingMode()
.build()
StreamTableEnvironment.create(env, settings)
} else {
System.err.println("The planner is incorrect. Please run 'StreamSQLExample --planner <planner>', " +
"where planner (it is either flink or blink, and the default is blink) indicates whether the " +
"example uses flink planner or blink planner.")
return
}
val orderA: DataStream[Order] = env.fromCollection(Seq(
Order(1L, "beer", 3),
Order(1L, "diaper", 4),
Order(3L, "rubber", 2)))
val orderB: DataStream[Order] = env.fromCollection(Seq(
Order(2L, "pen", 3),
Order(2L, "rubber", 3),
Order(4L, "beer", 1)))
// convert DataStream to Table
val tableA = tEnv.fromDataStream(orderA, $"user", $"product", $"amount")
// register DataStream as Table
tEnv.createTemporaryView("OrderB", orderB, $"user", $"product", $"amount")
// union the two tables
val result = tEnv.sqlQuery(
s"""
|SELECT * FROM $tableA WHERE amount > 2
|UNION ALL
|SELECT * FROM OrderB WHERE amount < 2
""".stripMargin)
result.toAppendStream[Order].print()
env.execute()
}
// *************************************************************************
// USER DATA TYPES
// *************************************************************************
case class Order(user: Long, product: String, amount: Int)
}
|
tzulitai/flink
|
flink-examples/flink-examples-table/src/main/scala/org/apache/flink/table/examples/scala/StreamSQLExample.scala
|
Scala
|
apache-2.0
| 3,802 |
package scalaz.stream
import Cause._
import scala.annotation.tailrec
import scala.collection.SortedMap
import scalaz.{Catchable, Functor, Monad, Monoid, Nondeterminism, \\/, -\\/, ~>}
import scalaz.\\/._
import scalaz.concurrent.{Actor, Strategy, Task}
import scalaz.stream.process1.Await1
/**
* An effectful stream of `O` values. In between emitting values
* a `Process` may request evaluation of `F` effects.
* A `Process[Nothing,A]` is a pure `Process` with no effects.
* A `Process[Task,A]` may have `Task` effects. A `Process`
* halts due to some `Cause`, generally `End` (indicating normal
* termination) or `Error(t)` for some `t: Throwable` indicating
* abnormal termination due to some uncaught error.
*/
sealed trait Process[+F[_], +O]
extends Process1Ops[F,O]
with TeeOps[F,O] {
import scalaz.stream.Process._
import scalaz.stream.Util._
/**
* Generate a `Process` dynamically for each output of this `Process`, and
* sequence these processes using `append`.
*/
final def flatMap[F2[x] >: F[x], O2](f: O => Process[F2, O2]): Process[F2, O2] = {
// Util.debug(s"FMAP $this")
this match {
case Halt(_) => this.asInstanceOf[Process[F2, O2]]
case Emit(os) if os.isEmpty => this.asInstanceOf[Process[F2, O2]]
case Emit(os) => os.tail.foldLeft(Try(f(os.head)))((p, n) => p ++ Try(f(n)))
case aw@Await(_, _) => aw.extend(_ flatMap f)
case ap@Append(p, n) => ap.extend(_ flatMap f)
}
}
/** Transforms the output values of this `Process` using `f`. */
final def map[O2](f: O => O2): Process[F, O2] =
flatMap { o => emit(f(o))}
/**
* If this process halts due to `Cause.End`, runs `p2` after `this`.
* Otherwise halts with whatever caused `this` to `Halt`.
*/
final def append[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = {
onHalt {
case End => p2
case cause => Halt(cause)
}
}
/** Alias for `append` */
final def ++[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = append(p2)
/** Alias for `append` */
final def fby[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = append(p2)
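  // For example, `emit(1) ++ emit(2)` emits 1 and then 2, whereas `Halt(Kill) ++ emit(2)` never
  // runs the second process, because `append` only continues on normal (`End`) termination.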
/**
* Run one step of an incremental traversal of this `Process`.
* This function is mostly intended for internal use. As it allows
* a `Process` to be observed and captured during its execution,
* users are responsible for ensuring resource safety.
*/
final def step: HaltOrStep[F, O] = {
def go(cur: Process[F,O], stack: Vector[Cause => Trampoline[Process[F,O]]]) : HaltOrStep[F,O] = {
if (stack.nonEmpty) cur match {
case Halt(cause) => go(Try(stack.head(cause).run), stack.tail)
case Emit(os) if os.isEmpty => go(Try(stack.head(End).run), stack.tail)
case emt@(Emit(os)) => Step(emt,Cont(stack))
case awt@Await(_,_) => Step(awt,Cont(stack))
case Append(h,st) => go(h, st fast_++ stack)
} else cur match {
case hlt@Halt(cause) => hlt
case emt@Emit(os) if (os.isEmpty) => halt0
case emt@Emit(os) => Step(emt,Cont(Vector.empty))
case awt@Await(_,_) => Step(awt,Cont(Vector.empty))
case Append(h,st) => go(h,st)
}
}
go(this,Vector.empty)
}
/**
* `p.suspendStep` propagates exceptions to `p`.
*/
final def suspendStep: Process0[HaltOrStep[F, O]] =
halt onHalt {
case End => emit(step)
case early: EarlyCause => emit(injectCause(early).step)
}
/**
* When this `Process` halts, call `f` to produce the next state.
* Note that this function may be used to swallow or handle errors.
*/
final def onHalt[F2[x] >: F[x], O2 >: O](f: Cause => Process[F2, O2]): Process[F2, O2] = {
val next = (t: Cause) => Trampoline.delay(Try(f(t)))
this match {
case Append(h, stack) => Append(h, stack :+ next)
case emt@Emit(_) => Append(emt, Vector(next))
case awt@Await(_, _) => Append(awt, Vector(next))
case hlt@Halt(rsn) => Append(hlt, Vector(next))
}
}
//////////////////////////////////////////////////////////////////////////////////////
//
// Pipe and Tee
//
/////////////////////////////////////////////////////////////////////////////////////
/**
* Feed the output of this `Process` as input of `p1`. The implementation
* will fuse the two processes, so this process will only generate
   * values as they are demanded by `p1`. If `p1` signals termination, `this`
   * is killed with the same reason, giving it an opportunity to clean up.
*/
final def pipe[O2](p1: Process1[O, O2]): Process[F, O2] =
p1.suspendStep.flatMap({ s1 =>
s1 match {
case s@Step(awt1@Await1(rcv1), cont1) =>
val nextP1 = s.toProcess
this.step match {
case Step(awt@Await(_, _), cont) => awt.extend(p => (p +: cont) pipe nextP1)
case Step(Emit(os), cont) => cont.continue pipe process1.feed(os)(nextP1)
case hlt@Halt(End) => hlt pipe nextP1.disconnect(Kill).swallowKill
case hlt@Halt(rsn: EarlyCause) => hlt pipe nextP1.disconnect(rsn)
}
case Step(emt@Emit(os), cont) =>
// When the pipe is killed from the outside it is killed at the beginning or after emit.
// This ensures that Kill from the outside is not swallowed.
emt onHalt {
case End => this.pipe(cont.continue)
case early => this.pipe(Halt(early) +: cont).causedBy(early)
}
case Halt(rsn) => this.kill onHalt { _ => Halt(rsn) }
}
})
/** Operator alias for `pipe`. */
final def |>[O2](p2: Process1[O, O2]): Process[F, O2] = pipe(p2)
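  // A hedged example (assuming `process1.lift` from this library's process1 module):
  //   emitAll(Seq(1, 2, 3)) |> process1.lift((i: Int) => i + 1)   // emits 2, 3, 4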
/**
* Use a `Tee` to interleave or combine the outputs of `this` and
* `p2`. This can be used for zipping, interleaving, and so forth.
* Nothing requires that the `Tee` read elements from each
* `Process` in lockstep. It could read fifty elements from one
* side, then two elements from the other, then combine or
* interleave these values in some way, etc.
*
* If at any point the `Tee` awaits on a side that has halted,
* we gracefully kill off the other side, then halt.
*
* If at any point `t` terminates with cause `c`, both sides are killed, and
* the resulting `Process` terminates with `c`.
*/
final def tee[F2[x] >: F[x], O2, O3](p2: Process[F2, O2])(t: Tee[O, O2, O3]): Process[F2, O3] = {
import scalaz.stream.tee.{AwaitL, AwaitR, disconnectL, disconnectR, feedL, feedR}
t.suspendStep flatMap { ts =>
ts match {
case s@Step(AwaitL(_), contT) => this.step match {
case Step(awt@Await(rq, rcv), contL) => awt.extend { p => (p +: contL).tee(p2)(s.toProcess) }
case Step(Emit(os), contL) => contL.continue.tee(p2)(feedL[O, O2, O3](os)(s.toProcess))
case hlt@Halt(End) => hlt.tee(p2)(disconnectL(Kill)(s.toProcess).swallowKill)
case hlt@Halt(rsn: EarlyCause) => hlt.tee(p2)(disconnectL(rsn)(s.toProcess))
}
case s@Step(AwaitR(_), contT) => p2.step match {
case s2: Step[F2, O2]@unchecked =>
(s2.head, s2.next) match {
case (awt: Await[F2, Any, O2]@unchecked, contR) =>
awt.extend { (p: Process[F2, O2]) => this.tee(p +: contR)(s.toProcess) }
case (Emit(o2s), contR) =>
this.tee(contR.continue.asInstanceOf[Process[F2,O2]])(feedR[O, O2, O3](o2s)(s.toProcess))
}
case hlt@Halt(End) => this.tee(hlt)(disconnectR(Kill)(s.toProcess).swallowKill)
case hlt@Halt(rsn : EarlyCause) => this.tee(hlt)(disconnectR(rsn)(s.toProcess))
}
case Step(emt@Emit(o3s), contT) =>
// When the process is killed from the outside it is killed at the beginning or after emit.
// This ensures that Kill from the outside isn't swallowed.
emt onHalt {
case End => this.tee(p2)(contT.continue)
case early => this.tee(p2)(Halt(early) +: contT).causedBy(early)
}
case Halt(rsn) => this.kill onHalt { _ => p2.kill onHalt { _ => Halt(rsn) } }
}
}
}
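  // A hedged example (assuming `tee.zip` from this library's tee module): zipping stops once the
  // shorter side halts, and the longer side is gracefully killed.
  //   emitAll(Seq(1, 2)).tee(emitAll(Seq("a", "b", "c")))(tee.zip)   // emits (1, "a"), (2, "b")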
//////////////////////////////////////////////////////////////////////////////////////
//
// Alphabetically, Other combinators
//
/////////////////////////////////////////////////////////////////////////////////////
/**
   * Catch exceptions produced by this `Process`, not including termination by `Continue`, `End`, or `Kill`,
   * and use `f` to decide whether to resume with a second process.
*/
final def attempt[F2[x] >: F[x], O2](
f: Throwable => Process[F2, O2] = (t: Throwable) => emit(t)
): Process[F2, O2 \\/ O] =
this.map(right) onHalt {
case Error(t) => Try(f(t)).map(left)
case rsn => Halt(rsn)
}
/**
   * Attach `cause` when this Process terminates. See `Cause.causedBy` for semantics.
*/
final def causedBy(cause: Cause): Process[F, O] =
cause.fold(this)(ec => this.onHalt(c => Halt(c.causedBy(ec))))
/**
* Used when a `Process1`, `Tee`, or `Wye` is terminated by awaiting
* on a branch that is in the halted state or was killed. Such a process
* is given the opportunity to emit any final values. All Awaits are
* converted to terminate with `cause`
*/
final def disconnect(cause: EarlyCause): Process0[O] =
this.step match {
case Step(emt@Emit(_), cont) => emt +: cont.extend(_.disconnect(cause))
case Step(awt@Await(_, rcv), cont) => suspend((Try(rcv(left(cause)).run) +: cont).disconnect(cause))
case hlt@Halt(rsn) => Halt(rsn)
}
/** Ignore all outputs of this `Process`. */
final def drain: Process[F, Nothing] = flatMap(_ => halt)
/**
* Map over this `Process` to produce a stream of `F`-actions,
* then evaluate these actions.
*/
def evalMap[F2[x]>:F[x],O2](f: O => F2[O2]): Process[F2,O2] =
map(f).eval
/** Prepend a sequence of elements to the output of this `Process`. */
def prepend[O2>:O](os:Seq[O2]) : Process[F,O2] = {
if (os.nonEmpty) {
emitAll(os) onHalt {
case End => this
case cause: EarlyCause => this.step match {
case Step(Await(_, rcv), cont) => Try(rcv(left(cause)).run) +: cont
case Step(Emit(_), cont) => Halt(cause) +: cont
case Halt(rsn) => Halt(rsn.causedBy(cause))
}
}
} else this
}
/**
* Map over this `Process` to produce a stream of `F`-actions,
* then evaluate these actions in batches of `bufSize`, allowing
* for nondeterminism in the evaluation order of each action in the
* batch.
*/
def gatherMap[F2[x]>:F[x],O2](bufSize: Int)(f: O => F2[O2])(
implicit F: Nondeterminism[F2]): Process[F2,O2] =
map(f).gather(bufSize)
/**
* Catch some of the exceptions generated by this `Process`, rethrowing any
* not handled by the given `PartialFunction` and stripping out any values
* emitted before the error.
*/
def handle[F2[x]>:F[x],O2](f: PartialFunction[Throwable, Process[F2,O2]])(implicit F: Catchable[F2]): Process[F2, O2] =
attempt(rsn => f.lift(rsn).getOrElse(fail(rsn)))
.dropWhile(_.isRight)
.map(_.fold(identity, _ => sys.error("unpossible")))
/** Returns true, if this process is halted */
final def isHalt: Boolean = this match {
case Halt(_) => true
case _ => false
}
/**
* Skip the first part of the process and pretend that it ended with `early`.
* The first part is the first `Halt` or the first `Emit` or request from the first `Await`.
*/
private[stream] final def injectCause(early: EarlyCause): Process[F, O] = (this match {
// Note: We cannot use `step` in the implementation since we want to inject `early` as soon as possible.
// Eg. Let `q` be `halt ++ halt ++ ... ++ p`. `step` reduces `q` to `p` so if `injectCause` was implemented
// by `step` then `q.injectCause` would be same as `p.injectCause`. But in our current implementation
// `q.injectCause` behaves as `Halt(early) ++ halt ++ ... ++ p` which behaves as `Halt(early)`
// (by the definition of `++` and the fact `early != End`).
case Halt(rsn) => Halt(rsn.causedBy(early))
case Emit(_) => Halt(early)
case Await(_, rcv) => Try(rcv(left(early)).run)
case Append(Halt(rsn), stack) => Append(Halt(rsn.causedBy(early)), stack)
case Append(Emit(_), stack) => Append(Halt(early), stack)
case Append(Await(_, rcv), stack) => Try(rcv(left(early)).run) +: Cont(stack)
})
/**
* Causes this process to be terminated immediately with `Kill` cause,
* giving chance for any cleanup actions to be run
*/
final def kill: Process[F, Nothing] = injectCause(Kill).drain.causedBy(Kill)
/**
* Run `p2` after this `Process` completes normally, or in the event of an error.
* This behaves almost identically to `append`, except that `p1 append p2` will
* not run `p2` if `p1` halts with an `Error` or is killed. Any errors raised by
* `this` are reraised after `p2` completes.
*
* Note that `p2` is made into a finalizer using `asFinalizer`, so we
* can be assured it is run even when this `Process` is being killed
* by a downstream consumer.
*/
final def onComplete[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] =
this.onHalt { cause => p2.asFinalizer.causedBy(cause) }
/**
* Mostly internal use function. Ensures this `Process` is run even
* when being `kill`-ed. Used to ensure resource safety in various
* combinators.
*/
final def asFinalizer: Process[F, O] = {
def mkAwait[F[_], A, O](req: F[A])(rcv: EarlyCause \\/ A => Trampoline[Process[F, O]]) = Await(req, rcv)
step match {
case Step(e@Emit(_), cont) => e onHalt {
case Kill => (halt +: cont).asFinalizer.causedBy(Kill)
case cause => (Halt(cause) +: cont).asFinalizer
}
case Step(Await(req, rcv), cont) => mkAwait(req) {
case -\\/(Kill) => Trampoline.delay(Await(req, rcv).asFinalizer.causedBy(Kill))
case x => rcv(x).map(p => (p +: cont).asFinalizer)
}
case hlt@Halt(_) => hlt
}
}
/**
* If this `Process` completes with an error, call `f` to produce
* the next state. `f` is responsible for reraising the error if that
* is the desired behavior. Since this function is often used for attaching
* resource deallocation logic, the result of `f` is made into a finalizer
* using `asFinalizer`, so we can be assured it is run even when this `Process`
* is being killed by a downstream consumer.
*/
final def onFailure[F2[x] >: F[x], O2 >: O](f: Throwable => Process[F2, O2]): Process[F2, O2] =
this.onHalt {
case err@Error(rsn) => f(rsn).asFinalizer
case other => Halt(other)
}
/**
* Attach supplied process only if process has been killed.
* Since this function is often used for attaching resource
* deallocation logic, the result of `f` is made into a finalizer
* using `asFinalizer`, so we can be assured it is run even when
* this `Process` is being killed by a downstream consumer.
*/
final def onKill[F2[x] >: F[x], O2 >: O](p: => Process[F2, O2]): Process[F2, O2] =
this.onHalt {
case Kill => p.asFinalizer
case other => Halt(other)
}
/**
* Like `attempt`, but accepts a partial function. Unhandled errors are rethrown.
*/
def partialAttempt[F2[x]>:F[x],O2](f: PartialFunction[Throwable, Process[F2,O2]])
(implicit F: Catchable[F2]): Process[F2, O2 \\/ O] =
attempt(err => f.lift(err).getOrElse(fail(err)))
/**
* Run this process until it halts, then run it again and again, as
* long as no errors or `Kill` occur.
*/
final def repeat: Process[F, O] = this.append(this.repeat)
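  // For example, `emit(1).repeat` is an infinite stream of 1s; bounded consumption is expected
  // downstream, e.g. `emit(1).repeat |> process1.take(3)` (assuming `process1.take`).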
/**
   * For any process terminating with `Kill`, this swallows the `Kill` and replaces it with `End` termination
*/
final def swallowKill: Process[F,O] =
this.onHalt {
case Kill | End => halt
case cause => Halt(cause)
}
/** Translate the request type from `F` to `G`, using the given polymorphic function. */
def translate[G[_]](f: F ~> G): Process[G,O] =
this.suspendStep.flatMap {
case Step(Emit(os),cont) => emitAll(os) +: cont.extend(_.translate(f))
case Step(Await(req,rcv),cont) =>
Await[G,Any,O](f(req), r => {
Trampoline.suspend(rcv(r)).map(_ translate f)
}) +: cont.extend(_.translate(f))
case hlt@Halt(rsn) => hlt
}
/**
* Remove any leading emitted values from this `Process`.
*/
@tailrec
final def trim: Process[F,O] =
this.step match {
case Step(Emit(_), cont) => cont.continue.trim
case _ => this
}
/**
* Removes all emitted elements from the front of this `Process`.
* The second argument returned by this method is guaranteed to be
   * an `Await`, `Halt` or an `Append` -- if there are multiple `Emit`s at the
   * front of this process, the sequences are concatenated together.
*
* If this `Process` does not begin with an `Emit`, returns the empty
* sequence along with `this`.
*/
final def unemit:(Seq[O],Process[F,O]) = {
@tailrec
def go(cur: Process[F, O], acc: Vector[O]): (Seq[O], Process[F, O]) = {
cur.step match {
case Step(Emit(os),cont) => go(cont.continue, acc fast_++ os)
case Step(awt, cont) => (acc,awt +: cont)
case Halt(rsn) => (acc,Halt(rsn))
}
}
go(this, Vector())
}
///////////////////////////////////////////
//
// Interpreters, runXXX
//
///////////////////////////////////////////
/**
* Collect the outputs of this `Process[F,O]` into a Monoid `B`, given a `Monad[F]` in
* which we can catch exceptions. This function is not tail recursive and
* relies on the `Monad[F]` to ensure stack safety.
*/
final def runFoldMap[F2[x] >: F[x], B](f: O => B)(implicit F: Monad[F2], C: Catchable[F2], B: Monoid[B]): F2[B] = {
def go(cur: Process[F2, O], acc: B): F2[B] = {
cur.step match {
case s: Step[F2,O]@unchecked =>
(s.head, s.next) match {
case (Emit(os), cont) =>
F.bind(F.point(os.foldLeft(acc)((b, o) => B.append(b, f(o))))) { nacc =>
go(cont.continue.asInstanceOf[Process[F2,O]], nacc)
}
case (awt:Await[F2,Any,O]@unchecked, cont) =>
F.bind(C.attempt(awt.req)) { r =>
go((Try(awt.rcv(EarlyCause(r)).run) +: cont).asInstanceOf[Process[F2,O]]
, acc)
}
}
case Halt(End) => F.point(acc)
case Halt(Kill) => F.point(acc)
case Halt(Error(rsn)) => C.fail(rsn)
}
}
go(this, B.zero)
}
/**
* Collect the outputs of this `Process[F,O]`, given a `Monad[F]` in
* which we can catch exceptions. This function is not tail recursive and
* relies on the `Monad[F]` to ensure stack safety.
*/
final def runLog[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[IndexedSeq[O2]] = {
F.map(runFoldMap[F2, Vector[O2]](Vector(_))(
F, C,
// workaround for performance bug in Vector ++
Monoid.instance[Vector[O2]]((a, b) => a fast_++ b, Vector())
))(_.toIndexedSeq)
}
/** Run this `Process` solely for its final emitted value, if one exists. */
final def runLast[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[Option[O2]] =
F.map(this.last.runLog[F2,O2])(_.lastOption)
/** Run this `Process` solely for its final emitted value, if one exists, using `o2` otherwise. */
final def runLastOr[F2[x] >: F[x], O2 >: O](o2: => O2)(implicit F: Monad[F2], C: Catchable[F2]): F2[O2] =
F.map(this.last.runLog[F2,O2])(_.lastOption.getOrElse(o2))
/** Run this `Process`, purely for its effects. */
final def run[F2[x] >: F[x]](implicit F: Monad[F2], C: Catchable[F2]): F2[Unit] =
F.void(drain.runLog(F, C))
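  // A hedged example (Task's Monad and Catchable instances come from scalaz.concurrent, imported
  // above; `toSource`, converting a pure Process into a Process[Task, O], is assumed):
  //   Process.emitAll(Seq(1, 2, 3)).toSource.runLog.run   // Vector(1, 2, 3)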
}
object Process extends ProcessInstances {
import scalaz.stream.Util._
//////////////////////////////////////////////////////////////////////////////////////
//
// Algebra
//
/////////////////////////////////////////////////////////////////////////////////////
type Trampoline[+A] = scalaz.Free.Trampoline[A]
val Trampoline = scalaz.Trampoline
/**
* Tags a state of a process that has no appended tail; that means it can be `Halt`, `Emit` or `Await`.
*/
sealed trait HaltEmitOrAwait[+F[_], +O] extends Process[F, O]
object HaltEmitOrAwait {
def unapply[F[_], O](p: Process[F, O]): Option[HaltEmitOrAwait[F, O]] = p match {
case emit: Emit[O@unchecked] => Some(emit)
case halt: Halt => Some(halt)
case aw: Await[F@unchecked, _, O@unchecked] => Some(aw)
case _ => None
}
}
/**
* Marker trait representing a process in the `Emit` or `Await` state.
* Useful for additional type safety.
*/
sealed trait EmitOrAwait[+F[_], +O] extends Process[F, O]
/**
* The `Halt` constructor instructs the driver
* that the last evaluation of the process completed with
* the supplied cause.
*/
case class Halt(cause: Cause) extends HaltEmitOrAwait[Nothing, Nothing] with HaltOrStep[Nothing, Nothing]
/**
* The `Emit` constructor instructs the driver to emit
* the given sequence of values to the output
* and then halt execution with supplied reason.
*
* Instead of calling this constructor directly, please use one
* of the following helpers:
*
* Process.emit
* Process.emitAll
*/
case class Emit[+O](seq: Seq[O]) extends HaltEmitOrAwait[Nothing, O] with EmitOrAwait[Nothing, O]
/**
* The `Await` constructor instructs the driver to evaluate
* `req`. If it returns successfully, `rcv` is called with the result on the right side
* to transition to the next state.
*
* If `req` terminates with a failure, `Error(failure)` is passed on the left side,
* giving a chance for any fallback action.
*
* If the process was killed before the request was evaluated, `Kill` is passed on the left side.
* `Kill` is also passed on the left side when the request is already in progress but the process was killed.
*
* Instead of using this constructor directly, please use:
*
* Process.await
*
*/
case class Await[+F[_], A, +O](
req: F[A]
, rcv: (EarlyCause \\/ A) => Trampoline[Process[F, O]]
) extends HaltEmitOrAwait[F, O] with EmitOrAwait[F, O] {
/**
* Helper to modify the result of the `rcv` parameter of this `Await` stack-safely on the trampoline.
*/
def extend[F2[x] >: F[x], O2](f: Process[F, O] => Process[F2, O2]): Await[F2, A, O2] =
Await[F2, A, O2](req, r => Trampoline.suspend(rcv(r)).map(f))
}
/**
* The `Append` constructor instructs the driver to continue with
* the evaluation of the first step found in the tail (`stack`) vector.
*
* Instead of using this constructor directly, please use:
*
* Process.append
*/
case class Append[+F[_], +O](
head: HaltEmitOrAwait[F, O]
, stack: Vector[Cause => Trampoline[Process[F, O]]]
) extends Process[F, O] {
/**
* Helper to modify the head and appended processes
*/
def extend[F2[x] >: F[x], O2](f: Process[F, O] => Process[F2, O2]): Process[F2, O2] = {
val ms = stack.map(n => (cause: Cause) => Trampoline.suspend(n(cause)).map(f))
f(head) match {
case HaltEmitOrAwait(p) => Append(p, ms)
case app: Append[F2@unchecked, O2@unchecked] => Append(app.head, app.stack fast_++ ms)
}
}
}
/**
* Marker trait representing either the next step of a process or a terminated process (`Halt`).
*/
sealed trait HaltOrStep[+F[_], +O]
/**
* Intermediate step of process.
* Used to step within the process to define complex combinators.
*/
case class Step[+F[_], +O](head: EmitOrAwait[F, O], next: Cont[F, O]) extends HaltOrStep[F, O] {
def toProcess : Process[F,O] = Append(head.asInstanceOf[HaltEmitOrAwait[F,O]],next.stack)
}
/**
* Continuation of the process. Represents process _stack_. Used in conjunction with `Step`.
*/
case class Cont[+F[_], +O](stack: Vector[Cause => Trampoline[Process[F, O]]]) {
/**
* Prepends supplied process to this stack
*/
def +:[F2[x] >: F[x], O2 >: O](p: Process[F2, O2]): Process[F2, O2] = prepend(p)
/** alias for +: */
def prepend[F2[x] >: F[x], O2 >: O](p: Process[F2, O2]): Process[F2, O2] = {
if (stack.isEmpty) p
else p match {
case app: Append[F2@unchecked, O2@unchecked] => Append[F2, O2](app.head, app.stack fast_++ stack)
case emt: Emit[O2@unchecked] => Append(emt, stack)
case awt: Await[F2@unchecked, _, O2@unchecked] => Append(awt, stack)
case hlt@Halt(_) => Append(hlt, stack)
}
}
/**
* Converts this stack to a process. Used to continue
* the process when the preceding step terminated normally.
*/
def continue: Process[F, O] = prepend(halt)
/**
* Applies transformation function `f` to all frames of this stack.
*/
def extend[F2[_], O2](f: Process[F, O] => Process[F2, O2]): Cont[F2, O2] =
Cont(stack.map(tf => (cause: Cause) => Trampoline.suspend(tf(cause).map(f))))
/**
* Returns true when this continuation is empty, i.e. there are no more appends to process.
*/
def isEmpty : Boolean = stack.isEmpty
}
///////////////////////////////////////////////////////////////////////////////////////
//
// CONSTRUCTORS
//
//////////////////////////////////////////////////////////////////////////////////////
/** Alias for emitAll */
def apply[O](o: O*): Process0[O] = emitAll(o)
/**
* Await the given `F` request and use its result.
* If you need to specify fallback, use `awaitOr`
*/
def await[F[_], A, O](req: F[A])(rcv: A => Process[F, O]): Process[F, O] =
awaitOr(req)(Halt.apply)(rcv)
/**
* Await a request, and if it fails, use `fb` to determine the next state.
* Otherwise, use `rcv` to determine the next state.
*/
def awaitOr[F[_], A, O](req: F[A])(
fb: EarlyCause => Process[F, O]
)(rcv: A => Process[F, O]): Process[F, O] = {
Await(req, (r: EarlyCause \\/ A) => Trampoline.delay(Try(r.fold(ec => fb(ec), a => rcv(a)))))
}
/** The `Process1` which awaits a single input, emits it, then halts normally. */
def await1[I]: Process1[I, I] =
receive1(emit)
/** Like `await1`, but consults `fb` when await fails to receive an `I` */
def await1Or[I](fb: => Process1[I, I]): Process1[I, I] =
receive1Or(fb)(emit)
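// Illustrative sketch (relies on the `Process1Syntax.apply` defined later in this file):
// `await1` reads exactly one element and re-emits it, so applying it to a pure input
// keeps only the first value.
//   await1[Int].apply(List(1, 2, 3)) == IndexedSeq(1)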
/** The `Wye` which requests from both branches concurrently. */
def awaitBoth[I, I2]: Wye[I, I2, ReceiveY[I, I2]] =
await(Both[I, I2])(emit)
/** The `Tee` which requests from the left branch, emits this value, then halts. */
def awaitL[I]: Tee[I, Any, I] =
await(L[I])(emit)
/** The `Tee` which requests from the right branch, emits this value, then halts. */
def awaitR[I2]: Tee[Any, I2, I2] =
await(R[I2])(emit)
/** The `Process` which emits the single value given, then halts. */
def emit[O](o: O): Process0[O] = Emit(Vector(o))
/** The `Process` which emits the given sequence of values, then halts. */
def emitAll[O](os: Seq[O]): Process0[O] = Emit(os)
/** The `Process` which emits no values and halts immediately with the given exception. */
def fail(rsn: Throwable): Process0[Nothing] = Halt(Error(rsn))
/** `halt` but with precise type. */
private[stream] val halt0: Halt = Halt(End)
/** The `Process` which emits no values and signals normal termination. */
val halt: Process0[Nothing] = halt0
/** Alias for `halt`. */
def empty[F[_],O]: Process[F, O] = halt
/**
* The `Process1` which awaits a single input and passes it to `rcv` to
* determine the next state.
*/
def receive1[I, O](rcv: I => Process1[I, O]): Process1[I, O] =
await(Get[I])(rcv)
/** Like `receive1`, but consults `fb` when it fails to receive an input. */
def receive1Or[I, O](fb: => Process1[I, O])(rcv: I => Process1[I, O]): Process1[I, O] =
awaitOr(Get[I])((rsn: EarlyCause) => fb.causedBy(rsn))(rcv)
///////////////////////////////////////////////////////////////////////////////////////
//
// CONSTRUCTORS -> Helpers
//
//////////////////////////////////////////////////////////////////////////////////////
/** `Writer` based version of `await1`. */
def await1W[A]: Writer1[Nothing, A, A] =
liftW(Process.await1[A])
/** `Writer` based version of `awaitL`. */
def awaitLW[I]: TeeW[Nothing, I, Any, I] =
liftW(Process.awaitL[I])
/** `Writer` based version of `awaitR`. */
def awaitRW[I2]: TeeW[Nothing, Any, I2, I2] =
liftW(Process.awaitR[I2])
/** `Writer` based version of `awaitBoth`. */
def awaitBothW[I, I2]: WyeW[Nothing, I, I2, ReceiveY[I, I2]] =
liftW(Process.awaitBoth[I, I2])
/**
* The infinite `Process` which always emits `a`.
* If for performance reasons it is better to emit `a` in chunks,
* specify the chunk size with the `chunkSize` parameter.
*/
def constant[A](a: A, chunkSize: Int = 1): Process0[A] = {
lazy val go: Process0[A] =
if (chunkSize.max(1) == 1) emit(a) ++ go
else emitAll(List.fill(chunkSize)(a)) ++ go
go
}
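// Illustrative sketch (assumes `take` and the `Process0` syntax defined elsewhere in this file):
// `constant` never halts on its own, so it is normally bounded downstream; chunking only
// affects how the values are batched, not what is emitted.
//   constant(7).take(3).toList == List(7, 7, 7)
//   constant(7, chunkSize = 100).take(3).toList == List(7, 7, 7)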
/** A `Writer` which emits one value to the output. */
def emitO[O](o: O): Process0[Nothing \\/ O] =
Process.emit(right(o))
/** A `Writer` which writes the given value. */
def emitW[W](s: W): Process0[W \\/ Nothing] =
Process.emit(left(s))
/** A `Process` which emits `n` repetitions of `a`. */
def fill[A](n: Int)(a: A, chunkSize: Int = 1): Process0[A] = {
val chunkN = chunkSize max 1
val chunk = emitAll(List.fill(chunkN)(a)) // we can reuse this for each step
def go(m: Int): Process0[A] =
if (m >= chunkN) chunk ++ go(m - chunkN)
else if (m <= 0) halt
else emitAll(List.fill(m)(a))
go(n max 0)
}
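// Illustrative sketch (assumes `toList` from the `Process0` syntax below): `fill` emits
// exactly `n` copies regardless of the internal chunk size.
//   fill(5)(1, chunkSize = 2).toList == List(1, 1, 1, 1, 1)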
/**
* Produce a continuous stream from a discrete stream by using the
* most recent value.
*/
def forwardFill[A](p: Process[Task, A])(implicit S: Strategy): Process[Task, A] =
async.toSignal(p).continuous
/**
* An infinite `Process` that repeatedly applies a given function
* to a start value. `start` is the first value emitted, followed
* by `f(start)`, then `f(f(start))`, and so on.
*/
def iterate[A](start: A)(f: A => A): Process0[A] =
emit(start) ++ iterate(f(start))(f)
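// Illustrative sketch (assumes `take` and `toList` defined elsewhere in this file):
//   iterate(1)(_ * 2).take(4).toList == List(1, 2, 4, 8)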
/**
* Like [[iterate]], but takes an effectful function for producing
* the next state. `start` is the first value emitted.
*/
def iterateEval[F[_], A](start: A)(f: A => F[A]): Process[F, A] =
emit(start) ++ await(f(start))(iterateEval(_)(f))
/** Promote a `Process` to a `Writer` that writes nothing. */
def liftW[F[_], A](p: Process[F, A]): Writer[F, Nothing, A] =
p.map(right)
/**
* Promote a `Process` to a `Writer` that writes and outputs
* all values of `p`.
*/
def logged[F[_], A](p: Process[F, A]): Writer[F, A, A] =
p.flatMap(a => emitAll(Vector(left(a), right(a))))
/** Lazily produce the range `[start, stopExclusive)`. If you want to produce the sequence in one chunk, instead of lazily, use `emitAll(start until stopExclusive)`. */
def range(start: Int, stopExclusive: Int, by: Int = 1): Process0[Int] =
unfold(start)(i => if (i < stopExclusive) Some((i, i + by)) else None)
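// Illustrative sketch (assumes `toList` from the `Process0` syntax below):
//   range(0, 5).toList == List(0, 1, 2, 3, 4)
//   range(0, 5, by = 2).toList == List(0, 2, 4)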
/**
* Lazily produce a sequence of nonoverlapping ranges, where each range
* contains `size` integers, assuming the upper bound is exclusive.
* Example: `ranges(0, 1000, 10)` results in the pairs
* `(0, 10), (10, 20), (20, 30) ... (990, 1000)`
*
* Note: The last emitted range may be truncated at `stopExclusive`. For
* instance, `ranges(0,5,4)` results in `(0,4), (4,5)`.
*
* @throws IllegalArgumentException if `size` <= 0
*/
def ranges(start: Int, stopExclusive: Int, size: Int): Process0[(Int, Int)] = {
require(size > 0, "size must be > 0, was: " + size)
unfold(start){
lower =>
if (lower < stopExclusive)
Some((lower -> ((lower+size) min stopExclusive), lower+size))
else
None
}
}
/**
* Delay running `p` until `awaken` becomes true for the first time.
* The `awaken` process may be discrete.
*/
def sleepUntil[F[_], A](awaken: Process[F, Boolean])(p: Process[F, A]): Process[F, A] =
awaken.dropWhile(!_).once.flatMap(_ => p)
/**
* A supply of `Long` values, starting with `initial`.
* Each read is guaranteed to return a value which is unique
* across all threads reading from this `supply`.
*/
def supply(initial: Long): Process[Task, Long] = {
import java.util.concurrent.atomic.AtomicLong
val l = new AtomicLong(initial)
repeatEval { Task.delay { l.getAndIncrement }}
}
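// Illustrative sketch (assumes `take` and `runLog` defined elsewhere in this file): each
// read increments the shared counter, so values are never handed out twice.
//   supply(10L).take(3).runLog.run == Vector(10L, 11L, 12L)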
/** A `Writer` which writes the given value; alias for `emitW`. */
def tell[S](s: S): Process0[S \\/ Nothing] =
emitW(s)
/** Produce a (potentially infinite) source from an unfold. */
def unfold[S, A](s0: S)(f: S => Option[(A, S)]): Process0[A] = {
def go(s: S): Process0[A] =
f(s) match {
case Some((a, sn)) => emit(a) ++ go(sn)
case None => halt
}
suspend(go(s0))
}
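// Illustrative sketch: `unfold` is the pure generator primitive that `range` above builds on.
//   unfold(3)(i => if (i > 0) Some((i, i - 1)) else None).toList == List(3, 2, 1)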
/** Like [[unfold]], but takes an effectful function. */
def unfoldEval[F[_], S, A](s0: S)(f: S => F[Option[(A, S)]]): Process[F, A] = {
def go(s: S): Process[F, A] =
await(f(s)) {
case Some((a, sn)) => emit(a) ++ go(sn)
case None => halt
}
suspend(go(s0))
}
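// Illustrative sketch (uses scalaz.concurrent.Task and `runLog` as elsewhere in this file):
//   unfoldEval(0)(i => Task.delay(if (i < 3) Some((i, i + 1)) else None)).runLog.run == Vector(0, 1, 2)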
//////////////////////////////////////////////////////////////////////////////////////
//
// ENV, Tee, Wye et al.
//
/////////////////////////////////////////////////////////////////////////////////////
case class Env[-I, -I2]() {
sealed trait Y[-X] {
def tag: Int
def fold[R](l: => R, r: => R, both: => R): R
}
sealed trait T[-X] extends Y[X]
sealed trait Is[-X] extends T[X]
case object Left extends Is[I] {
def tag = 0
def fold[R](l: => R, r: => R, both: => R): R = l
}
case object Right extends T[I2] {
def tag = 1
def fold[R](l: => R, r: => R, both: => R): R = r
}
case object Both extends Y[ReceiveY[I, I2]] {
def tag = 2
def fold[R](l: => R, r: => R, both: => R): R = both
}
}
private val Left_ = Env[Any, Any]().Left
private val Right_ = Env[Any, Any]().Right
private val Both_ = Env[Any, Any]().Both
def Get[I]: Env[I, Any]#Is[I] = Left_
def L[I]: Env[I, Any]#Is[I] = Left_
def R[I2]: Env[Any, I2]#T[I2] = Right_
def Both[I, I2]: Env[I, I2]#Y[ReceiveY[I, I2]] = Both_
//////////////////////////////////////////////////////////////////////////////////////
//
// SYNTAX
//
/////////////////////////////////////////////////////////////////////////////////////
/** Adds syntax for `Channel`. */
implicit class ChannelSyntax[F[_],I,O](val self: Channel[F,I,O]) extends AnyVal {
/** Transform the input of this `Channel`. */
def contramap[I0](f: I0 => I): Channel[F,I0,O] =
self.map(f andThen _)
/** Transform the output of this `Channel` */
def mapOut[O2](f: O => O2)(implicit F: Functor[F]): Channel[F,I,O2] =
self.map(_ andThen F.lift(f))
}
/** Adds syntax for `Sink`. */
implicit class SinkSyntax[F[_],I](val self: Sink[F,I]) extends AnyVal {
/** Converts this `Sink` to a `Channel` that performs the side effect and echoes its input. */
def toChannel(implicit F: Functor[F]): Channel[F,I,I] =
self.map(f => (i: I) => F.map(f(i))(_ => i))
}
implicit class ProcessSyntax[F[_],O](val self: Process[F,O]) extends AnyVal {
/** Feed this `Process` through the given effectful `Channel`. */
def through[F2[x]>:F[x],O2](f: Channel[F2,O,O2]): Process[F2,O2] =
self.zipWith(f)((o,f) => f(o)).eval
/**
* Feed this `Process` through the given effectful `Channel`, signaling
* termination to `f` via `None`. Useful to allow `f` to flush any
* buffered values to the output when it detects termination, see
* [[scalaz.stream.io.bufferedChannel]] combinator.
*/
def throughOption[F2[x]>:F[x],O2](f: Channel[F2,Option[O],O2]): Process[F2,O2] =
self.terminated.through(f)
/** Attaches `Sink` to this `Process` */
def to[F2[x]>:F[x]](f: Sink[F2,O]): Process[F2,Unit] =
through(f)
/** Attach a `Sink` to the output of this `Process` but echo the original. */
def observe[F2[x]>:F[x]](f: Sink[F2,O]): Process[F2,O] =
self.zipWith(f)((o,f) => (o,f(o))).flatMap { case (orig,action) => emit(action).eval.drain ++ emit(orig) }
}
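// Illustrative sketch (assumes `scalaz.stream.io.stdOutLines`, the library's stdout sink,
// which lives outside this file): `to` attaches a sink and keeps only the effects.
//   Process("hello", "world").toSource.to(io.stdOutLines).run.run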
/**
* Provides infix syntax for `eval: Process[F,F[O]] => Process[F,O]`
*/
implicit class EvalProcess[F[_], O](val self: Process[F, F[O]]) extends AnyVal {
/**
* Evaluate the stream of `F` actions produced by this `Process`.
* This sequences `F` actions strictly--the first `F` action will
* be evaluated before work begins on producing the next `F`
* action. To allow for concurrent evaluation, use `sequence`
* or `gather`.
*
* If the evaluation of `F` results in `Terminated(cause)`,
* the evaluation of the stream is terminated with `cause`.
*/
def eval: Process[F, O] = {
self.flatMap(f=> await(f)(emit)).onHalt {
case Error(Terminated(cause)) => Halt(cause)
case cause => Halt(cause)
}
}
/**
* Read chunks of `bufSize` from input, then use `Nondeterminism.gatherUnordered`
* to run all these actions to completion.
*/
def gather(bufSize: Int)(implicit F: Nondeterminism[F]): Process[F,O] =
self.pipe(process1.chunk(bufSize)).map(F.gatherUnordered).eval.flatMap(emitAll)
/**
* Read chunks of `bufSize` from input, then use `Nondeterminism.gather`
* to run all these actions to completion and return elements in order.
*/
def sequence(bufSize: Int)(implicit F: Nondeterminism[F]): Process[F,O] =
self.pipe(process1.chunk(bufSize)).map(F.gather).eval.flatMap(emitAll)
}
/**
* This class provides infix syntax specific to `Process0`.
*/
implicit class Process0Syntax[O](val self: Process0[O]) extends AnyVal {
/** Converts this `Process0` to a `Vector`. */
def toVector: Vector[O] =
self.unemit match {
case (_, Halt(Error(rsn))) => throw rsn
case (os, _) => os.toVector
}
/** Converts this `Process0` to an `IndexedSeq`. */
def toIndexedSeq: IndexedSeq[O] = toVector
/** Converts this `Process0` to a `List`. */
def toList: List[O] = toVector.toList
/** Converts this `Process0` to a `Seq`. */
def toSeq: Seq[O] = toVector
/** Converts this `Process0` to a `Stream`. */
def toStream: Stream[O] = {
def go(p: Process0[O]): Stream[O] =
p.step match {
case s: Step[Nothing, O] =>
s.head match {
case Emit(os) => os.toStream #::: go(s.next.continue)
case _ => sys.error("impossible")
}
case Halt(Error(rsn)) => throw rsn
case Halt(_) => Stream.empty
}
go(self)
}
/** Converts this `Process0` to a `Map`. */
def toMap[K, V](implicit isKV: O <:< (K, V)): Map[K, V] = toVector.toMap(isKV)
/** Converts this `Process0` to a `SortedMap`. */
def toSortedMap[K, V](implicit isKV: O <:< (K, V), ord: Ordering[K]): SortedMap[K, V] =
SortedMap(toVector.asInstanceOf[Seq[(K, V)]]: _*)
def toSource: Process[Task, O] = self
@deprecated("liftIO is deprecated in favor of toSource. It will be removed in a future release.", "0.7")
def liftIO: Process[Task, O] = self
}
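// Illustrative sketch of the conversions above on pure processes:
//   Process(("a", 1), ("b", 2)).toMap == Map("a" -> 1, "b" -> 2)
//   Process(1, 2, 3).toList == List(1, 2, 3)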
/** Syntax for `Sink` that is specialized for `Task`. */
implicit class SinkTaskSyntax[I](val self: Sink[Task,I]) extends AnyVal {
/** Converts this sink to a sink that first pipes each received `I0` through the supplied `p1`. */
def pipeIn[I0](p1: Process1[I0, I]): Sink[Task, I0] = Process.suspend {
import scalaz.Scalaz._
// Note: Function `f` from sink `self` may be used for more than 1 element emitted by `p1`.
@volatile var cur = p1.step
@volatile var lastF: Option[I => Task[Unit]] = None
self.takeWhile { _ =>
cur match {
case Halt(Cause.End) => false
case Halt(cause) => throw new Cause.Terminated(cause)
case _ => true
}
} map { (f: I => Task[Unit]) =>
lastF = f.some
(i0: I0) => Task.suspend {
cur match {
case Halt(_) => sys.error("Impossible")
case Step(Emit(piped), cont) =>
cur = process1.feed1(i0) { cont.continue }.step
piped.toList.traverse_(f)
case Step(hd, cont) =>
val (piped, tl) = process1.feed1(i0)(hd +: cont).unemit
cur = tl.step
piped.toList.traverse_(f)
}
}
} onHalt {
case Cause.Kill =>
lastF map { f =>
cur match {
case Halt(_) => sys.error("Impossible (2)")
case s@Step(_, _) =>
s.toProcess.disconnect(Cause.Kill).evalMap(f).drain
}
} getOrElse Halt(Cause.Kill)
case Cause.End => halt
case c@Cause.Error(_) => halt.causedBy(c)
}
}
}
/**
* This class provides infix syntax specific to `Process1`.
*/
implicit class Process1Syntax[I,O](val self: Process1[I,O]) extends AnyVal {
/** Apply this `Process` to an `Iterable`. */
def apply(input: Iterable[I]): IndexedSeq[O] =
Process(input.toSeq: _*).pipe(self).toIndexedSeq
/**
* Transform `self` to operate on the left hand side of an `\\/`, passing
* through any values it receives on the right. Note that this halts
* whenever `self` halts.
*/
def liftL[I2]: Process1[I \\/ I2, O \\/ I2] =
process1.liftL(self)
/**
* Transform `self` to operate on the right hand side of an `\\/`, passing
* through any values it receives on the left. Note that this halts
* whenever `self` halts.
*/
def liftR[I0]: Process1[I0 \\/ I, I0 \\/ O] =
process1.liftR(self)
/**
* Feed a single input to this `Process1`.
*/
def feed1(i: I): Process1[I,O] =
process1.feed1(i)(self)
/** Transform the input of this `Process1`. */
def contramap[I2](f: I2 => I): Process1[I2,O] =
process1.lift(f).pipe(self)
}
/**
* Syntax for processes that have their effects wrapped in `Task`.
*/
implicit class SourceSyntax[O](val self: Process[Task, O]) extends WyeOps[O] {
/**
* Produce a continuous stream from a discrete stream by using the
* most recent value.
*/
def forwardFill(implicit S: Strategy): Process[Task, O] =
async.toSignal(self).continuous
/**
* Asynchronous execution of this Process. Note that this method is not resource safe unless
* the callback is called with the _left_ side completed. In that case it is guaranteed that all cleanups
* have been successfully completed.
* The user of this method is responsible for any cleanup actions to be performed by running the
* next Process obtained on the right side of the callback.
*
* This method returns a function that, when applied, causes the running computation to be interrupted.
* That is useful if the process contains any asynchronous code that may be left with incomplete callbacks.
* If the evaluation of the process is interrupted, the interruption is only active if the callback
* has not completed before; otherwise the interruption is a no-op.
*
* There is a chance that the cleanup code of an intermediate `Await` will get called twice on interrupt,
* but it is always called at least once. The second cleanup invocation in that case may run on a different thread, asynchronously.
*
* @param cb result of the asynchronous evaluation of the process. Note that the callback is never called
* on the right side if the sequence is empty.
* @param S Strategy to use when evaluating the process. Note that `Strategy.Sequential` may cause a StackOverflowError.
* @return Function to interrupt the evaluation
*/
protected[stream] final def runAsync(
cb: Cause \\/ (Seq[O], Cont[Task,O]) => Unit
)(implicit S: Strategy): (EarlyCause) => Unit = {
sealed trait M
case class AwaitDone(res: Throwable \\/ Any, awt: Await[Task, Any, O], cont: Cont[Task,O]) extends M
case class Interrupt(cause: EarlyCause) extends M
//forward referenced actor here
var a: Actor[M] = null
// Set when the execution has been terminated, with the reason for termination
var completed: Option[Cause] = None
// contains a reference that eventually builds
// the cleanup to run when the last await was interrupted;
// this is consulted only if the await was interrupted.
// Marked volatile because of the first usage outside of the actor.
@volatile var cleanup: (EarlyCause => Process[Task,O]) = (c:EarlyCause) => Halt(c)
// Runs a single step of the process.
// Completes with the callback if the process is `Emit` or `Halt`,
// or asynchronously executes the `Await` and sends the result to actor `a`.
// It returns on the left side the reason with which this process terminated,
// or on the right side the cleanup code to be run when interrupted.
@tailrec
def runStep(p: Process[Task, O]): Cause \\/ (EarlyCause => Process[Task,O]) = {
val step = p.step
step match {
case Step(Emit(Seq()), cont) => runStep(cont.continue)
case Step(Emit(h), cont) => S(cb(right((h, cont)))); left(End)
case Step(awt@Await(req, rcv), cont) =>
req.runAsync(r => a ! AwaitDone(r, awt, cont))
right((c:EarlyCause) => rcv(left(c)).run +: cont)
case Halt(cause) => S(cb(left(cause))); left(cause)
}
}
a = new Actor[M]({ m =>
m match {
case AwaitDone(r, awt, cont) if completed.isEmpty =>
val step = Try(awt.rcv(EarlyCause(r)).run) +: cont
runStep(step).fold(
rsn => completed = Some(rsn)
, cln => cleanup = cln
)
// on interrupt we just run any cleanup code we have memoized
// from the last `Await`
case Interrupt(cause) if completed.isEmpty =>
completed = Some(cause)
Try(cleanup(cause)).run.runAsync(_.fold(
rsn0 => cb(left(Error(rsn0).causedBy(cause)))
, _ => cb(left(cause))
))
// This indicates the last await was interrupted.
// Only if the request was successful do we have to get
// the next state of the process and ensure
// any cleanup will be run.
// Note this won't consult any cleanup contained
// in `next` or `rcv` on the left side,
// as that was already run on `Interrupt`.
case AwaitDone(r, awt, _) =>
Try(awt.rcv(EarlyCause(r)).run)
.kill
.run.runAsync(_ => ())
// An interrupt after we have already completed is a no-op
case Interrupt(_) => ()
}
})(S)
runStep(self).fold(
rsn => (_: Cause) => ()
, cln => {
cleanup = cln
(cause: EarlyCause) => a ! Interrupt(cause)
}
)
}
}
/**
* This class provides infix syntax specific to `Tee`. We put these here
* rather than trying to cram them into `Process` itself using implicit
* equality witnesses. This doesn't work out so well due to variance
* issues.
*/
implicit class TeeSyntax[I,I2,O](val self: Tee[I,I2,O]) extends AnyVal {
/** Transform the left input to a `Tee`. */
def contramapL[I0](f: I0 => I): Tee[I0,I2,O] =
self.contramapL_(f).asInstanceOf[Tee[I0,I2,O]]
/** Transform the right input to a `Tee`. */
def contramapR[I3](f: I3 => I2): Tee[I,I3,O] =
self.contramapR_(f).asInstanceOf[Tee[I,I3,O]]
}
/**
* Infix syntax for working with `Writer[F,W,O]`. We call
* the `W` parameter the 'write' side of the `Writer` and
* `O` the 'output' side. Many methods in this class end
* with either `W` or `O`, depending on what side they
* operate on.
*/
implicit class WriterSyntax[F[_],W,O](val self: Writer[F,W,O]) extends AnyVal {
/** Transform the write side of this `Writer`. */
def flatMapW[F2[x]>:F[x],W2,O2>:O](f: W => Writer[F2,W2,O2]): Writer[F2,W2,O2] =
self.flatMap(_.fold(f, emitO))
/** Remove the write side of this `Writer`. */
def stripW: Process[F,O] =
self.flatMap(_.fold(_ => halt, emit))
/** Map over the write side of this `Writer`. */
def mapW[W2](f: W => W2): Writer[F,W2,O] =
self.map(_.leftMap(f))
/** Pipe the write side of this `Writer` through the given `Process1`. */
def pipeW[B](f: Process1[W,B]): Writer[F,B,O] =
self.pipe(process1.liftL(f))
/**
* Observe the write side of this `Writer` using the
* given `Sink`, keeping it available for subsequent
* processing. Also see `drainW`.
*/
def observeW(snk: Sink[F,W]): Writer[F,W,O] =
self.zipWith(snk)((a,f) =>
a.fold(
(s: W) => eval_ { f(s) } ++ Process.emitW(s),
(a: O) => Process.emitO(a)
)
).flatMap(identity)
/**
* Observe the write side of this `Writer` using the
* given `Sink`, then discard it. Also see `observeW`.
*/
def drainW(snk: Sink[F,W]): Process[F,O] =
observeW(snk).stripW
/**
* Observe the output side of this `Writer` using the
* given `Sink`, keeping it available for subsequent
* processing. Also see `drainO`.
*/
def observeO(snk: Sink[F,O]): Writer[F,W,O] =
self.map(_.swap).observeW(snk).map(_.swap)
/**
* Observe the output side of this `Writer` using the
* given `Sink`, then discard it. Also see `observeO`.
*/
def drainO(snk: Sink[F,O]): Process[F,W] =
observeO(snk).stripO
/** Map over the output side of this `Writer`. */
def mapO[B](f: O => B): Writer[F,W,B] =
self.map(_.map(f))
def flatMapO[F2[x]>:F[x],W2>:W,B](f: O => Writer[F2,W2,B]): Writer[F2,W2,B] =
self.flatMap(_.fold(emitW, f))
def stripO: Process[F,W] =
self.flatMap(_.fold(emit, _ => halt))
def pipeO[B](f: Process1[O,B]): Writer[F,W,B] =
self.pipe(process1.liftR(f))
}
/**
* This class provides infix syntax specific to `Wye`. We put these here
* rather than trying to cram them into `Process` itself using implicit
* equality witnesses. This doesn't work out so well due to variance
* issues.
*/
implicit class WyeSyntax[I,I2,O](val self: Wye[I,I2,O]) extends AnyVal {
/**
* Apply a `Wye` to two `Iterable` inputs.
*/
def apply(input: Iterable[I], input2: Iterable[I2]): IndexedSeq[O] = {
// this is probably rather slow
val src1 = Process.emitAll(input.toSeq).toSource
val src2 = Process.emitAll(input2.toSeq).toSource
src1.wye(src2)(self).runLog.run
}
/**
* Transform the left input of the given `Wye` using a `Process1`.
*/
def attachL[I0](f: Process1[I0,I]): Wye[I0, I2, O] =
scalaz.stream.wye.attachL(f)(self)
/**
* Transform the right input of the given `Wye` using a `Process1`.
*/
def attachR[I1](f: Process1[I1,I2]): Wye[I, I1, O] =
scalaz.stream.wye.attachR(f)(self)
/** Transform the left input to a `Wye`. */
def contramapL[I0](f: I0 => I): Wye[I0, I2, O] =
contramapL_(f)
/** Transform the right input to a `Wye`. */
def contramapR[I3](f: I3 => I2): Wye[I, I3, O] =
contramapR_(f)
private[stream] def contramapL_[I0](f: I0 => I): Wye[I0, I2, O] =
self.attachL(process1.lift(f))
private[stream] def contramapR_[I3](f: I3 => I2): Wye[I, I3, O] =
self.attachR(process1.lift(f))
/**
* Converts requests for the left input into normal termination.
* Note that `Both` requests are rewritten to fetch from the only input.
*/
def detach1L: Wye[I,I2,O] = scalaz.stream.wye.detach1L(self)
/**
* Converts requests for the right input into normal termination.
* Note that `Both` requests are rewritten to fetch from the only input.
*/
def detach1R: Wye[I,I2,O] = scalaz.stream.wye.detach1R(self)
}
//////////////////////////////////////////////////////////////////////////////////////
//
// SYNTAX Functions
//
/////////////////////////////////////////////////////////////////////////////////////
/**
* Evaluate an arbitrary effect in a `Process`. The resulting
* `Process` emits a single value. To evaluate repeatedly, use
* `repeatEval(t)`.
* Do not use `eval.repeat` or `repeat(eval)` as that may cause an infinite loop in certain situations.
*/
def eval[F[_], O](f: F[O]): Process[F, O] =
awaitOr(f)(_.asHalt)(emit)
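// Illustrative sketch (uses scalaz.concurrent.Task and `runLog`/`run` from above):
// `eval` turns a single effect into a one-element source.
//   eval(Task.delay(40 + 2)).runLog.run == Vector(42)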
/**
* Evaluate an arbitrary effect once, purely for its effects,
* ignoring its return value. This `Process` emits no values.
*/
def eval_[F[_], O](f: F[O]): Process[F, Nothing] =
eval(f).drain
/** Prefix syntax for `p.repeat`. */
def repeat[F[_], O](p: Process[F, O]): Process[F, O] = p.repeat
/**
* Evaluate an arbitrary effect in a `Process`. The resulting `Process` will emit values
* until evaluation of `f` signals termination with `End` or an error occurs.
*
* Note that if `f` results in a failure of type `Terminated`, `repeatEval` will convert its cause
* to the respective process termination cause and will halt with that cause.
*
*/
def repeatEval[F[_], O](f: F[O]): Process[F, O] =
awaitOr(f)(_.asHalt)(o => emit(o) ++ repeatEval(f))
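// Illustrative sketch (assumes `take` defined elsewhere in this file): `repeatEval`
// re-runs the effect for every element demanded downstream.
//   val counter = new java.util.concurrent.atomic.AtomicInteger(0)
//   repeatEval(Task.delay(counter.incrementAndGet())).take(3).runLog.run == Vector(1, 2, 3)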
/**
* Produce `p` lazily. Useful if producing the process involves allocation of
* some local mutable resource we want to ensure is freshly allocated
* for each consumer of `p`.
*
* Note that this implementation ensures that:
* {{{
* suspend(p).kill === suspend(p.kill)
* suspend(p).kill === p.kill
*
* suspend(p).repeat === suspend(p.repeat)
* suspend(p).repeat === p.repeat
*
* suspend(p).eval === suspend(p.eval)
* suspend(p).eval === p.eval
*
* Halt(cause) ++ suspend(p) === Halt(cause) ++ p
* }}}
*
*/
def suspend[F[_], O](p: => Process[F, O]): Process[F, O] =
Append(halt0,Vector({
case End => Trampoline.done(p)
case early: EarlyCause => Trampoline.done(p.injectCause(early))
}))
}
| rossabaker/scalaz-stream | src/main/scala/scalaz/stream/Process.scala | Scala | mit | 54,798 |
/*******************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
******************************************************************************/
package multiwayTree.P72
import multiwayTree.MTree
import org.scalatest.Args
import scala.collection.immutable.List
class sol02 extends P72 {
def postorder[T](t: MTree[T]): List[T] =
t.children.flatMap(postorder(_)) ::: List(t.value)
}
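// Illustrative check (assumes the MTree factory methods used elsewhere in this project,
// i.e. MTree(value) and MTree(value, children)):
//   postorder(MTree('a', List(
//     MTree('f', List(MTree('g'))),
//     MTree('c'),
//     MTree('b', List(MTree('d'), MTree('e')))))) == List('g', 'f', 'c', 'd', 'e', 'b', 'a')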
| GuillaumeDD/scala99problems | src/main/scala/multiwayTree/P72/sol02.scala | Scala | gpl-3.0 | 879 |
package com.olegych.scastie
package client
package components
import japgolly.scalajs.react._, vdom.all._, extra._
import org.scalajs.dom
import org.scalajs.dom.html
import org.scalajs.dom.{window, document}
final case class CopyModal(title: String, subtitle: String, content: String, modalId: String, isClosed: Boolean, close: Reusable[Callback]) {
@inline def render: VdomElement =
new CopyModal.ShareModalComponent().build(this)
}
object CopyModal {
implicit val reusability: Reusability[CopyModal] =
Reusability.derive[CopyModal]
private class ShareModalComponent() {
private val divRef = Ref[html.Div]
private def render(props: CopyModal): VdomElement = {
def copyLink: Callback = divRef.get.map { divRef =>
val range = dom.document.createRange()
val selection = dom.window.getSelection()
range.selectNodeContents(divRef)
selection.addRange(range)
if (!document.execCommand("copy")) {
window.alert("cannot copy link")
}
}
Modal(
title = props.title,
isClosed = props.isClosed,
close = props.close,
modalCss = TagMod(cls := "modal-share"),
modalId = props.modalId,
content = TagMod(
p(cls := "modal-intro")(
props.subtitle
),
div(cls := "snippet-link")(
div.withRef(divRef)(cls := "link-to-copy", onClick --> copyLink)(
props.content
),
div(onClick --> copyLink, title := "Copy to Clipboard", cls := "snippet-clip clipboard-copy")(
i(cls := "fa fa-clipboard")
)
)
)
).render
}
private val component =
ScalaComponent
.builder[CopyModal]("CopyModal")
.render_P(render)
.configure(Reusability.shouldComponentUpdate)
.build
def build(props: CopyModal): VdomElement = component(props)
}
}
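// Illustrative usage sketch (the Reusable.always / Callback.empty helpers from scalajs-react
// are assumed here, as are all the literal values):
//   CopyModal(
//     title = "Share", subtitle = "Copy this link", content = "https://example.com/snippet",
//     modalId = "share-modal", isClosed = false, close = Reusable.always(Callback.empty)
//   ).render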
| scalacenter/scastie | client/src/main/scala/com.olegych.scastie.client/components/CopyModal.scala | Scala | apache-2.0 | 1,939 |
import mist.api._
import mist.api.dsl._
import mist.api.encoding.defaults._
import org.apache.spark.SparkContext
object PiExample extends MistFn {
override def handle: Handle = {
val samples = arg[Int]("samples").validated(_ > 0, "Samples should be positive")
withArgs(samples).onSparkContext((n: Int, sc: SparkContext) => {
val count = sc.parallelize(1 to n).filter(_ => {
val x = math.random
val y = math.random
x * x + y * y < 1
}).count()
val pi = (4.0 * count) / n
pi
}).asHandle
}
}
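// Local sketch of the same estimator without Spark (illustrative only): the fraction of
// uniformly random points falling inside the unit quarter circle approximates Pi / 4.
//   val n = 1000000
//   val inside = (1 to n).count { _ => val x = math.random; val y = math.random; x * x + y * y < 1 }
//   val piEstimate = 4.0 * inside / n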
| Hydrospheredata/mist | examples/examples/src/main/scala/PiExample.scala | Scala | apache-2.0 | 556 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.{util => ju}
import scala.collection.JavaConverters._
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, KafkaConsumer}
import org.apache.kafka.common.{KafkaException, TopicPartition}
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.kafka010.KafkaConfigUpdater
private[kafka010] sealed trait KafkaDataConsumer[K, V] {
/**
* Get the record for the given offset if available.
*
* @param offset the offset to fetch.
* @param pollTimeoutMs timeout in milliseconds to poll data from Kafka.
*/
def get(offset: Long, pollTimeoutMs: Long): ConsumerRecord[K, V] = {
internalConsumer.get(offset, pollTimeoutMs)
}
/**
* Start a batch on a compacted topic
*
* @param offset the offset to fetch.
* @param pollTimeoutMs timeout in milliseconds to poll data from Kafka.
*/
def compactedStart(offset: Long, pollTimeoutMs: Long): Unit = {
internalConsumer.compactedStart(offset, pollTimeoutMs)
}
/**
* Get the next record in the batch from a compacted topic.
* Assumes compactedStart has been called first, and ignores gaps.
*
* @param pollTimeoutMs timeout in milliseconds to poll data from Kafka.
*/
def compactedNext(pollTimeoutMs: Long): ConsumerRecord[K, V] = {
internalConsumer.compactedNext(pollTimeoutMs)
}
/**
* Rewind to previous record in the batch from a compacted topic.
*
* @throws NoSuchElementException if no previous element
*/
def compactedPrevious(): ConsumerRecord[K, V] = {
internalConsumer.compactedPrevious()
}
/**
* Release this consumer from being further used. Depending on its implementation,
* this consumer will be either finalized, or reset for reuse later.
*/
def release(): Unit
/** Reference to the internal implementation that this wrapper delegates to */
def internalConsumer: InternalKafkaConsumer[K, V]
}
/**
* A wrapper around Kafka's KafkaConsumer.
* This is not for direct use outside this file.
*/
private[kafka010] class InternalKafkaConsumer[K, V](
val topicPartition: TopicPartition,
val kafkaParams: ju.Map[String, Object]) extends Logging {
private[kafka010] val groupId = kafkaParams.get(ConsumerConfig.GROUP_ID_CONFIG)
.asInstanceOf[String]
private val consumer = createConsumer
/** indicates whether this consumer is in use or not */
var inUse = true
/** indicates whether this consumer is going to be stopped in the next release */
var markedForClose = false
// TODO if the buffer was kept around as a random-access structure,
// could possibly optimize re-calculating of an RDD in the same batch
@volatile private var buffer = ju.Collections.emptyListIterator[ConsumerRecord[K, V]]()
@volatile private var nextOffset = InternalKafkaConsumer.UNKNOWN_OFFSET
override def toString: String = {
"InternalKafkaConsumer(" +
s"hash=${Integer.toHexString(hashCode)}, " +
s"groupId=$groupId, " +
s"topicPartition=$topicPartition)"
}
/** Create a KafkaConsumer to fetch records for `topicPartition` */
private def createConsumer: KafkaConsumer[K, V] = {
val updatedKafkaParams = KafkaConfigUpdater("executor", kafkaParams.asScala.toMap)
.setAuthenticationConfigIfNeeded()
.build()
val c = new KafkaConsumer[K, V](updatedKafkaParams)
val topics = ju.Arrays.asList(topicPartition)
c.assign(topics)
c
}
def close(): Unit = consumer.close()
/**
* Get the record for the given offset, waiting up to timeout ms if IO is necessary.
* Sequential forward access will use buffers, but random access will be horribly inefficient.
*/
def get(offset: Long, timeout: Long): ConsumerRecord[K, V] = {
logDebug(s"Get $groupId $topicPartition nextOffset $nextOffset requested $offset")
if (offset != nextOffset) {
logInfo(s"Initial fetch for $groupId $topicPartition $offset")
seek(offset)
poll(timeout)
}
if (!buffer.hasNext()) {
poll(timeout)
}
require(buffer.hasNext(),
s"Failed to get records for $groupId $topicPartition $offset after polling for $timeout")
var record = buffer.next()
if (record.offset != offset) {
logInfo(s"Buffer miss for $groupId $topicPartition $offset")
seek(offset)
poll(timeout)
require(buffer.hasNext(),
s"Failed to get records for $groupId $topicPartition $offset after polling for $timeout")
record = buffer.next()
require(record.offset == offset,
s"Got wrong record for $groupId $topicPartition even after seeking to offset $offset " +
s"got offset ${record.offset} instead. If this is a compacted topic, consider enabling " +
"spark.streaming.kafka.allowNonConsecutiveOffsets"
)
}
nextOffset = offset + 1
record
}
/**
* Start a batch on a compacted topic
*/
def compactedStart(offset: Long, pollTimeoutMs: Long): Unit = {
logDebug(s"compacted start $groupId $topicPartition starting $offset")
// This seek may not be necessary, but it's hard to tell due to gaps in compacted topics
if (offset != nextOffset) {
logInfo(s"Initial fetch for compacted $groupId $topicPartition $offset")
seek(offset)
poll(pollTimeoutMs)
}
}
/**
* Get the next record in the batch from a compacted topic.
* Assumes compactedStart has been called first, and ignores gaps.
*/
def compactedNext(pollTimeoutMs: Long): ConsumerRecord[K, V] = {
if (!buffer.hasNext()) {
poll(pollTimeoutMs)
}
require(buffer.hasNext(),
s"Failed to get records for compacted $groupId $topicPartition " +
s"after polling for $pollTimeoutMs")
val record = buffer.next()
nextOffset = record.offset + 1
record
}
/**
* Rewind to previous record in the batch from a compacted topic.
* @throws NoSuchElementException if no previous element
*/
def compactedPrevious(): ConsumerRecord[K, V] = {
buffer.previous()
}
private def seek(offset: Long): Unit = {
logDebug(s"Seeking to $topicPartition $offset")
consumer.seek(topicPartition, offset)
}
private def poll(timeout: Long): Unit = {
val p = consumer.poll(timeout)
val r = p.records(topicPartition)
logDebug(s"Polled ${p.partitions()} ${r.size}")
buffer = r.listIterator
}
}
private[kafka010] case class CacheKey(groupId: String, topicPartition: TopicPartition)
private[kafka010] object KafkaDataConsumer extends Logging {
private case class CachedKafkaDataConsumer[K, V](internalConsumer: InternalKafkaConsumer[K, V])
extends KafkaDataConsumer[K, V] {
assert(internalConsumer.inUse)
override def release(): Unit = KafkaDataConsumer.release(internalConsumer)
}
private case class NonCachedKafkaDataConsumer[K, V](internalConsumer: InternalKafkaConsumer[K, V])
extends KafkaDataConsumer[K, V] {
override def release(): Unit = internalConsumer.close()
}
// Don't want to depend on guava, don't want a cleanup thread, use a simple LinkedHashMap
private[kafka010] var cache: ju.Map[CacheKey, InternalKafkaConsumer[_, _]] = null
/**
* Must be called before acquire, once per JVM, to configure the cache.
* Further calls are ignored.
*/
def init(
initialCapacity: Int,
maxCapacity: Int,
loadFactor: Float): Unit = synchronized {
if (null == cache) {
logInfo(s"Initializing cache $initialCapacity $maxCapacity $loadFactor")
cache = new ju.LinkedHashMap[CacheKey, InternalKafkaConsumer[_, _]](
initialCapacity, loadFactor, true) {
override def removeEldestEntry(
entry: ju.Map.Entry[CacheKey, InternalKafkaConsumer[_, _]]): Boolean = {
// Try to remove the least-used entry if it's currently not in use.
//
// If you cannot remove it, then the cache will keep growing. In the worst case,
// the cache will grow to the max number of concurrent tasks that can run in the executor,
// (that is, the number of task slots), after which it will never shrink. This is unlikely to
// be a serious problem because an executor with more than 64 (default) task slots is
// likely running on a beefy machine that can handle a large number of simultaneously
// active consumers.
if (!entry.getValue.inUse && this.size > maxCapacity) {
logWarning(
s"KafkaConsumer cache hitting max capacity of $maxCapacity, " +
s"removing consumer for ${entry.getKey}")
try {
entry.getValue.close()
} catch {
case x: KafkaException =>
logError("Error closing oldest Kafka consumer", x)
}
true
} else {
false
}
}
}
}
}
/**
* Get a cached consumer for groupId, assigned to topic and partition.
* If a matching consumer doesn't already exist, one will be created using kafkaParams.
* The returned consumer must be released explicitly using [[KafkaDataConsumer.release()]].
*
* Note: This method guarantees that the consumer returned is not currently in use by anyone
* else. Within this guarantee, this method will make a best effort attempt to re-use consumers by
* caching them and tracking when they are in use.
*/
def acquire[K, V](
topicPartition: TopicPartition,
kafkaParams: ju.Map[String, Object],
context: TaskContext,
useCache: Boolean): KafkaDataConsumer[K, V] = synchronized {
val groupId = kafkaParams.get(ConsumerConfig.GROUP_ID_CONFIG).asInstanceOf[String]
val key = new CacheKey(groupId, topicPartition)
val existingInternalConsumer = cache.get(key)
lazy val newInternalConsumer = new InternalKafkaConsumer[K, V](topicPartition, kafkaParams)
if (context != null && context.attemptNumber >= 1) {
// If this is a reattempt at running the task, then invalidate cached consumers, if any, and
// start with a new one. If prior attempt failures were cache related, then this way old
// problematic consumers can be removed.
logDebug(s"Reattempt detected, invalidating cached consumer $existingInternalConsumer")
if (existingInternalConsumer != null) {
// Consumer exists in cache. If it's in use, mark it for closing later, or close it now.
if (existingInternalConsumer.inUse) {
existingInternalConsumer.markedForClose = true
} else {
existingInternalConsumer.close()
// Remove the consumer from cache only if it's closed.
// Consumers marked for close will be removed in the release function.
cache.remove(key)
}
}
logDebug("Reattempt detected, new non-cached consumer will be allocated " +
s"$newInternalConsumer")
NonCachedKafkaDataConsumer(newInternalConsumer)
} else if (!useCache) {
// If consumer reuse turned off, then do not use it, return a new consumer
logDebug("Cache usage turned off, new non-cached consumer will be allocated " +
s"$newInternalConsumer")
NonCachedKafkaDataConsumer(newInternalConsumer)
} else if (existingInternalConsumer == null) {
// If consumer is not already cached, then put a new one in the cache and return it
logDebug("No cached consumer, new cached consumer will be allocated " +
s"$newInternalConsumer")
cache.put(key, newInternalConsumer)
CachedKafkaDataConsumer(newInternalConsumer)
} else if (existingInternalConsumer.inUse) {
// If consumer is already cached but is currently in use, then return a new consumer
logDebug("Used cached consumer found, new non-cached consumer will be allocated " +
s"$newInternalConsumer")
NonCachedKafkaDataConsumer(newInternalConsumer)
} else {
// If consumer is already cached and is currently not in use, then return that consumer
logDebug(s"Not used cached consumer found, re-using it $existingInternalConsumer")
existingInternalConsumer.inUse = true
// Any given TopicPartition should have a consistent key and value type
CachedKafkaDataConsumer(existingInternalConsumer.asInstanceOf[InternalKafkaConsumer[K, V]])
}
}
private def release(internalConsumer: InternalKafkaConsumer[_, _]): Unit = synchronized {
// Clear the consumer from the cache if this is indeed the consumer present in the cache
val key = new CacheKey(internalConsumer.groupId, internalConsumer.topicPartition)
val cachedInternalConsumer = cache.get(key)
if (internalConsumer.eq(cachedInternalConsumer)) {
// The released consumer is the same object as the cached one.
if (internalConsumer.markedForClose) {
internalConsumer.close()
cache.remove(key)
} else {
internalConsumer.inUse = false
}
} else {
// The released consumer is either not the same one as in the cache, or not in the cache
// at all. This may happen if the cache was invalidated while this consumer was being used.
// Just close this consumer.
internalConsumer.close()
logInfo(s"Released a supposedly cached consumer that was not found in the cache " +
s"$internalConsumer")
}
}
}
private[kafka010] object InternalKafkaConsumer {
private val UNKNOWN_OFFSET = -2L
}
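// Illustrative caller-side sketch (assumptions: `kafkaParams` is a configured
// ju.Map[String, Object] and is not defined in this file):
//   KafkaDataConsumer.init(initialCapacity = 16, maxCapacity = 64, loadFactor = 0.75f)
//   val consumer = KafkaDataConsumer.acquire[String, String](
//     new TopicPartition("events", 0), kafkaParams, TaskContext.get(), useCache = true)
//   try consumer.get(offset = 42L, pollTimeoutMs = 512L) finally consumer.release()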
| aosagie/spark | external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/KafkaDataConsumer.scala | Scala | apache-2.0 | 14,254 |
package org.bitcoins.testkit.util
import org.bitcoins.testkitcore.util.BitcoinSUnitTest
import org.scalatest.Assertion
import org.scalatest.exceptions.TestFailedException
import scala.concurrent.Future
class ScalaTestUtilTest extends BitcoinSUnitTest {
implicit val ec: scala.concurrent.ExecutionContext =
scala.concurrent.ExecutionContext.global
behavior of "ScalaTestUtilTest"
def t = assert(true)
def f = assert(false)
def futureFail =
Future {
// sleep for a while and then eventually fail
Thread.sleep(1000)
f
}
it must "evaluate a Vector[Future[Assertions]] correctly" in {
val vec1: Vector[Future[Assertion]] =
Vector(Future.successful(t), Future.successful(t))
ScalaTestUtil.toAssertF(vec1)
try {
ScalaTestUtil.toAssertF(Vector(futureFail))
} catch {
case _: TestFailedException => succeed
}
}
}
| bitcoin-s/bitcoin-s | testkit/src/test/scala/org/bitcoins/testkit/util/ScalaTestUtilTest.scala | Scala | mit | 896 |
/**
*
* Copyright (c) 2015-2017 Rodney S.K. Lai
* https://github.com/rodney-lai
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
package com.rodneylai.database
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import javax.inject.{Inject,Singleton}
import com.google.inject.AbstractModule
import org.mongodb.scala._
import org.mongodb.scala.model.Filters._
import org.slf4j.{Logger,LoggerFactory}
import com.rodneylai.models.mongodb._
@Singleton
class MongoAccessHelper @Inject() (mongoHelper:MongoHelper,messageLogDao:MessageLogDao,resetPasswordDao:ResetPasswordDao) {
private val m_log:Logger = LoggerFactory.getLogger(this.getClass.getName)
m_log.debug("init")
def insertToMessageLog(emailUuid:java.util.UUID,email:String,emailType:String,toEmailAddress:String,now:java.util.Date):Future[Option[java.util.UUID]] = {
if (mongoHelper.isActive) {
for {
collection <- messageLogDao.collectionFuture
insertResult <- collection.insertOne(messageLogDao.toBson(
MessageLog(
emailUuid,
"email",
emailType,
toEmailAddress,
now
)
)).toFuture()
} yield {
Some(emailUuid)
}
} else {
Future.successful(Some(emailUuid))
}
}
def updateResetPassword(codeUuid:java.util.UUID,emailUuid:java.util.UUID,now:java.util.Date):Future[Option[java.util.UUID]] = {
if (mongoHelper.isActive) {
for {
collection <- resetPasswordDao.collectionFuture
updateMessageUuidResult <- collection.updateOne(
and(
equal("CodeUuid",MongoHelper.toStandardBinaryUUID(codeUuid)),
exists("MessageUuid",false)
),
Document(
"$set" -> Document(
"MessageUuid" -> MongoHelper.toStandardBinaryUUID(emailUuid),
"UpdateDate" -> now
)
)
).toFuture()
updateStatusResult <- collection.updateOne(
and(
equal("CodeUuid",MongoHelper.toStandardBinaryUUID(codeUuid)),
equal("Status","queued")
),
Document(
"$set" -> Document(
"Status" -> "sent",
"UpdateDate" -> now
)
)
).toFuture()
} yield {
Some(emailUuid)
}
} else {
Future.successful(Some(emailUuid))
}
}
}
class MongoAccessHelperModule extends AbstractModule {
override def configure() = {
bind(classOf[MongoAccessHelper]).asEagerSingleton
}
}
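// Illustrative usage sketch (assumes an injected MongoAccessHelper named `mongoAccess`;
// the UUID and addresses below are placeholders):
//   mongoAccess.insertToMessageLog(
//     java.util.UUID.randomUUID, "user@example.com", "reset_password",
//     "user@example.com", new java.util.Date)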
| rodney-lai/test-site | emailer/src/main/scala/com/rodneylai/database/MongoAccessHelper.scala | Scala | isc | 3,263 |
object funcionalSenSideEffect{
def main(args: Array[String]): Unit = {
println(senSide(args))
}
// In the functional style, a function that returns Unit (void) is generally one that
// has side effects, which are considered hard to test, etc.
// For example, a side effect is a value that we cannot substitute for the call to the
// function that produces it:
// printing to the screen is a side effect,
// as is writing to a database or a file,
// because we cannot replace what the function returns with the operation itself.
def senSide(args: Array[String]): String = args.mkString("\n")
}
| jmlb23/scala | libro_odersky/scripts_CH3/funcional/funcionalSenSideEffect.scala | Scala | gpl-3.0 | 601 |
/**
* Copyright (c) 2015, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.finance.ts
import scala.Double.NaN
import TimeSeries._
import org.scalatest.FunSuite
import org.scalatest.Matchers._
class TimeSeriesSuite extends FunSuite {
test("nearest") {
fillNearest(Array(1.0)) should be (Array(1.0))
fillNearest(Array(1.0, 1.0, 2.0)) should be (Array(1.0, 1.0, 2.0))
fillNearest(Array(1.0, NaN, NaN, 2.0)) should be (Array(1.0, 1.0, 2.0, 2.0))
// round down to previous
fillNearest(Array(1.0, NaN, 2.0)) should be (Array(1.0, 1.0, 2.0))
fillNearest(Array(1.0, NaN, NaN, NaN, 2.0)) should be (Array(1.0, 1.0, 1.0, 2.0, 2.0))
fillNearest(Array(1.0, NaN, 3.0, NaN, 2.0)) should be (Array(1.0, 1.0, 3.0, 3.0, 2.0))
}
test("previous") {
fillPrevious(Array(1.0)) should be (Array(1.0))
fillPrevious(Array(1.0, 1.0, 2.0)) should be (Array(1.0, 1.0, 2.0))
fillPrevious(Array(1.0, NaN, 2.0)) should be (Array(1.0, 1.0, 2.0))
fillPrevious(Array(1.0, NaN, NaN, 2.0)) should be (Array(1.0, 1.0, 1.0, 2.0))
fillPrevious(Array(1.0, NaN, NaN, NaN, 2.0)) should be (Array(1.0, 1.0, 1.0, 1.0, 2.0))
fillPrevious(Array(1.0, NaN, 3.0, NaN, 2.0)) should be (Array(1.0, 1.0, 3.0, 3.0, 2.0))
}
test("next") {
fillNext(Array(1.0)) should be (Array(1.0))
fillNext(Array(1.0, 1.0, 2.0)) should be (Array(1.0, 1.0, 2.0))
fillNext(Array(1.0, NaN, 2.0)) should be (Array(1.0, 2.0, 2.0))
fillNext(Array(1.0, NaN, NaN, 2.0)) should be (Array(1.0, 2.0, 2.0, 2.0))
fillNext(Array(1.0, NaN, NaN, NaN, 2.0)) should be (Array(1.0, 2.0, 2.0, 2.0, 2.0))
fillNext(Array(1.0, NaN, 3.0, NaN, 2.0)) should be (Array(1.0, 3.0, 3.0, 2.0, 2.0))
}
test("linear") {
fillLinear(Array(1.0)) should be (Array(1.0))
fillLinear(Array(1.0, 1.0, 2.0)) should be (Array(1.0, 1.0, 2.0))
fillLinear(Array(1.0, NaN, 2.0)) should be (Array(1.0, 1.5, 2.0))
fillLinear(Array(2.0, NaN, 1.0)) should be (Array(2.0, 1.5, 1.0))
fillLinear(Array(1.0, NaN, NaN, 4.0)) should be (Array(1.0, 2.0, 3.0, 4.0))
fillLinear(Array(1.0, NaN, NaN, NaN, 5.0)) should be (Array(1.0, 2.0, 3.0, 4.0, 5.0))
fillLinear(Array(1.0, NaN, 3.0, NaN, 2.0)) should be (Array(1.0, 2.0, 3.0, 1.5, 2.0))
}
}
| helio9cn/spark-finance | src/test/scala/com/cloudera/finance/ts/TimeSeriesSuite.scala | Scala | apache-2.0 | 2,758 |
package com.lonelyplanet.scalahealthcheck
trait HealthChecker {
def check: HealthCheckResult
}
| lonelyplanet/scala-healthcheck | src/main/scala/com/lonelyplanet/scalahealthcheck/HealthChecker.scala | Scala | mit | 98 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import java.io.File
import java.util
import java.util.OptionalLong
import scala.collection.JavaConverters._
import test.org.apache.spark.sql.connector._
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.catalog.{SupportsRead, Table, TableCapability, TableProvider}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.read._
import org.apache.spark.sql.connector.read.partitioning.{ClusteredDistribution, Distribution, Partitioning}
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2Relation, DataSourceV2ScanRelation}
import org.apache.spark.sql.execution.exchange.{Exchange, ShuffleExchangeExec}
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.functions._
import org.apache.spark.sql.sources.{Filter, GreaterThan}
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.vectorized.ColumnarBatch
class DataSourceV2Suite extends QueryTest with SharedSparkSession {
import testImplicits._
private def getBatch(query: DataFrame): AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[AdvancedBatch]
}.head
}
private def getJavaBatch(query: DataFrame): JavaAdvancedDataSourceV2.AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[JavaAdvancedDataSourceV2.AdvancedBatch]
}.head
}
test("simplest implementation") {
Seq(classOf[SimpleDataSourceV2], classOf[JavaSimpleDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 10).map(i => Row(-i)))
checkAnswer(df.filter('i > 5), (6 until 10).map(i => Row(i, -i)))
}
}
}
test("advanced implementation") {
Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
val q1 = df.select('j)
checkAnswer(q1, (0 until 10).map(i => Row(-i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q1)
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
} else {
val batch = getJavaBatch(q1)
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
}
val q2 = df.filter('i > 3)
checkAnswer(q2, (4 until 10).map(i => Row(i, -i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q2)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
} else {
val batch = getJavaBatch(q2)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
}
val q3 = df.select('i).filter('i > 6)
checkAnswer(q3, (7 until 10).map(i => Row(i)))
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q3)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i"))
} else {
val batch = getJavaBatch(q3)
assert(batch.filters.flatMap(_.references).toSet == Set("i"))
assert(batch.requiredSchema.fieldNames === Seq("i"))
}
val q4 = df.select('j).filter('j < -10)
checkAnswer(q4, Nil)
if (cls == classOf[AdvancedDataSourceV2]) {
val batch = getBatch(q4)
// 'j < -10 is not supported by the testing data source.
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
} else {
val batch = getJavaBatch(q4)
// 'j < -10 is not supported by the testing data source.
assert(batch.filters.isEmpty)
assert(batch.requiredSchema.fieldNames === Seq("j"))
}
}
}
}
test("columnar batch scan implementation") {
Seq(classOf[ColumnarDataSourceV2], classOf[JavaColumnarDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 90).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 90).map(i => Row(-i)))
checkAnswer(df.filter('i > 50), (51 until 90).map(i => Row(i, -i)))
}
}
}
test("schema required data source") {
Seq(classOf[SchemaRequiredDataSource], classOf[JavaSchemaRequiredDataSource]).foreach { cls =>
withClue(cls.getName) {
val e = intercept[IllegalArgumentException](spark.read.format(cls.getName).load())
assert(e.getMessage.contains("requires a user-supplied schema"))
val schema = new StructType().add("i", "int").add("s", "string")
val df = spark.read.format(cls.getName).schema(schema).load()
assert(df.schema == schema)
assert(df.collect().isEmpty)
}
}
}
test("partitioning reporting") {
import org.apache.spark.sql.functions.{count, sum}
Seq(classOf[PartitionAwareDataSource], classOf[JavaPartitionAwareDataSource]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, Seq(Row(1, 4), Row(1, 4), Row(3, 6), Row(2, 6), Row(4, 2), Row(4, 2)))
val groupByColA = df.groupBy('i).agg(sum('j))
checkAnswer(groupByColA, Seq(Row(1, 8), Row(2, 6), Row(3, 6), Row(4, 4)))
assert(groupByColA.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isEmpty)
val groupByColAB = df.groupBy('i, 'j).agg(count("*"))
checkAnswer(groupByColAB, Seq(Row(1, 4, 2), Row(2, 6, 1), Row(3, 6, 1), Row(4, 2, 2)))
assert(groupByColAB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isEmpty)
val groupByColB = df.groupBy('j).agg(sum('i))
checkAnswer(groupByColB, Seq(Row(2, 8), Row(4, 2), Row(6, 5)))
assert(groupByColB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isDefined)
val groupByAPlusB = df.groupBy('i + 'j).agg(count("*"))
checkAnswer(groupByAPlusB, Seq(Row(5, 2), Row(6, 2), Row(8, 1), Row(9, 1)))
assert(groupByAPlusB.queryExecution.executedPlan.collectFirst {
case e: ShuffleExchangeExec => e
}.isDefined)
}
}
}
test ("statistics report data source") {
Seq(classOf[ReportStatisticsDataSource], classOf[JavaReportStatisticsDataSource]).foreach {
cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
val logical = df.queryExecution.optimizedPlan.collect {
case d: DataSourceV2ScanRelation => d
}.head
val stats = logical.computeStats()
assert(stats.rowCount.isDefined && stats.rowCount.get === 10,
"Row count statistics should be reported by the data source")
assert(stats.sizeInBytes === 80,
"Size in bytes statistics should be reported by the data source")
}
}
}
test("SPARK-23574: no shuffle exchange with single partition") {
val df = spark.read.format(classOf[SimpleSinglePartitionSource].getName).load().agg(count("*"))
assert(df.queryExecution.executedPlan.collect { case e: Exchange => e }.isEmpty)
}
test("simple writable data source") {
// TODO: java implementation.
Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
withTempPath { file =>
val path = file.getCanonicalPath
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("append").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).select('id, -'id))
// default save mode is ErrorIfExists
intercept[AnalysisException] {
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).save()
}
spark.range(10).select('id as 'i, -'id as 'j).write.mode("append").format(cls.getName)
.option("path", path).save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).union(spark.range(10)).select('id, -'id))
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("overwrite").save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(5).select('id, -'id))
val e = intercept[AnalysisException] {
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("ignore").save()
}
assert(e.message.contains("please use Append or Overwrite modes instead"))
val e2 = intercept[AnalysisException] {
spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
.option("path", path).mode("error").save()
}
assert(e2.getMessage.contains("please use Append or Overwrite modes instead"))
// test transaction
val failingUdf = org.apache.spark.sql.functions.udf {
var count = 0
(id: Long) => {
if (count > 5) {
throw new RuntimeException("testing error")
}
count += 1
id
}
}
// reading this input data will fail partway through.
val input = spark.range(10).select(failingUdf('id).as('i)).select('i, -'i as 'j)
val e3 = intercept[SparkException] {
input.write.format(cls.getName).option("path", path).mode("overwrite").save()
}
assert(e3.getMessage.contains("Writing job aborted"))
// make sure we don't have partial data.
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
}
}
}
test("simple counter in writer with onDataWriterCommit") {
Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
withTempPath { file =>
val path = file.getCanonicalPath
assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
val numPartition = 6
spark.range(0, 10, 1, numPartition).select('id as 'i, -'id as 'j).write.format(cls.getName)
.mode("append").option("path", path).save()
checkAnswer(
spark.read.format(cls.getName).option("path", path).load(),
spark.range(10).select('id, -'id))
assert(SimpleCounter.getCounter == numPartition,
"method onDataWriterCommit should be called as many as the number of partitions")
}
}
}
test("SPARK-23293: data source v2 self join") {
val df = spark.read.format(classOf[SimpleDataSourceV2].getName).load()
val df2 = df.select(($"i" + 1).as("k"), $"j")
checkAnswer(df.join(df2, "j"), (0 until 10).map(i => Row(-i, i, i + 1)))
}
test("SPARK-23301: column pruning with arbitrary expressions") {
val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
val q1 = df.select('i + 1)
checkAnswer(q1, (1 until 11).map(i => Row(i)))
val batch1 = getBatch(q1)
assert(batch1.requiredSchema.fieldNames === Seq("i"))
val q2 = df.select(lit(1))
checkAnswer(q2, (0 until 10).map(i => Row(1)))
val batch2 = getBatch(q2)
assert(batch2.requiredSchema.isEmpty)
// 'j === -1 can't be pushed down, but we should still be able to do column pruning
val q3 = df.filter('j === -1).select('j * 2)
checkAnswer(q3, Row(-2))
val batch3 = getBatch(q3)
assert(batch3.filters.isEmpty)
assert(batch3.requiredSchema.fieldNames === Seq("j"))
// column pruning should work with other operators.
val q4 = df.sort('i).limit(1).select('i + 1)
checkAnswer(q4, Row(1))
val batch4 = getBatch(q4)
assert(batch4.requiredSchema.fieldNames === Seq("i"))
}
test("SPARK-23315: get output from canonicalized data source v2 related plans") {
def checkCanonicalizedOutput(
df: DataFrame, logicalNumOutput: Int, physicalNumOutput: Int): Unit = {
val logical = df.queryExecution.logical.collect {
case d: DataSourceV2Relation => d
}.head
assert(logical.canonicalized.output.length == logicalNumOutput)
val physical = df.queryExecution.executedPlan.collect {
case d: BatchScanExec => d
}.head
assert(physical.canonicalized.output.length == physicalNumOutput)
}
val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
checkCanonicalizedOutput(df, 2, 2)
checkCanonicalizedOutput(df.select('i), 2, 1)
}
test("SPARK-25425: extra options should override sessions options during reading") {
val prefix = "spark.datasource.userDefinedDataSource."
val optionName = "optionA"
withSQLConf(prefix + optionName -> "true") {
val df = spark
.read
.option(optionName, false)
.format(classOf[DataSourceV2WithSessionConfig].getName).load()
val options = df.queryExecution.logical.collectFirst {
case d: DataSourceV2Relation => d.options
}.get
assert(options.get(optionName) === "false")
}
}
test("SPARK-25425: extra options should override sessions options during writing") {
withTempPath { path =>
val sessionPath = path.getCanonicalPath
withSQLConf("spark.datasource.simpleWritableDataSource.path" -> sessionPath) {
withTempPath { file =>
val optionPath = file.getCanonicalPath
val format = classOf[SimpleWritableDataSource].getName
val df = Seq((1L, 2L)).toDF("i", "j")
df.write.format(format).mode("append").option("path", optionPath).save()
assert(!new File(sessionPath).exists)
checkAnswer(spark.read.format(format).option("path", optionPath).load(), df)
}
}
}
}
test("SPARK-27411: DataSourceV2Strategy should not eliminate subquery") {
withTempView("t1") {
val t2 = spark.read.format(classOf[SimpleDataSourceV2].getName).load()
Seq(2, 3).toDF("a").createTempView("t1")
val df = t2.where("i < (select max(a) from t1)").select('i)
val subqueries = df.queryExecution.executedPlan.collect {
case p => p.subqueries
}.flatten
assert(subqueries.length == 1)
checkAnswer(df, (0 until 3).map(i => Row(i)))
}
}
}
case class RangeInputPartition(start: Int, end: Int) extends InputPartition
object SimpleReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = InternalRow(current, -current)
override def close(): Unit = {}
}
}
}
abstract class SimpleBatchTable extends Table with SupportsRead {
override def schema(): StructType = new StructType().add("i", "int").add("j", "int")
override def name(): String = this.getClass.toString
override def capabilities(): util.Set[TableCapability] = Set(BATCH_READ).asJava
}
abstract class SimpleScanBuilder extends ScanBuilder
with Batch with Scan {
override def build(): Scan = this
override def toBatch: Batch = this
override def readSchema(): StructType = new StructType().add("i", "int").add("j", "int")
override def createReaderFactory(): PartitionReaderFactory = SimpleReaderFactory
}
class SimpleSinglePartitionSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5))
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
}
// This class is used by pyspark tests. If this class is modified/moved, make sure pyspark
// tests still pass.
class SimpleDataSourceV2 extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
}
class AdvancedDataSourceV2 extends TableProvider {
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new AdvancedScanBuilder()
}
}
}
class AdvancedScanBuilder extends ScanBuilder
with Scan with SupportsPushDownFilters with SupportsPushDownRequiredColumns {
var requiredSchema = new StructType().add("i", "int").add("j", "int")
var filters = Array.empty[Filter]
override def pruneColumns(requiredSchema: StructType): Unit = {
this.requiredSchema = requiredSchema
}
override def readSchema(): StructType = requiredSchema
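// Per the SupportsPushDownFilters contract, the array returned by pushFilters contains the
// filters the source could NOT handle (Spark re-evaluates those after the scan); this test
// source only accepts GreaterThan on column "i".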
override def pushFilters(filters: Array[Filter]): Array[Filter] = {
val (supported, unsupported) = filters.partition {
case GreaterThan("i", _: Int) => true
case _ => false
}
this.filters = supported
unsupported
}
override def pushedFilters(): Array[Filter] = filters
override def build(): Scan = this
override def toBatch: Batch = new AdvancedBatch(filters, requiredSchema)
}
class AdvancedBatch(val filters: Array[Filter], val requiredSchema: StructType) extends Batch {
override def planInputPartitions(): Array[InputPartition] = {
val lowerBound = filters.collectFirst {
case GreaterThan("i", v: Int) => v
}
val res = scala.collection.mutable.ArrayBuffer.empty[InputPartition]
if (lowerBound.isEmpty) {
res.append(RangeInputPartition(0, 5))
res.append(RangeInputPartition(5, 10))
} else if (lowerBound.get < 4) {
res.append(RangeInputPartition(lowerBound.get + 1, 5))
res.append(RangeInputPartition(5, 10))
} else if (lowerBound.get < 9) {
res.append(RangeInputPartition(lowerBound.get + 1, 10))
}
res.toArray
}
override def createReaderFactory(): PartitionReaderFactory = {
new AdvancedReaderFactory(requiredSchema)
}
}
class AdvancedReaderFactory(requiredSchema: StructType) extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = {
val values = requiredSchema.map(_.name).map {
case "i" => current
case "j" => -current
}
InternalRow.fromSeq(values)
}
override def close(): Unit = {}
}
}
}
class SchemaRequiredDataSource extends TableProvider {
class MyScanBuilder(schema: StructType) extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = Array.empty
override def readSchema(): StructType = schema
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
throw new IllegalArgumentException("requires a user-supplied schema")
}
override def getTable(options: CaseInsensitiveStringMap, schema: StructType): Table = {
val userGivenSchema = schema
new SimpleBatchTable {
override def schema(): StructType = userGivenSchema
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder(userGivenSchema)
}
}
}
}
class ColumnarDataSourceV2 extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 50), RangeInputPartition(50, 90))
}
override def createReaderFactory(): PartitionReaderFactory = {
ColumnarReaderFactory
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
}
object ColumnarReaderFactory extends PartitionReaderFactory {
private final val BATCH_SIZE = 20
override def supportColumnarReads(partition: InputPartition): Boolean = true
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
throw new UnsupportedOperationException
}
override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[ColumnarBatch] {
private lazy val i = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
private lazy val j = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
private lazy val batch = new ColumnarBatch(Array(i, j))
private var current = start
override def next(): Boolean = {
i.reset()
j.reset()
var count = 0
while (current < end && count < BATCH_SIZE) {
i.putInt(count, current)
j.putInt(count, -current)
current += 1
count += 1
}
if (count == 0) {
false
} else {
batch.setNumRows(count)
true
}
}
override def get(): ColumnarBatch = batch
override def close(): Unit = batch.close()
}
}
}
class PartitionAwareDataSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder
with SupportsReportPartitioning {
override def planInputPartitions(): Array[InputPartition] = {
// Note that the same value of column `i` never appears in more than one partition.
Array(
SpecificInputPartition(Array(1, 1, 3), Array(4, 4, 6)),
SpecificInputPartition(Array(2, 4, 4), Array(6, 2, 2)))
}
override def createReaderFactory(): PartitionReaderFactory = {
SpecificReaderFactory
}
override def outputPartitioning(): Partitioning = new MyPartitioning
}
override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder()
}
}
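// Reports two partitions and claims the data is clustered by column "i", which is what lets
// the "partitioning reporting" test above group by 'i (and by 'i, 'j) without a shuffle.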
class MyPartitioning extends Partitioning {
override def numPartitions(): Int = 2
override def satisfy(distribution: Distribution): Boolean = distribution match {
case c: ClusteredDistribution => c.clusteredColumns.contains("i")
case _ => false
}
}
}
case class SpecificInputPartition(i: Array[Int], j: Array[Int]) extends InputPartition
object SpecificReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val p = partition.asInstanceOf[SpecificInputPartition]
new PartitionReader[InternalRow] {
private var current = -1
override def next(): Boolean = {
current += 1
current < p.i.length
}
override def get(): InternalRow = InternalRow(p.i(current), p.j(current))
override def close(): Unit = {}
}
}
}
class SchemaReadAttemptException(m: String) extends RuntimeException(m)
class SimpleWriteOnlyDataSource extends SimpleWritableDataSource {
override def getTable(options: CaseInsensitiveStringMap): Table = {
new MyTable(options) {
override def schema(): StructType = {
throw new SchemaReadAttemptException("schema should not be read.")
}
}
}
}
class ReportStatisticsDataSource extends TableProvider {
class MyScanBuilder extends SimpleScanBuilder
with SupportsReportStatistics {
override def estimateStatistics(): Statistics = {
new Statistics {
override def sizeInBytes(): OptionalLong = OptionalLong.of(80)
override def numRows(): OptionalLong = OptionalLong.of(10)
}
}
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
new SimpleBatchTable {
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder
}
}
}
}
|
jkbradley/spark
|
sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2Suite.scala
|
Scala
|
apache-2.0
| 26,320 |
package com.arcusys.valamis.certificate.service.export
import java.io.FileInputStream
import com.arcusys.valamis.certificate.model.{Certificate, CertificateFilter}
import com.arcusys.valamis.certificate.model.goal._
import com.arcusys.valamis.certificate.storage._
import com.arcusys.valamis.course.CourseService
import com.arcusys.valamis.util.export.ExportProcessor
import com.arcusys.valamis.file.service.FileService
import com.arcusys.valamis.util.ZipBuilder
import com.escalatesoft.subcut.inject.{BindingModule, Injectable}
class CertificateExportProcessor(
implicit val bindingModule: BindingModule)
extends ExportProcessor[Certificate, CertificateExportModel]
with Injectable {
private lazy val fileFacade = inject[FileService]
private lazy val courseService = inject[CourseService]
private lazy val courseGoalStorage = inject[CourseGoalStorage]
private lazy val activityGoalStorage = inject[ActivityGoalStorage]
private lazy val statementGoalStorage = inject[StatementGoalStorage]
private lazy val packageGoalStorage = inject[PackageGoalStorage]
private lazy val goalRepository = inject[CertificateGoalRepository]
private lazy val certificateRepository = inject[CertificateRepository]
def export(companyId: Long, certificateId: Long): FileInputStream = {
exportItems(Seq(certificateRepository.getById(certificateId)))
}
def export(companyId: Long): FileInputStream = {
exportItems(certificateRepository.getBy(CertificateFilter(companyId)))
}
override protected def exportItemsImpl(zip: ZipBuilder, items: Seq[Certificate]): Seq[CertificateExportModel] = {
items.map(c => {
val logo = exportLogo(zip, c)
toExportModel(c, logo)
})
}
private def exportLogo(zip: ZipBuilder, c: Certificate): String = {
if (c.logo == null || c.logo.isEmpty) {
c.logo
} else {
val logo = c.id.toString + "_" + c.logo
try {
zip.addFile(logo, fileFacade.getFileContent(c.id.toString, c.logo))
logo
} catch {
case _: Throwable => null
}
}
}
private def toExportModel(certificate: Certificate, newLogo: String): CertificateExportModel = {
val courseGoals = courseGoalStorage.getByCertificateId(certificate.id).map(toExportModel)
val statementGoals = statementGoalStorage.getByCertificateId(certificate.id).map(toExportModel)
val packageGoals = packageGoalStorage.getByCertificateId(certificate.id).map(toExportModel)
val activityGoals = activityGoalStorage.getByCertificateId(certificate.id).map(toExportModel)
CertificateExportModel(
certificate.title,
certificate.shortDescription,
certificate.description,
newLogo, //certificate.logo,
certificate.isPermanent,
certificate.isPublishBadge,
certificate.validPeriodType.toString,
certificate.validPeriod,
courseGoals,
statementGoals,
packageGoals,
activityGoals
)
}
private def toExportModel(goal: CourseGoal): CourseGoalExport = {
val course = courseService.getById(goal.courseId)
val goalData = goalRepository.getById(goal.goalId)
CourseGoalExport(
course.map(_.getDescriptiveName).getOrElse(""),
course.map(_.getFriendlyURL).getOrElse(""),
goalData.periodValue,
goalData.periodType.toString,
goalData.arrangementIndex)
}
private def toExportModel(goal: StatementGoal): StatementGoalExport = {
val goalData = goalRepository.getById(goal.goalId)
StatementGoalExport(goal.obj, goal.verb, goalData.periodValue, goalData.periodType.toString, goalData.arrangementIndex)
}
private def toExportModel(goal: PackageGoal): PackageGoalExport = {
val goalData = goalRepository.getById(goal.goalId)
PackageGoalExport(goal.packageId, goalData.periodValue, goalData.periodType.toString, goalData.arrangementIndex)
}
private def toExportModel(goal: ActivityGoal): ActivityGoalExport = {
val goalData = goalRepository.getById(goal.goalId)
ActivityGoalExport(goal.count, goal.activityName, goalData.periodValue, goalData.periodType.toString, goalData.arrangementIndex)
}
}
|
igor-borisov/valamis
|
valamis-certificate/src/main/scala/com/arcusys/valamis/certificate/service/export/CertificateExportProcessor.scala
|
Scala
|
gpl-3.0
| 4,118 |
package com.twitter.finagle.zipkin.core
/**
* The `Span` is the core data structure in RPC tracing. It denotes the
* issuance and handling of a single RPC request.
*/
import com.twitter.finagle.thrift.thrift
import com.twitter.finagle.tracing.TraceId
import com.twitter.util.Time
/**
* The span itself is an immutable data structure. Mutations are done
* through copying & updating span references elsewhere.
*
* @param traceId Contains the Trace id (same for whole trace), Span id (same for just one
* rpc call) and parent id (links to the parent span in this trace, if None this is the root span)
* @param _serviceName The name of the service handling the RPC
* @param _name The name of the RPC method
* @param annotations A sequence of annotations made in this span
* @param bAnnotations Key-Value annotations, used to attach non timestamped data
* @param endpoint This is the local endpoint the span was created on.
* @param created Optional span creation time.
*/
case class Span(
traceId: TraceId,
_serviceName: Option[String],
_name: Option[String],
annotations: Seq[ZipkinAnnotation],
bAnnotations: Seq[BinaryAnnotation],
endpoint: Endpoint,
created: Time) {
def this(
traceId: TraceId,
_serviceName: Option[String],
_name: Option[String],
annotations: Seq[ZipkinAnnotation],
bAnnotations: Seq[BinaryAnnotation],
endpoint: Endpoint
) = this(traceId, _serviceName, _name, annotations, bAnnotations, endpoint, created = Time.now)
// If necessary, we compute the timestamp of when the span was created
// which we serialize and send to the collector.
private[this] lazy val timestamp: Time = {
// If we have annotations which were created before
// the span, we synthesize the span creation time
// to match since it's illogical for the span to be
// created before annotations.
(created +: annotations.map(_.timestamp)).min
}
val serviceName = _serviceName getOrElse "Unknown"
val name = _name getOrElse "Unknown"
/**
* @return a pretty string for this span ID.
*/
def idString: String = {
val spanString = traceId.spanId.toString
val parentSpanString = traceId._parentId map (_.toString)
parentSpanString match {
case Some(parentSpanString) => "%s<:%s".format(spanString, parentSpanString)
case None => spanString
}
}
def toThrift: thrift.Span = {
val span = new thrift.Span
span.setTimestamp(timestamp.inMicroseconds)
span.setId(traceId.spanId.toLong)
traceId._parentId match {
case Some(id) => span.setParent_id(id.toLong)
case None => ()
}
span.setTrace_id(traceId.traceId.toLong)
span.setName(name)
span.setDebug(traceId.flags.isDebug)
// fill in the host/service data for all the annotations
annotations foreach { ann =>
val a = ann.toThrift
val ep = if (a.isSetHost) a.getHost() else endpoint.boundEndpoint.toThrift
ep.setService_name(serviceName)
a.setHost(ep)
span.addToAnnotations(a)
}
bAnnotations foreach { ann =>
val a = ann.toThrift
val ep = if (a.isSetHost) a.getHost() else endpoint.boundEndpoint.toThrift
ep.setService_name(serviceName)
a.setHost(ep)
span.addToBinary_annotations(a)
}
span
}
}
object Span {
def apply(traceId: TraceId): Span =
Span(traceId, None, None, Nil, Nil, Endpoint.Unknown, Time.now)
def apply(
traceId: TraceId,
_serviceName: Option[String],
_name: Option[String],
annotations: Seq[ZipkinAnnotation],
bAnnotations: Seq[BinaryAnnotation],
endpoint: Endpoint
): Span =
Span(traceId, _serviceName, _name, annotations, bAnnotations, endpoint, Time.now)
}
|
luciferous/finagle
|
finagle-zipkin-core/src/main/scala/com/twitter/finagle/zipkin/core/Span.scala
|
Scala
|
apache-2.0
| 3,735 |
package reactivemongo.core.actors
import scala.util.control.NonFatal
import scala.collection.immutable.ListSet
import reactivemongo.api.{
AuthenticationMode,
ReadPreference,
ScramSha1Authentication,
ScramSha256Authentication
}
import reactivemongo.core.errors.CommandException
import reactivemongo.api.commands.{
Command,
CommandKind,
FailedAuthentication,
SuccessfulAuthentication,
ScramFinalNegociation,
ScramInitiate,
ScramNegociation,
ScramStartNegociation
}
import reactivemongo.core.protocol.Response
import reactivemongo.core.nodeset.{
Authenticate,
Connection,
ScramAuthenticating
}
private[reactivemongo] trait MongoScramSha1Authentication
extends MongoScramAuthentication[ScramSha1Authentication.type] {
system: MongoDBSystem =>
val mechanism = ScramSha1Authentication
import reactivemongo.api.commands.{
ScramSha1Initiate,
ScramSha1StartNegociation
}
protected def initiate(user: String) = ScramSha1Initiate(user)
protected lazy val challengeReader = ScramSha1Initiate.reader[pack.type](pack)
protected def startNegociation(
user: String,
password: String,
conversationId: Int,
payload: Array[Byte],
randomPrefix: String,
message: String) = ScramSha1StartNegociation(user, password,
conversationId, payload, randomPrefix, message)
}
private[reactivemongo] trait MongoScramSha256Authentication
extends MongoScramAuthentication[ScramSha256Authentication.type] {
system: MongoDBSystem =>
val mechanism = ScramSha256Authentication
import reactivemongo.api.commands.{
ScramSha256Initiate,
ScramSha256StartNegociation
}
protected def initiate(user: String) = ScramSha256Initiate(user)
protected lazy val challengeReader =
ScramSha256Initiate.reader[pack.type](pack)
protected def startNegociation(
user: String,
password: String,
conversationId: Int,
payload: Array[Byte],
randomPrefix: String,
message: String) = ScramSha256StartNegociation(user, password,
conversationId, payload, randomPrefix, message)
}
// ---
private[reactivemongo] sealed trait MongoScramAuthentication[M <: AuthenticationMode.Scram] { system: MongoDBSystem =>
import org.apache.commons.codec.binary.Base64
protected val mechanism: M
/** @param user The user name */
protected def initiate(user: String): ScramInitiate[M]
protected def challengeReader: pack.Reader[ScramInitiate.Result[M]]
protected def startNegociation(
user: String,
password: String,
conversationId: Int,
payload: Array[Byte],
randomPrefix: String,
message: String): ScramStartNegociation[M]
// ---
private lazy val initiateWriter =
ScramInitiate.writer[pack.type, M](pack, mechanism)
protected final def sendAuthenticate(connection: Connection, nextAuth: Authenticate): Connection = {
val start = initiate(nextAuth.user)
val maker = Command.buildRequestMaker(pack)(
CommandKind.Authenticate,
start, initiateWriter, ReadPreference.primary, nextAuth.db)
connection.send(
maker(RequestIdGenerator.getNonce.next), compression = ListSet.empty)
nextAuth.password match {
case Some(password) => connection.copy(authenticating = Some(
ScramAuthenticating(nextAuth.db, nextAuth.user, password,
start.randomPrefix, start.message)))
case _ => {
warn(s"Unexpected missing password: ${nextAuth.user}@${nextAuth.db}")
connection
}
}
}
private lazy val negociationWriter =
ScramStartNegociation.writer[pack.type, M](pack)
private lazy val negociationReader =
ScramStartNegociation.reader(pack, mechanism)
private lazy val finalWriter = ScramFinalNegociation.writer(pack)
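// The SCRAM conversation implemented below: (1) `sendAuthenticate` sends the `initiate`
// command (client nonce); the first case handles the server challenge and replies with
// `startNegociation` (client proof). (2) The second case reads the negotiation response,
// checks the base64-encoded server signature (the "v" field) against the one computed
// locally, then sends ScramFinalNegociation to complete the handshake.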
protected val authReceive: Receive = {
case resp: Response if RequestIdGenerator.getNonce accepts resp => {
val chaRes: ScramInitiate.Result[M] = try {
pack.readAndDeserialize(resp, challengeReader)
} catch {
case NonFatal(error) =>
Left(FailedAuthentication(pack)(error.getMessage, None, None))
}
chaRes.fold(
{ err =>
val respTo = resp.header.responseTo
val msg = s"Fails to process ${mechanism} nonce for $respTo"
warn(msg, err)
updateNodeSet(s"ScramNonceFailure($mechanism, $respTo)") { ns =>
handleAuthResponse(ns, resp)(Left(
FailedAuthentication(pack)(msg, None, None)))
}
()
}, { challenge =>
val chanId = resp.info.channelId
debug(s"Got $mechanism nonce on channel #${chanId}: $challenge")
updateNodeSet(s"ScramNonce($mechanism, $chanId)") { ns =>
ns.pickByChannelId(chanId).fold(ns) { byChan =>
val con = byChan._2
con.authenticating match {
case Some(a @ ScramAuthenticating(
db, user, pwd, rand, msg, _, _, step)) => {
val negociation = startNegociation(user, pwd,
challenge.conversationId, challenge.payload, rand, msg)
negociation.serverSignature.fold(
{ err => handleAuthResponse(ns, resp)(Left(err)) },
{ sig =>
ns.updateConnectionByChannelId(chanId) { con =>
val maker = Command.buildRequestMaker(pack)(
CommandKind.GetNonce, negociation, negociationWriter,
ReadPreference.primary, db)
con.send(
maker(RequestIdGenerator.authenticate.next),
compression = ListSet.empty).
addListener(new OperationHandler(
{ cause =>
error(s"Fails to send request after ${mechanism} nonce #${chanId}", cause)
},
{ _ => () }))
con.copy(authenticating = Some(a.copy(
conversationId = Some(challenge.conversationId),
serverSignature = Some(sig),
step = step + 1)))
}
})
}
case authing => {
val msg = s"Unexpected authentication: $authing"
warn(msg)
handleAuthResponse(ns, resp)(
Left(FailedAuthentication(pack)(msg, None, None)))
}
}
}
}
()
})
}
case response: Response if RequestIdGenerator.authenticate accepts response => {
val chanId = response.info.channelId
debug(s"Got authenticated response #${chanId}!")
@inline def resp: Either[Either[CommandException, SuccessfulAuthentication], Array[Byte]] = try {
pack.readAndDeserialize(response, negociationReader) match {
case Left(err) => Left(Left(err))
case Right(Left(authed)) => Left(Right(authed))
case Right(Right(payload)) => Right(payload)
}
} catch {
case NonFatal(error) =>
Left(Left(FailedAuthentication(pack)(error.getMessage, None, None)))
}
updateNodeSet(s"ScramNegociation($mechanism, $chanId)") { ns =>
resp.fold(
{ r => handleAuthResponse(ns, response)(r) },
{ (payload: Array[Byte]) =>
debug(s"2-phase $mechanism negotiation")
ns.pickByChannelId(chanId).fold(ns) { byChan =>
val con = byChan._2
con.authenticating match {
case Some(a @ ScramAuthenticating(
db, _, _, _, _, Some(cid), Some(sig), 1 /* step */ )) => {
val serverSig: Option[String] =
ScramNegociation.parsePayload(payload).get("v")
if (!serverSig.contains(Base64.encodeBase64String(sig))) {
val msg = s"${mechanism} server signature is invalid"
warn(msg)
handleAuthResponse(ns, response)(
Left(FailedAuthentication(pack)(msg, None, None)))
} else {
val maker = Command.buildRequestMaker(pack)(
CommandKind.Authenticate,
ScramFinalNegociation(cid, payload), finalWriter,
ReadPreference.primary, db)
con.send(
maker(RequestIdGenerator.authenticate.next),
compression = ListSet.empty).
addListener(new OperationHandler(
{ e =>
error(s"Fails to negociate $mechanism #${chanId}", e)
},
{ _ => () }))
ns.updateConnectionByChannelId(chanId) { _ =>
con.copy(authenticating = Some(a.copy(step = 2)))
}
}
}
case authing => {
val msg = s"Unexpected authentication: $authing"
warn(msg)
handleAuthResponse(ns, response)(
Left(FailedAuthentication(pack)(msg, None, None)))
}
}
}
})
}
()
}
}
}
|
ReactiveMongo/ReactiveMongo
|
driver/src/main/scala/core/actors/MongoScramAuthentication.scala
|
Scala
|
apache-2.0
| 9,381 |
package io.buoyant.linkerd
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonProperty, JsonSubTypes}
import com.twitter.conversions.time._
import com.twitter.finagle.{param, Stack}
import com.twitter.finagle.buoyant.TotalTimeout
import com.twitter.finagle.service._
import com.twitter.util.Duration
import io.buoyant.config.PolymorphicConfig
import io.buoyant.router.{ClassifiedRetries, RetryBudgetConfig}
import io.buoyant.router.RetryBudgetModule.{param => ev}
/**
* SvcConfig is a trait containing protocol agnostic configuration options
* that apply at the level of the logical name (i.e. the path stack). This
* trait can be mixed into a class to allow these options to be set on that
* class as part of config deserialization.
*/
trait SvcConfig {
var totalTimeoutMs: Option[Int] = None
var retries: Option[RetriesConfig] = None
@JsonIgnore
def params(vars: Map[String, String]): Stack.Params = Stack.Params.empty
.maybeWith(totalTimeoutMs.map(timeout => TotalTimeout.Param(timeout.millis)))
.maybeWith(retries.flatMap(_.mkBackoff))
.maybeWith(retries.flatMap(_.budget))
.maybeWith(responseClassifier.map(param.ResponseClassifier(_)))
/*
* responseClassifier categorizes responses to determine whether
* they are failures and if they are retryable.
*/
@JsonProperty("responseClassifier")
var _responseClassifier: Option[ResponseClassifierConfig] = None
@JsonIgnore
def baseResponseClassifier: ResponseClassifier =
ClassifiedRetries.Default
@JsonIgnore
def responseClassifier: Option[ResponseClassifier] =
_responseClassifier.map { classifier =>
ClassifiedRetries.orElse(classifier.mk, baseResponseClassifier)
}
}
case class RetriesConfig(
backoff: Option[BackoffConfig] = None,
budget: Option[RetryBudgetConfig] = None
) {
@JsonIgnore
def mkBackoff: Option[ClassifiedRetries.Backoffs] =
backoff.map(_.mk).map(ClassifiedRetries.Backoffs(_))
}
@JsonSubTypes(Array(
new JsonSubTypes.Type(value = classOf[ConstantBackoffConfig], name = "constant"),
new JsonSubTypes.Type(value = classOf[JitteredBackoffConfig], name = "jittered")
))
abstract class BackoffConfig extends PolymorphicConfig {
@JsonIgnore
def mk: Stream[Duration]
}
case class ConstantBackoffConfig(ms: Int) extends BackoffConfig {
// ms defaults to 0 when not specified
def mk = Backoff.constant(ms.millis)
}
/** See http://www.awsarchitectureblog.com/2015/03/backoff.html */
case class JitteredBackoffConfig(minMs: Option[Int], maxMs: Option[Int]) extends BackoffConfig {
def mk = {
val min = minMs match {
case Some(ms) => ms.millis
case None => throw new IllegalArgumentException("'minMs' must be specified")
}
val max = maxMs match {
case Some(ms) => ms.millis
case None => throw new IllegalArgumentException("'maxMs' must be specified")
}
Backoff.decorrelatedJittered(min, max)
}
}
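// Illustrative sketch, not part of the original source: given the definitions above,
// ConstantBackoffConfig(100).mk yields a flat 100.millis between retries, while
// JitteredBackoffConfig(Some(50), Some(5000)).mk yields a decorrelated-jitter
// Stream[Duration] bounded by 50.millis and 5.seconds; omitting minMs or maxMs
// fails fast with an IllegalArgumentException.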
|
pawelprazak/linkerd
|
linkerd/core/src/main/scala/io/buoyant/linkerd/SvcConfig.scala
|
Scala
|
apache-2.0
| 2,916 |
package com.twitter.algebird
import java.nio._
/**
* MinHasher as a Monoid operates on this class to avoid the too generic Array[Byte]. The bytes are assumed to
* be never modified. The only reason we did not use IndexedSeq[Byte] instead of Array[Byte] is because a
* ByteBuffer is used internally in MinHasher and it can wrap Array[Byte].
*/
case class MinHashSignature(bytes: Array[Byte]) extends AnyVal
object MinHasher {
/** Numerically solve the inverse of estimatedThreshold, given numBands*numRows */
def pickBands(threshold: Double, hashes: Int): Int = {
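// estimatedThreshold is (1 / bands)^(1 / rows) with hashes = bands * rows, so the target
// band count satisfies bands * ln(bands) = -hashes * ln(threshold); the loop below finds
// the smallest integer number of bands that reaches that target.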
val target = hashes * -1 * math.log(threshold)
var bands = 1
while (bands * math.log(bands) < target) bands += 1
bands
}
def pickHashesAndBands(threshold: Double, maxHashes: Int): (Int, Int) = {
val bands = pickBands(threshold, maxHashes)
val hashes = (maxHashes / bands) * bands
(hashes, bands)
}
}
/**
* Instances of MinHasher can create, combine, and compare fixed-sized signatures of arbitrarily sized sets.
*
* A signature is represented by a byte array of approx maxBytes size. You can initialize a signature with a
* single element, usually a Long or String. You can combine any two set's signatures to produce the signature
* of their union. You can compare any two set's signatures to estimate their Jaccard similarity. You can use
* a set's signature to estimate the number of distinct values in the set. You can also use a combination of
* the above to estimate the size of the intersection of two sets from their signatures. The more bytes in the
* signature, the more accurate all of the above will be.
*
* You can also use these signatures to quickly find similar sets without doing n^2 comparisons. Each
* signature is assigned to several buckets; sets whose signatures end up in the same bucket are likely to be
* similar. The targetThreshold controls the desired level of similarity - the higher the threshold, the more
* efficiently you can find all the similar sets.
*
* This abstract superclass is generic with regards to the size of the hash used. Depending on the number of
* unique values in the domain of the sets, you may want a MinHasher16, a MinHasher32, or a new custom
* subclass.
*
* This implementation is modeled after Chapter 3 of Ullman and Rajaraman's Mining of Massive Datasets:
* http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf
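*
* For illustration only (not part of the original source), typical usage looks roughly like:
*
* {{{
* val mh = new MinHasher32(targetThreshold = 0.5, maxBytes = 64)
* val sigA = (1L to 1000L).map(i => mh.init(i)).reduce((a, b) => mh.plus(a, b))
* val sigB = (501L to 1500L).map(i => mh.init(i)).reduce((a, b) => mh.plus(a, b))
* mh.similarity(sigA, sigB) // estimates the true Jaccard similarity, 500 / 1500
* mh.buckets(sigA) // LSH bucket keys; similar sets are likely to share at least one
* }}}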
*/
abstract class MinHasher[H](val numHashes: Int, val numBands: Int)(implicit n: Numeric[H])
extends Monoid[MinHashSignature] {
/** The number of bytes used for each hash in the signature */
def hashSize: Int
/** For explanation of the "bands" and "rows" see Ullman and Rajaraman */
val numBytes: Int = numHashes * hashSize
val numRows: Int = numHashes / numBands
/** This seed could be anything */
private val seed = 123456789
/**
* We always use a 128 bit hash function, so the number of hash functions is different (and usually smaller)
* than the number of hashes in the signature.
*/
private val hashFunctions = {
val r = new scala.util.Random(seed)
val numHashFunctions = math.ceil(numBytes / 16.0).toInt
(1 to numHashFunctions).map(_ => MurmurHash128(r.nextLong))
}
/** Signature for empty set, needed to be a proper Monoid */
override val zero: MinHashSignature = MinHashSignature(buildArray(maxHash))
/** Set union */
override def plus(left: MinHashSignature, right: MinHashSignature): MinHashSignature =
MinHashSignature(buildArray(left.bytes, right.bytes)((l, r) => n.min(l, r)))
/** Estimate Jaccard similarity (size of intersection / size of union) */
def similarity(left: MinHashSignature, right: MinHashSignature): Double =
buildArray(left.bytes, right.bytes)((l, r) => if (l == r) n.one else n.zero)
.map(_.toDouble)
.sum / numHashes
/** Bucket keys to use for quickly finding other similar items via locality sensitive hashing */
def buckets(sig: MinHashSignature): List[Long] =
sig.bytes
.grouped(numRows * hashSize)
.filter(_.size == numRows * hashSize)
.map(hashFunctions.head(_)._1)
.toList
/** Create a signature for a single Long value */
def init(value: Long): MinHashSignature = init(_(value))
/** Create a signature for a single String value */
def init(value: String): MinHashSignature = init(_(value))
/** Create a signature for an arbitrary value */
def init(fn: MurmurHash128 => (Long, Long)): MinHashSignature = {
val bytes = new Array[Byte](numBytes)
val buffer = ByteBuffer.allocate(hashFunctions.size * 16)
val longBuffer = buffer.asLongBuffer
hashFunctions.foreach { h =>
val (long1, long2) = fn(h)
longBuffer.put(long1)
longBuffer.put(long2)
}
buffer.rewind
buffer.get(bytes)
MinHashSignature(bytes)
}
/** Useful for understanding the effects of numBands and numRows */
val estimatedThreshold: Double = math.pow(1.0 / numBands, 1.0 / numRows)
/** Useful for understanding the effects of numBands and numRows */
def probabilityOfInclusion(sim: Double): Double =
1.0 - math.pow(1.0 - math.pow(sim, numRows), numBands)
/** Maximum value the hash can take on (not 2^(8 * hashSize) because of signed types) */
def maxHash: H
/** Initialize a byte array by generating hash values */
protected def buildArray(fn: => H): Array[Byte]
/** Decode two signatures into hash values, combine them somehow, and produce a new array */
protected def buildArray(left: Array[Byte], right: Array[Byte])(fn: (H, H) => H): Array[Byte]
}
class MinHasher32(numHashes: Int, numBands: Int) extends MinHasher[Int](numHashes, numBands) {
private def this(x: (Int, Int)) = this(x._1, x._2)
def this(targetThreshold: Double, maxBytes: Int) =
this(MinHasher.pickHashesAndBands(targetThreshold, maxBytes / 4))
override def hashSize: Int = 4
override def maxHash: Int = Int.MaxValue
override protected def buildArray(fn: => Int): Array[Byte] = {
val byteBuffer = ByteBuffer.allocate(numBytes)
val writeBuffer = byteBuffer.asIntBuffer
1.to(numHashes).foreach(_ => writeBuffer.put(fn))
byteBuffer.array
}
override protected def buildArray(left: Array[Byte], right: Array[Byte])(
fn: (Int, Int) => Int
): Array[Byte] = {
val leftBuffer = ByteBuffer.wrap(left).asIntBuffer
val rightBuffer = ByteBuffer.wrap(right).asIntBuffer
buildArray(fn(leftBuffer.get, rightBuffer.get))
}
/** Seems to work, but experimental and not generic yet */
def approxCount(sig: Array[Byte]): Long = {
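// Rough reasoning (added for illustration): with k distinct elements hashed uniformly into
// [-2^31, 2^31), the expected minimum hash is about -2^31 + 2^32 / (k + 1), so
// k + 1 ≈ 2^32 / (mean + 2^31); below, 2L << 31 is 2^32 and 2L << 30 is 2^31.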
val buffer = ByteBuffer.wrap(sig).asIntBuffer
val mean = 1
.to(numHashes)
.map(_ => buffer.get.toLong)
.sum / numHashes
(2L << 31) / (mean.toLong + (2L << 30))
}
}
class MinHasher16(numHashes: Int, numBands: Int) extends MinHasher[Char](numHashes, numBands) {
private def this(x: (Int, Int)) = this(x._1, x._2)
def this(targetThreshold: Double, maxBytes: Int) =
this(MinHasher.pickHashesAndBands(targetThreshold, maxBytes / 2))
override def hashSize: Int = 2
override def maxHash: Char = Char.MaxValue
override protected def buildArray(fn: => Char): Array[Byte] = {
val byteBuffer = ByteBuffer.allocate(numBytes)
val writeBuffer = byteBuffer.asCharBuffer
1.to(numHashes).foreach(_ => writeBuffer.put(fn))
byteBuffer.array
}
override protected def buildArray(left: Array[Byte], right: Array[Byte])(
fn: (Char, Char) => Char
): Array[Byte] = {
val leftBuffer = ByteBuffer.wrap(left).asCharBuffer
val rightBuffer = ByteBuffer.wrap(right).asCharBuffer
buildArray(fn(leftBuffer.get, rightBuffer.get))
}
}
|
twitter/algebird
|
algebird-core/src/main/scala/com/twitter/algebird/MinHasher.scala
|
Scala
|
apache-2.0
| 7,647 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka.tools.export
import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue, TimeUnit}
import com.beust.jcommander.{ParameterException, Parameters}
import org.geotools.data.{FeatureEvent, FeatureListener, Query}
import org.locationtech.geomesa.features.TransformSimpleFeature
import org.locationtech.geomesa.kafka.data.KafkaDataStore
import org.locationtech.geomesa.kafka.tools.ConsumerDataStoreParams
import org.locationtech.geomesa.kafka.tools.KafkaDataStoreCommand.KafkaDistributedCommand
import org.locationtech.geomesa.kafka.tools.export.KafkaExportCommand._
import org.locationtech.geomesa.kafka.utils.KafkaFeatureEvent.KafkaFeatureChanged
import org.locationtech.geomesa.tools.export.ExportCommand
import org.locationtech.geomesa.tools.export.ExportCommand.ExportParams
import org.locationtech.geomesa.tools.export.formats.FeatureExporter
import org.locationtech.geomesa.tools.{Command, RequiredTypeNameParam}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import scala.util.control.NonFatal
class KafkaExportCommand extends ExportCommand[KafkaDataStore] with KafkaDistributedCommand {
import org.locationtech.geomesa.index.conf.QueryHints.RichHints
override val params = new KafkaExportParameters()
private val queue: BlockingQueue[SimpleFeature] = new LinkedBlockingQueue[SimpleFeature]
override protected def export(ds: KafkaDataStore, query: Query, exporter: FeatureExporter): Option[Long] = {
val sft = ds.getSchema(params.featureName)
if (sft == null) {
throw new ParameterException(s"Type ${params.featureName} does not exist at path ${params.zkPath}")
}
val filter = Option(query.getFilter).filter(_ != Filter.INCLUDE)
val transform = query.getHints.getTransform
val listener = new ExportFeatureListener(sft, filter, transform, queue)
Command.user.info(s"Exporting from kafka topic '${sft.getUserData.get(KafkaDataStore.TopicKey)}' " +
"- use `ctrl-c` to stop")
val features: Iterator[SimpleFeature] = new Iterator[SimpleFeature] {
private var current: SimpleFeature = _
override def hasNext: Boolean = {
if (current == null) {
current = queue.poll(100, TimeUnit.MILLISECONDS)
}
current != null
}
override def next(): SimpleFeature = {
val res = current
current = null
res
}
}
val fs = ds.getFeatureSource(query.getTypeName)
fs.addFeatureListener(listener)
try {
exporter.start(query.getHints.getReturnSft)
query.getHints.getMaxFeatures match {
case None => exportContinuously(exporter, features)
case Some(m) => exportWithMax(exporter, features, m)
}
} catch {
case NonFatal(e) =>
throw new RuntimeException("Could not execute export query. Please ensure that all arguments are correct", e)
} finally {
fs.removeFeatureListener(listener)
}
}
private def exportContinuously(exporter: FeatureExporter, features: Iterator[SimpleFeature]): Option[Long] = {
// close the exporter when the user cancels (ctrl-c) so whatever has been exported so far is finished off
sys.addShutdownHook(exporter.close())
var count = 0L
while (true) {
// hasNext may return false one time, and then true the next if more data is read from kafka
if (features.hasNext) {
exporter.export(features).foreach(count += _)
} else {
Thread.sleep(1000)
}
}
Some(count)
}
private def exportWithMax(exporter: FeatureExporter, features: Iterator[SimpleFeature], max: Int): Option[Long] = {
// noinspection LoopVariableNotUpdated
var count = 0L
while (count < max) {
// hasNext may return false one time, and then true the next if more data is read from kafka
if (features.hasNext) {
// note: side effect in map - do count here in case exporter doesn't report counts
val batch = features.take(max - count.toInt).map { f => count += 1; f }
exporter.export(batch)
} else {
Thread.sleep(1000)
}
}
Some(count)
}
}
object KafkaExportCommand {
@Parameters(commandDescription = "Export features from a GeoMesa Kafka topic")
class KafkaExportParameters extends ConsumerDataStoreParams with RequiredTypeNameParam with ExportParams
class ExportFeatureListener(sft: SimpleFeatureType,
filter: Option[Filter],
transform: Option[(String, SimpleFeatureType)],
queue: BlockingQueue[SimpleFeature]) extends FeatureListener {
private val attributes = transform.map { case (tdefs, tsft) =>
(tsft, TransformSimpleFeature.attributes(sft, tsft, tdefs))
}
override def changed(event: FeatureEvent): Unit = {
event match {
case e: KafkaFeatureChanged => added(e.feature)
case _ => // no-op
}
}
def added(sf: SimpleFeature): Unit = {
if (filter.forall(_.evaluate(sf))) {
queue.put(attributes.map { case (tsft, a) => new TransformSimpleFeature(tsft, a, sf) }.getOrElse(sf))
}
}
}
}
|
elahrvivaz/geomesa
|
geomesa-kafka/geomesa-kafka-tools/src/main/scala/org/locationtech/geomesa/kafka/tools/export/KafkaExportCommand.scala
|
Scala
|
apache-2.0
| 5,652 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.testsuite.javalib
import scala.scalajs.js
import org.scalajs.jasminetest.JasmineTest
object StackTraceElementTest extends JasmineTest {
import scala.scalajs.runtime.StackTrace.Implicits._
describe("java.lang.StackTraceElement") {
it("should use the additional columnNumber field in its toString") {
val st = new StackTraceElement("MyClass", "myMethod", "myFile.scala", 1)
st.setColumnNumber(5)
expect(st.toString).toEqual("MyClass.myMethod(myFile.scala:1:5)")
}
it("should leave toString unmodified if columnNumber is not specified") {
val st = new StackTraceElement("MyClass", "myMethod", "myFile.scala", 1)
expect(st.toString).toEqual("MyClass.myMethod(myFile.scala:1)")
}
}
}
|
jmnarloch/scala-js
|
test-suite/src/test/scala/org/scalajs/testsuite/javalib/StackTraceElementTest.scala
|
Scala
|
bsd-3-clause
| 1,283 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.expression
/**
* Expression that references a run time field
*/
trait ReferenceExpression extends Expression {
def objectName: Option[String] = None
def referenceName: String
def isRuntime: Boolean
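// Illustrative example (not from the original source): referenceName = "actualEndTime" with
// isRuntime = true and objectName = Some("node") renders as "node.@actualEndTime"; a
// non-runtime field omits the "@" prefix.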
def content: String =
objectName.map(_ + ".").getOrElse("") + (if (isRuntime) "@" + referenceName else referenceName)
}
|
realstraw/hyperion
|
core/src/main/scala/com/krux/hyperion/expression/ReferenceExpression.scala
|
Scala
|
bsd-3-clause
| 593 |
package controllers
import play.api._
import play.api.mvc._
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits._
// Play Json imports
import play.api.libs.json._
object Application extends Controller {
def index = Action {
Ok(views.html.index("Your new application is ready."))
}
}
|
lukiano/networkblame
|
app/controllers/Application.scala
|
Scala
|
mit
| 325 |
package com.twitter.finagle.builder
import com.twitter.finagle._
import com.twitter.finagle.channel.OpenConnectionsThresholds
import com.twitter.finagle.netty3.Netty3Listener
import com.twitter.finagle.ssl.{Ssl, Engine}
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.tracing.{NullTracer, Tracer}
import com.twitter.finagle.util._
import com.twitter.finagle.transport.Transport
import com.twitter.util.{Closable, Duration, Future, Monitor, NullMonitor, Time}
import java.net.SocketAddress
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.{Logger, Level}
import javax.net.ssl.SSLEngine
import org.jboss.netty.channel.ServerChannelFactory
import scala.annotation.implicitNotFound
import scala.collection.mutable
/**
* A listening server.
*/
trait Server extends Closable {
/**
* Close the underlying server gracefully with the given grace
* period. close() will drain the current channels, waiting up to
* ``timeout'', after which channels are forcibly closed.
*/
def close(timeout: Duration = Duration.Top): Future[Unit] =
close(timeout.fromNow)
/**
* When a server is bound to an ephemeral port, gets back the address
* with concrete listening port picked.
*/
def localAddress: SocketAddress
}
/**
* Factory for [[com.twitter.finagle.builder.ServerBuilder]] instances
*/
object ServerBuilder {
type Complete[Req, Rep] = ServerBuilder[
Req, Rep, ServerConfig.Yes,
ServerConfig.Yes, ServerConfig.Yes]
def apply() = new ServerBuilder()
def get() = apply()
/**
* Provides a typesafe `build` for Java.
*/
def safeBuild[Req, Rep](service: Service[Req, Rep], builder: Complete[Req, Rep]): Server =
builder.build(service)(ServerConfigEvidence.FullyConfigured)
}
object ServerConfig {
sealed abstract trait Yes
type FullySpecified[Req, Rep] = ServerConfig[Req, Rep, Yes, Yes, Yes]
}
@implicitNotFound("Builder is not fully configured: Codec: ${HasCodec}, BindTo: ${HasBindTo}, Name: ${HasName}")
trait ServerConfigEvidence[HasCodec, HasBindTo, HasName]
private[builder] object ServerConfigEvidence {
implicit object FullyConfigured extends ServerConfigEvidence[ServerConfig.Yes, ServerConfig.Yes, ServerConfig.Yes]
}
private[builder] case class BufferSize(
send: Option[Int] = None,
recv: Option[Int] = None
)
private[builder] case class TimeoutConfig(
hostConnectionMaxIdleTime: Option[Duration] = None,
hostConnectionMaxLifeTime: Option[Duration] = None,
requestTimeout: Option[Duration] = None,
readTimeout: Option[Duration] = None,
writeCompletionTimeout: Option[Duration] = None
)
/**
* A configuration object that represents what shall be built.
*/
private[builder] final case class ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName](
private val _codecFactory: Option[CodecFactory[Req, Rep]#Server] = None,
private val _statsReceiver: Option[StatsReceiver] = None,
private val _monitor: Option[(String, SocketAddress) => Monitor] = None,
private val _name: Option[String] = None,
private val _bufferSize: BufferSize = BufferSize(),
private val _keepAlive: Option[Boolean] = None,
private val _backlog: Option[Int] = None,
private val _bindTo: Option[SocketAddress] = None,
private val _logger: Option[Logger] = None,
private val _newEngine: Option[() => Engine] = None,
private val _channelFactory: ServerChannelFactory = Netty3Listener.channelFactory,
private val _maxConcurrentRequests: Option[Int] = None,
private val _timeoutConfig: TimeoutConfig = TimeoutConfig(),
private val _tracer: Tracer = NullTracer,
private val _openConnectionsThresholds: Option[OpenConnectionsThresholds] = None,
private val _cancelOnHangup: Boolean = true,
private val _logChannelActivity: Boolean = false,
private val _daemon: Boolean = false)
{
import ServerConfig._
/**
* The Scala compiler errors if the case class members don't have underscores.
* Nevertheless, we want a friendly public API so we create delegators without
* underscores.
*/
lazy val codecFactory = _codecFactory.get
val statsReceiver = _statsReceiver
val monitor = _monitor
lazy val name = _name.get
val bufferSize = _bufferSize
val keepAlive = _keepAlive
val backlog = _backlog
lazy val bindTo = _bindTo.get
val logger = _logger
val newEngine = _newEngine
val channelFactory = _channelFactory
val maxConcurrentRequests = _maxConcurrentRequests
val hostConnectionMaxIdleTime = _timeoutConfig.hostConnectionMaxIdleTime
val hostConnectionMaxLifeTime = _timeoutConfig.hostConnectionMaxLifeTime
val requestTimeout = _timeoutConfig.requestTimeout
val readTimeout = _timeoutConfig.readTimeout
val writeCompletionTimeout = _timeoutConfig.writeCompletionTimeout
val timeoutConfig = _timeoutConfig
val tracer = _tracer
val openConnectionsThresholds = _openConnectionsThresholds
val cancelOnHangup = _cancelOnHangup
val logChannelActivity = _logChannelActivity
val daemon = _daemon
def toMap = Map(
"codecFactory" -> _codecFactory,
"statsReceiver" -> _statsReceiver,
"monitor" -> _monitor,
"name" -> _name,
"bufferSize" -> _bufferSize,
"keepAlive" -> _keepAlive,
"backlog" -> _backlog,
"bindTo" -> _bindTo,
"logger" -> _logger,
"newEngine" -> _newEngine,
"channelFactory" -> Some(_channelFactory),
"maxConcurrentRequests" -> _maxConcurrentRequests,
"hostConnectionMaxIdleTime" -> _timeoutConfig.hostConnectionMaxIdleTime,
"hostConnectionMaxLifeTime" -> _timeoutConfig.hostConnectionMaxLifeTime,
"requestTimeout" -> _timeoutConfig.requestTimeout,
"readTimeout" -> _timeoutConfig.readTimeout,
"writeCompletionTimeout" -> _timeoutConfig.writeCompletionTimeout,
"tracer" -> Some(_tracer),
"openConnectionsThresholds" -> Some(_openConnectionsThresholds),
"cancelOnHangup" -> Some(_cancelOnHangup),
"logChannelActivity" -> Some(_logChannelActivity),
"daemon" -> Some(_daemon)
)
override def toString = {
"ServerConfig(%s)".format(
toMap flatMap {
case (k, Some(v)) =>
Some("%s=%s".format(k, v))
case _ =>
None
} mkString(", "))
}
def validated: ServerConfig[Req, Rep, Yes, Yes, Yes] = {
_codecFactory getOrElse { throw new IncompleteSpecification("No codec was specified") }
_bindTo getOrElse { throw new IncompleteSpecification("No bindTo was specified") }
    _name getOrElse { throw new IncompleteSpecification("No name was specified") }
copy()
}
}
/**
* A handy Builder for constructing Servers (i.e., binding Services to
* a port). This class is subclassable. Override copy() and build()
* to do your own dirty work.
*
* The main class to use is [[com.twitter.finagle.builder.ServerBuilder]], as so
* {{{
* ServerBuilder()
* .codec(Http)
* .hostConnectionMaxLifeTime(5.minutes)
* .readTimeout(2.minutes)
* .name("servicename")
* .bindTo(new InetSocketAddress(serverPort))
* .build(plusOneService)
* }}}
*
* The `ServerBuilder` requires the definition of `codec`, `bindTo`
* and `name`. In Scala, these are statically type
* checked, and in Java the lack of any of the above causes a runtime
* error.
*
* The `build` method uses an implicit argument to statically
* typecheck the builder (to ensure completeness, see above). The Java
 * compiler cannot provide such an implicit, so we provide a separate
* function in Java to accomplish this. Thus, the Java code for the
* above is
*
* {{{
* ServerBuilder.safeBuild(
* plusOneService,
* ServerBuilder.get()
* .codec(Http)
* .hostConnectionMaxLifeTime(5.minutes)
* .readTimeout(2.minutes)
* .name("servicename")
* .bindTo(new InetSocketAddress(serverPort)));
* }}}
*
* Alternatively, using the `unsafeBuild` method on `ServerBuilder`
* verifies the builder dynamically, resulting in a runtime error
* instead of a compiler error.
*/
class ServerBuilder[Req, Rep, HasCodec, HasBindTo, HasName] private[builder](
val config: ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName]
) {
import ServerConfig._
// Convenient aliases.
type FullySpecifiedConfig = FullySpecified[Req, Rep]
type ThisConfig = ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName]
type This = ServerBuilder[Req, Rep, HasCodec, HasBindTo, HasName]
private[builder] def this() = this(new ServerConfig)
override def toString() = "ServerBuilder(%s)".format(config.toString)
protected def copy[Req1, Rep1, HasCodec1, HasBindTo1, HasName1](
config: ServerConfig[Req1, Rep1, HasCodec1, HasBindTo1, HasName1]
): ServerBuilder[Req1, Rep1, HasCodec1, HasBindTo1, HasName1] =
new ServerBuilder(config)
protected def withConfig[Req1, Rep1, HasCodec1, HasBindTo1, HasName1](
f: ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName] =>
ServerConfig[Req1, Rep1, HasCodec1, HasBindTo1, HasName1]
): ServerBuilder[Req1, Rep1, HasCodec1, HasBindTo1, HasName1] = copy(f(config))
def codec[Req1, Rep1](
codec: Codec[Req1, Rep1]
): ServerBuilder[Req1, Rep1, Yes, HasBindTo, HasName] =
withConfig(_.copy(_codecFactory = Some(Function.const(codec) _)))
def codec[Req1, Rep1](
codecFactory: CodecFactory[Req1, Rep1]#Server
): ServerBuilder[Req1, Rep1, Yes, HasBindTo, HasName] =
withConfig(_.copy(_codecFactory = Some(codecFactory)))
def codec[Req1, Rep1](
codecFactory: CodecFactory[Req1, Rep1]
): ServerBuilder[Req1, Rep1, Yes, HasBindTo, HasName] =
withConfig(_.copy(_codecFactory = Some(codecFactory.server)))
def reportTo(receiver: StatsReceiver): This =
withConfig(_.copy(_statsReceiver = Some(receiver)))
def name(value: String): ServerBuilder[Req, Rep, HasCodec, HasBindTo, Yes] =
withConfig(_.copy(_name = Some(value)))
def sendBufferSize(value: Int): This =
withConfig(_.copy(_bufferSize = config.bufferSize.copy(send = Some(value))))
def recvBufferSize(value: Int): This =
withConfig(_.copy(_bufferSize = config.bufferSize.copy(recv = Some(value))))
def keepAlive(value: Boolean): This =
withConfig(_.copy(_keepAlive = Some(value)))
def backlog(value: Int): This =
withConfig(_.copy(_backlog = Some(value)))
def bindTo(address: SocketAddress): ServerBuilder[Req, Rep, HasCodec, Yes, HasName] =
withConfig(_.copy(_bindTo = Some(address)))
def channelFactory(cf: ServerChannelFactory): This =
withConfig(_.copy(_channelFactory = cf))
def logger(logger: Logger): This =
withConfig(_.copy(_logger = Some(logger)))
def logChannelActivity(v: Boolean): This =
withConfig(_.copy(_logChannelActivity = v))
def tls(certificatePath: String, keyPath: String,
caCertificatePath: String = null, ciphers: String = null, nextProtos: String = null): This =
newFinagleSslEngine(() => Ssl.server(certificatePath, keyPath, caCertificatePath, ciphers, nextProtos))
/**
* Provide a raw SSL engine that is used to establish SSL sessions.
*/
def newSslEngine(newSsl: () => SSLEngine): This =
newFinagleSslEngine(() => new Engine(newSsl()))
def newFinagleSslEngine(v: () => Engine): This =
withConfig(_.copy(_newEngine = Some(v)))
def maxConcurrentRequests(max: Int): This =
withConfig(_.copy(_maxConcurrentRequests = Some(max)))
def hostConnectionMaxIdleTime(howlong: Duration): This =
withConfig(c => c.copy(_timeoutConfig = c.timeoutConfig.copy(hostConnectionMaxIdleTime = Some(howlong))))
def hostConnectionMaxLifeTime(howlong: Duration): This =
withConfig(c => c.copy(_timeoutConfig = c.timeoutConfig.copy(hostConnectionMaxLifeTime = Some(howlong))))
def requestTimeout(howlong: Duration): This =
withConfig(c => c.copy(_timeoutConfig = c.timeoutConfig.copy(requestTimeout = Some(howlong))))
def readTimeout(howlong: Duration): This =
withConfig(c => c.copy(_timeoutConfig = c.timeoutConfig.copy(readTimeout = Some(howlong))))
def writeCompletionTimeout(howlong: Duration): This =
withConfig(c => c.copy(_timeoutConfig = c.timeoutConfig.copy(writeCompletionTimeout = Some(howlong))))
def monitor(mFactory: (String, SocketAddress) => Monitor): This =
withConfig(_.copy(_monitor = Some(mFactory)))
@deprecated("Use tracer() instead", "7.0.0")
def tracerFactory(factory: Tracer.Factory): This =
tracer(factory())
// API compatibility method
@deprecated("Use tracer() instead", "7.0.0")
def tracerFactory(t: Tracer): This =
tracer(t)
def tracer(t: Tracer): This =
withConfig(_.copy(_tracer = t))
/**
   * Cancel pending futures whenever the connection is shut down.
* This defaults to true.
*/
def cancelOnHangup(yesOrNo: Boolean): This =
withConfig(_.copy(_cancelOnHangup = yesOrNo))
def openConnectionsThresholds(thresholds: OpenConnectionsThresholds): This =
withConfig(_.copy(_openConnectionsThresholds = Some(thresholds)))
/**
   * When true, the server is daemonized. As with Java threads, a
   * process can exit only when all remaining servers are daemonized.
* False by default.
*/
def daemon(daemonize: Boolean): This =
withConfig(_.copy(_daemon = daemonize))
/* Builder methods follow */
/**
* Construct the Server, given the provided Service.
*/
def build(service: Service[Req, Rep]) (
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ServerConfigEvidence[HasCodec, HasBindTo, HasName]
): Server = build(ServiceFactory.const(service))
@deprecated("Used for ABI compat", "5.0.1")
def build(service: Service[Req, Rep],
THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = build(ServiceFactory.const(service), THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION)
/**
* Construct the Server, given the provided Service factory.
*/
@deprecated("Use the ServiceFactory variant instead", "5.0.1")
def build(serviceFactory: () => Service[Req, Rep])(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = build((_:ClientConnection) => serviceFactory())(THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION)
/**
* Construct the Server, given the provided ServiceFactory. This
* is useful if the protocol is stateful (e.g., requires authentication
* or supports transactions).
*/
@deprecated("Use the ServiceFactory variant instead", "5.0.1")
def build(serviceFactory: (ClientConnection) => Service[Req, Rep])(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = build(new ServiceFactory[Req, Rep] {
def apply(conn: ClientConnection) = Future.value(serviceFactory(conn))
def close(deadline: Time) = Future.Done
}, THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION)
/**
* Construct the Server, given the provided ServiceFactory. This
* is useful if the protocol is stateful (e.g., requires authentication
* or supports transactions).
*/
def build(serviceFactory: ServiceFactory[Req, Rep])(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ServerConfigEvidence[HasCodec, HasBindTo, HasName]
): Server = new Server {
import com.twitter.finagle.server._
import com.twitter.finagle.netty3._
val codecConfig = ServerCodecConfig(
serviceName = config.name, boundAddress = config.bindTo)
val codec = config.codecFactory(codecConfig)
val statsReceiver = config.statsReceiver map(_.scope(config.name)) getOrElse NullStatsReceiver
val logger = config.logger getOrElse Logger.getLogger(config.name)
val monitor = config.monitor map(_(config.name, config.bindTo)) getOrElse NullMonitor
val tracer = config.tracer
val timer = DefaultTimer.twitter
val nettyTimer = DefaultTimer
val listener = Netty3Listener[Rep, Req](
name = config.name,
pipelineFactory = codec.pipelineFactory,
channelSnooper =
if (config.logChannelActivity) Some(ChannelSnooper(config.name)(logger.info))
else None,
channelFactory = config.channelFactory,
bootstrapOptions = {
val o = new mutable.MapBuilder[String, Object, Map[String, Object]](Map())
o += "soLinger" -> (0: java.lang.Integer)
o += "reuseAddress" -> java.lang.Boolean.TRUE
o += "child.tcpNoDelay" -> java.lang.Boolean.TRUE
for (v <- config.backlog) o += "backlog" -> (v: java.lang.Integer)
for (v <- config.bufferSize.send) o += "child.sendBufferSize" -> (v: java.lang.Integer)
for (v <- config.bufferSize.recv) o += "child.receiveBufferSize" -> (v: java.lang.Integer)
for (v <- config.keepAlive) o += "child.keepAlive" -> (v: java.lang.Boolean)
o.result()
},
channelMaxIdleTime = config.hostConnectionMaxIdleTime getOrElse Duration.Top,
channelMaxLifeTime = config.hostConnectionMaxLifeTime getOrElse Duration.Top,
channelReadTimeout = config.readTimeout getOrElse Duration.Top,
channelWriteCompletionTimeout = config.writeCompletionTimeout getOrElse Duration.Top,
tlsConfig = config.newEngine map(Netty3ListenerTLSConfig),
timer = timer,
nettyTimer = nettyTimer,
statsReceiver = statsReceiver,
monitor = monitor,
logger = logger
)
val server = DefaultServer[Req, Rep, Rep, Req](
name = config.name,
listener = listener,
serviceTransport = codec.newServerDispatcher _,
requestTimeout = config.requestTimeout getOrElse Duration.Top,
maxConcurrentRequests = config.maxConcurrentRequests getOrElse Int.MaxValue,
cancelOnHangup = config.cancelOnHangup,
prepare = codec.prepareConnFactory(_),
timer = timer,
monitor = monitor,
logger = logger,
statsReceiver = statsReceiver,
tracer = tracer
)
val listeningServer = server.serve(config.bindTo, serviceFactory)
val closed = new AtomicBoolean(false)
if (!config.daemon) ExitGuard.guard()
def close(deadline: Time): Future[Unit] = {
if (!closed.compareAndSet(false, true)) {
logger.log(Level.WARNING, "Server closed multiple times!",
new Exception/*stack trace please*/)
return Future.exception(new IllegalStateException)
}
listeningServer.close(deadline) ensure {
if (!config.daemon) ExitGuard.unguard()
}
}
val localAddress = listeningServer.boundAddress
}
@deprecated("Used for ABI compat", "5.0.1")
def build(serviceFactory: ServiceFactory[Req, Rep],
THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = build(serviceFactory)(
new ServerConfigEvidence[HasCodec, HasBindTo, HasName]{})
/**
* Construct a Service, with runtime checks for builder
* completeness.
*/
def unsafeBuild(service: Service[Req, Rep]): Server =
withConfig(_.validated).build(service)
}
|
joshbedo/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/builder/ServerBuilder.scala
|
Scala
|
apache-2.0
| 20,650 |
package controllers
import lila.app._
import lila.notify.Notification.Notifies
import play.api.libs.json._
import views.html
object Notify extends LilaController {
val env = Env.notifyModule
import env.jsonHandlers._
def recent(page: Int) = Auth { implicit ctx =>
me =>
val notifies = Notifies(me.id)
env.api.getNotificationsAndCount(notifies, page) map { res =>
Ok(Json toJson res) as JSON
}
}
}
|
clarkerubber/lila
|
app/controllers/Notify.scala
|
Scala
|
agpl-3.0
| 441 |
package slamdata.engine.std
import scalaz._
import Scalaz._
import slamdata.engine.{Data, Func, Type, Mapping, SemanticError}
import SemanticError._
import Validation.{success, failure}
import NonEmptyList.nel
trait StringLib extends Library {
private def stringApply(f: (String, String) => String): Func.Typer = {
case Type.Const(Data.Str(a)) :: Type.Const(Data.Str(b)) :: Nil => success(Type.Const(Data.Str(f(a, b))))
case Type.Str :: Type.Const(Data.Str(_)) :: Nil => success(Type.Str)
case Type.Const(Data.Str(_)) :: Type.Str :: Nil => success(Type.Str)
case Type.Str :: Type.Str :: Nil => success(Type.Str)
case t :: _ => failure(nel(TypeError(Type.Str, t, None), Nil))
case Nil => failure(nel(GenericError("expected arguments"), Nil))
}
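  // For illustration, given the cases above the typer produced by stringApply(_ + _):
  //   - constant-folds Type.Const(Data.Str("foo")) :: Type.Const(Data.Str("bar")) :: Nil
  //     to success(Type.Const(Data.Str("foobar")))
  //   - widens Type.Str :: Type.Str :: Nil (and the mixed constant cases) to success(Type.Str)
  //   - fails with a TypeError when the first argument is not a string type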
private val StringUnapply: Func.Untyper = {
case Type.Str => success(Type.Str :: Type.Str :: Nil)
case t => failure(nel(TypeError(Type.Str, t, None), Nil))
}
// TODO: variable arity
val Concat = Mapping("concat", "Concatenates two (or more) string values", Type.Str :: Type.Str :: Nil,
stringApply(_ + _),
StringUnapply
)
val Like = Mapping(
"(like)",
"Determines if a string value matches a pattern",
Type.Str :: Type.Str :: Nil,
ts => ts match {
case Type.Str :: Type.Const(Data.Str(_)) :: Nil =>
success(Type.Bool)
case Type.Str :: t :: Nil =>
failure(nel(GenericError("expected string constant for LIKE"), Nil))
case t :: Type.Const(Data.Str(_)) :: Nil =>
failure(nel(TypeError(Type.Str, t, None), Nil))
case _ =>
failure(nel(GenericError("expected arguments"), Nil))
},
Type.typecheck(_, Type.Bool) map { _ => Type.Str :: Type.Str :: Nil }
)
val Length = Mapping(
"length",
"Counts the number of characters in a string.",
Type.Str :: Nil,
_ match {
case Type.Const(Data.Str(str)) :: Nil =>
success(Type.Const(Data.Int(str.length)))
case Type.Str :: Nil => success(Type.Int)
case t :: Nil => failure(nel(TypeError(Type.Str, t, None), Nil))
case _ => failure(nel(GenericError("expected arguments"), Nil))
},
Type.typecheck(_, Type.Int) map { _ => Type.Str :: Nil }
)
val Lower = Mapping(
"lower",
"Converts the string to lower case.",
Type.Str :: Nil,
_ match {
case Type.Const(Data.Str(str)) :: Nil =>
success(Type.Const (Data.Str(str.toLowerCase)))
      case Type.Str :: Nil => success(Type.Str)
case t :: Nil => failure(nel(TypeError(Type.Str, t, None), Nil))
case _ => failure(nel(GenericError("expected arguments"), Nil))
},
Type.typecheck(_, Type.Str) map { _ => Type.Str :: Nil }
)
val Upper = Mapping(
"upper",
"Converts the string to upper case.",
Type.Str :: Nil,
_ match {
case Type.Const(Data.Str(str)) :: Nil =>
success(Type.Const (Data.Str(str.toUpperCase)))
      case Type.Str :: Nil => success(Type.Str)
case t :: Nil => failure(nel(TypeError(Type.Str, t, None), Nil))
case _ => failure(nel(GenericError("expected arguments"), Nil))
},
Type.typecheck(_, Type.Str) map { _ => Type.Str :: Nil }
)
val Substring = Mapping(
"substring",
"Extracts a portion of the string",
Type.Str :: Type.Int :: Type.Int :: Nil,
_ match {
case Type.Const(Data.Str(str))
:: Type.Const(Data.Int(from0))
:: Type.Const(Data.Int(for0))
:: Nil => {
val from = from0.intValue - 1
success(Type.Const(Data.Str(str.substring(from, from + for0.intValue))))
}
case List(Type.Str, Type.Const(Data.Int(_)), Type.Const(Data.Int(_))) =>
success(Type.Str)
case List(Type.Str, Type.Const(Data.Int(_)), Type.Int) =>
success(Type.Str)
case List(Type.Str, Type.Int, Type.Const(Data.Int(_))) =>
success(Type.Str)
case List(Type.Str, Type.Int, Type.Int) =>
success(Type.Str)
case Type.Str :: _ :: _ :: Nil =>
failure(nel(GenericError("expected integer arguments for SUBSTRING"), Nil))
case t :: _ :: _ :: Nil =>
failure(nel(TypeError(Type.Str, t, None), Nil))
case _ =>
failure(nel(GenericError("expected arguments"), Nil))
},
Type.typecheck(_, Type.Str) map { _ => Type.Str :: Type.Int :: Type.Int :: Nil }
)
def functions = Concat :: Like :: Length :: Lower :: Upper :: Substring :: Nil
}
object StringLib extends StringLib
|
mossprescott/quasar
|
src/main/scala/slamdata/engine/std/string.scala
|
Scala
|
agpl-3.0
| 4,562 |
package com.bwsw.cloudstack.pulse.models
import com.bwsw.cloudstack.pulse.influx.{CounterField, QueryBuilder}
/**
* Created by Ivan Kudryavtsev on 27.07.17.
*/
class DiskInfluxModel extends InfluxModel {
override def prepareQuery(params: Map[String, String]): String = {
val aggregation = params("aggregation")
val shift = params("shift")
val range = params("range")
val q = QueryBuilder()
.select
.field("ioErrors", CounterField("ioErrors", aggregation))
.field("readBytes", CounterField("readBytes", aggregation))
.field("writeBytes", CounterField("writeBytes", aggregation))
.field("readIOPS", CounterField("readIOPS", aggregation))
.field("writeIOPS", CounterField("writeIOPS", aggregation))
.from("disk")
.where
.andEq("vmUuid", params("uuid"))
.andEq("image", params("diskUuid"))
.timeSpan(aggregation, range, shift)
.groupByAggregation
.build
q
}
}
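// For illustration only (every value below is hypothetical), the model is driven by a
// plain params map and returns the query string assembled by QueryBuilder:
// {{{
// val query = new DiskInfluxModel().prepareQuery(Map(
//   "aggregation" -> "5m",
//   "shift"       -> "0m",
//   "range"       -> "1h",
//   "uuid"        -> "vm-uuid-1",
//   "diskUuid"    -> "disk-uuid-1"))
// }}}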
|
bwsw/cs-pulse-server
|
src/main/scala-2.12/com/bwsw/cloudstack/pulse/models/DiskInfluxModel.scala
|
Scala
|
apache-2.0
| 986 |
package frp
import scala.util.DynamicVariable
class Signal[T](expr: => T) {
import Signal._
private var myExpr: () => T = _
private var myValue: T = _
private var observers: Set[Signal[_]] = Set()
private var observed: List[Signal[_]] = Nil
update(expr)
protected def computeValue(): Unit = {
for (sig <- observed) sig.observers -= this
observed = Nil
val newValue: T = caller.withValue(this)(myExpr())
if (myValue != newValue) {
myValue = newValue
val obs = observers
observers = Set()
obs.foreach(_.computeValue())
}
}
protected def update(expr: => T): Unit = {
myExpr = () => expr
computeValue()
}
def apply(): T = {
observers += caller.value
assert(!caller.value.observers.contains(this), "Cyclic signal definition")
caller.value.observed ::= this
myValue
}
}
class Var[T](expr: => T) extends Signal[T](expr) {
override def update(expr: => T): Unit = super.update(expr)
}
object Var {
def apply[T](expr: => T) = new Var(expr)
}
object NoSignal extends Signal[Nothing](???) {
override def computeValue() = ()
}
object Signal {
  // scala.util.DynamicVariable (imported above) provides the withValue/value interface needed
  // here; the StackableVariable helper referenced originally is not defined in this file.
  private val caller = new DynamicVariable[Signal[_]](NoSignal)
def apply[T](expr: => T) = new Signal(expr)
}
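// A minimal usage sketch (the names below are illustrative):
// {{{
// val base = Var(1)
// val doubled = Signal(base() * 2)
// doubled()    // 2
// base() = 21  // Var.update re-evaluates dependent signals
// doubled()    // 42
// }}}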
|
mpakhomov/scala-progfun-2015
|
src/main/scala/frp/Signal.scala
|
Scala
|
mit
| 1,321 |
package com.mdataset.service.api.exchange
import com.ecfront.common.Resp
import com.ecfront.ez.framework.service.eventbus.EventBusProcessor
import com.mdataset.lib.basic.BasicContext
import com.mdataset.lib.basic.model.{MdsCollectStatusDTO, MdsSourceMainDTO}
/**
  * Default message-based implementation of the Worker exchange API.
  *
  * Uses the EventBus channel.
  */
object MdsDefaultAPIExchangeMaster extends MdsAPIExchangeMaster {
override protected def fetchRegisterResp(callback: MdsSourceMainDTO => Resp[Void]): Unit = {
EventBusProcessor.Async.consumerAdv[MdsSourceMainDTO](BasicContext.FLAG_API_REGISTER, {
(source, reply) =>
reply(callback(source))
})
}
override protected def fetchUnRegisterResp(callback: String => Resp[Void]): Unit = {
EventBusProcessor.Async.consumerAdv[String](BasicContext.FLAG_API_UN_REGISTER, {
(code, reply) =>
reply(callback(code))
})
}
override protected def fetchCollectExecReq(status: MdsCollectStatusDTO, callback: Resp[MdsCollectStatusDTO] => Unit): Unit = {
EventBusProcessor.Async.sendAdv[Resp[MdsCollectStatusDTO]](BasicContext.FLAG_API_COLLECT_EXEC + status.code + "_" + status.item_code, status, {
(resp, _) =>
callback(resp)
})
}
override protected def fetchCollectTestReq(code: String, itemCode: String, callback: Resp[Void] => Unit): Unit = {
EventBusProcessor.Async.sendAdv[Resp[Void]](BasicContext.FLAG_API_COLLECT_TEST + code + "_" + itemCode, itemCode, {
(resp, _) =>
callback(resp)
})
}
}
|
MDataSet/mds
|
modules/service_api/src/main/scala/com/mdataset/service/api/exchange/MdsDefaultAPIExchangeMaster.scala
|
Scala
|
apache-2.0
| 1,527 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import javax.annotation.Nullable
import scala.annotation.tailrec
import scala.collection.mutable
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types._
/**
* A collection of [[Rule]] that can be used to coerce differing types that participate in
* operations into compatible ones.
*
* Notes about type widening / tightest common types: Broadly, there are two cases when we need
* to widen data types (e.g. union, binary comparison). In case 1, we are looking for a common
* data type for two or more data types, and in this case no loss of precision is allowed. Examples
* include type inference in JSON (e.g. what's the column's data type if one row is an integer
* while the other row is a long?). In case 2, we are looking for a widened data type with
* some acceptable loss of precision (e.g. there is no common type for double and decimal because
* double's range is larger than decimal, and yet decimal is more precise than double, but in
* union we would cast the decimal into double).
*/
object TypeCoercion {
val typeCoercionRules =
PropagateTypes ::
InConversion ::
WidenSetOperationTypes ::
PromoteStrings ::
DecimalPrecision ::
BooleanEquality ::
FunctionArgumentConversion ::
CaseWhenCoercion ::
IfCoercion ::
Division ::
PropagateTypes ::
ImplicitTypeCasts ::
DateTimeOperations ::
Nil
// See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types.
// The conversion for integral and floating point types have a linear widening hierarchy:
val numericPrecedence =
IndexedSeq(
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType)
/**
* Case 1 type widening (see the classdoc comment above for TypeCoercion).
*
* Find the tightest common type of two types that might be used in a binary expression.
* This handles all numeric types except fixed-precision decimals interacting with each other or
* with primitive types, because in that case the precision and scale of the result depends on
* the operation. Those rules are implemented in [[DecimalPrecision]].
*/
val findTightestCommonType: (DataType, DataType) => Option[DataType] = {
case (t1, t2) if t1 == t2 => Some(t1)
case (NullType, t1) => Some(t1)
case (t1, NullType) => Some(t1)
case (t1: IntegralType, t2: DecimalType) if t2.isWiderThan(t1) =>
Some(t2)
case (t1: DecimalType, t2: IntegralType) if t1.isWiderThan(t2) =>
Some(t1)
// Promote numeric types to the highest of the two
case (t1: NumericType, t2: NumericType)
if !t1.isInstanceOf[DecimalType] && !t2.isInstanceOf[DecimalType] =>
val index = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2)
Some(numericPrecedence(index))
case (_: TimestampType, _: DateType) | (_: DateType, _: TimestampType) =>
Some(TimestampType)
case _ => None
}
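  // For illustration, based on the cases above:
  //   findTightestCommonType(IntegerType, LongType)     == Some(LongType)
  //   findTightestCommonType(NullType, StringType)      == Some(StringType)
  //   findTightestCommonType(DateType, TimestampType)   == Some(TimestampType)
  //   findTightestCommonType(StringType, IntegerType)   == None   // left to other rules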
/** Promotes all the way to StringType. */
private def stringPromotion(dt1: DataType, dt2: DataType): Option[DataType] = (dt1, dt2) match {
case (StringType, t2: AtomicType) if t2 != BinaryType && t2 != BooleanType => Some(StringType)
case (t1: AtomicType, StringType) if t1 != BinaryType && t1 != BooleanType => Some(StringType)
case _ => None
}
/**
* This function determines the target type of a comparison operator when one operand
   * is a String and the other is not. It also handles the case where one operand is a Date and
   * the other is a Timestamp by making the target type String.
*/
val findCommonTypeForBinaryComparison: (DataType, DataType) => Option[DataType] = {
// We should cast all relative timestamp/date/string comparison into string comparisons
// This behaves as a user would expect because timestamp strings sort lexicographically.
// i.e. TimeStamp(2013-01-01 00:00 ...) < "2014" = true
case (StringType, DateType) => Some(StringType)
case (DateType, StringType) => Some(StringType)
case (StringType, TimestampType) => Some(StringType)
case (TimestampType, StringType) => Some(StringType)
case (TimestampType, DateType) => Some(StringType)
case (DateType, TimestampType) => Some(StringType)
case (StringType, NullType) => Some(StringType)
case (NullType, StringType) => Some(StringType)
case (l: StringType, r: AtomicType) if r != StringType => Some(r)
case (l: AtomicType, r: StringType) if (l != StringType) => Some(l)
case (l, r) => None
}
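  // For illustration, based on the cases above:
  //   findCommonTypeForBinaryComparison(StringType, DateType)    == Some(StringType)
  //   findCommonTypeForBinaryComparison(TimestampType, DateType) == Some(StringType)
  //   findCommonTypeForBinaryComparison(StringType, IntegerType) == Some(IntegerType)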
/**
* Case 2 type widening (see the classdoc comment above for TypeCoercion).
*
* i.e. the main difference with [[findTightestCommonType]] is that here we allow some
* loss of precision when widening decimal and double, and promotion to string.
*/
private[analysis] def findWiderTypeForTwo(t1: DataType, t2: DataType): Option[DataType] = {
findTightestCommonType(t1, t2)
.orElse(findWiderTypeForDecimal(t1, t2))
.orElse(stringPromotion(t1, t2))
.orElse((t1, t2) match {
case (ArrayType(et1, containsNull1), ArrayType(et2, containsNull2)) =>
findWiderTypeForTwo(et1, et2).map(ArrayType(_, containsNull1 || containsNull2))
case _ => None
})
}
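  // For illustration, based on the definition above:
  //   findWiderTypeForTwo(IntegerType, DoubleType)        == Some(DoubleType)
  //   findWiderTypeForTwo(DoubleType, DecimalType(20, 2)) == Some(DoubleType)   // precision loss allowed
  //   findWiderTypeForTwo(StringType, IntegerType)        == Some(StringType)   // string promotion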
private def findWiderCommonType(types: Seq[DataType]): Option[DataType] = {
types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match {
case Some(d) => findWiderTypeForTwo(d, c)
case None => None
})
}
/**
   * Similar to [[findWiderTypeForTwo]] in that it can handle decimal types, but it can't promote
   * to string. If the wider decimal type exceeds the system limitation, this rule will truncate
   * the decimal type before returning it.
*/
private[analysis] def findWiderTypeWithoutStringPromotionForTwo(
t1: DataType,
t2: DataType): Option[DataType] = {
findTightestCommonType(t1, t2)
.orElse(findWiderTypeForDecimal(t1, t2))
.orElse((t1, t2) match {
case (ArrayType(et1, containsNull1), ArrayType(et2, containsNull2)) =>
findWiderTypeWithoutStringPromotionForTwo(et1, et2)
.map(ArrayType(_, containsNull1 || containsNull2))
case _ => None
})
}
def findWiderTypeWithoutStringPromotion(types: Seq[DataType]): Option[DataType] = {
types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match {
case Some(d) => findWiderTypeWithoutStringPromotionForTwo(d, c)
case None => None
})
}
/**
* Finds a wider type when one or both types are decimals. If the wider decimal type exceeds
   * the system limitation, this rule will truncate the decimal type. If a decimal and other fractional
* types are compared, returns a double type.
*/
private def findWiderTypeForDecimal(dt1: DataType, dt2: DataType): Option[DataType] = {
(dt1, dt2) match {
case (t1: DecimalType, t2: DecimalType) =>
Some(DecimalPrecision.widerDecimalType(t1, t2))
case (t: IntegralType, d: DecimalType) =>
Some(DecimalPrecision.widerDecimalType(DecimalType.forType(t), d))
case (d: DecimalType, t: IntegralType) =>
Some(DecimalPrecision.widerDecimalType(DecimalType.forType(t), d))
case (_: FractionalType, _: DecimalType) | (_: DecimalType, _: FractionalType) =>
Some(DoubleType)
case _ => None
}
}
private def haveSameType(exprs: Seq[Expression]): Boolean =
exprs.map(_.dataType).distinct.length == 1
/**
* Applies any changes to [[AttributeReference]] data types that are made by other rules to
* instances higher in the query tree.
*/
object PropagateTypes extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
// No propagation required for leaf nodes.
case q: LogicalPlan if q.children.isEmpty => q
// Don't propagate types from unresolved children.
case q: LogicalPlan if !q.childrenResolved => q
case q: LogicalPlan =>
val inputMap = q.inputSet.toSeq.map(a => (a.exprId, a)).toMap
q transformExpressions {
case a: AttributeReference =>
inputMap.get(a.exprId) match {
// This can happen when an Attribute reference is born in a non-leaf node, for
// example due to a call to an external script like in the Transform operator.
// TODO: Perhaps those should actually be aliases?
case None => a
// Leave the same if the dataTypes match.
case Some(newType) if a.dataType == newType.dataType => a
case Some(newType) =>
logDebug(s"Promoting $a to $newType in ${q.simpleString}")
newType
}
}
}
}
/**
* Widens numeric types and converts strings to numbers when appropriate.
*
* Loosely based on rules from "Hadoop: The Definitive Guide" 2nd edition, by Tom White
*
* The implicit conversion rules can be summarized as follows:
* - Any integral numeric type can be implicitly converted to a wider type.
* - All the integral numeric types, FLOAT, and (perhaps surprisingly) STRING can be implicitly
* converted to DOUBLE.
* - TINYINT, SMALLINT, and INT can all be converted to FLOAT.
* - BOOLEAN types cannot be converted to any other type.
* - Any integral numeric type can be implicitly converted to decimal type.
   * - Two different decimal types will be converted into a decimal type wide enough for both.
   * - A decimal type will be converted into double when a float or double appears together with it.
*
* Additionally, all types when UNION-ed with strings will be promoted to strings.
* Other string conversions are handled by PromoteStrings.
*
* Widening types might result in loss of precision in the following cases:
* - IntegerType to FloatType
* - LongType to FloatType
* - LongType to DoubleType
* - DecimalType to Double
*
* This rule is only applied to Union/Except/Intersect
*/
object WidenSetOperationTypes extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p if p.analyzed => p
case s @ SetOperation(left, right) if s.childrenResolved &&
left.output.length == right.output.length && !s.resolved =>
val newChildren: Seq[LogicalPlan] = buildNewChildrenWithWiderTypes(left :: right :: Nil)
assert(newChildren.length == 2)
s.makeCopy(Array(newChildren.head, newChildren.last))
case s: Union if s.childrenResolved &&
s.children.forall(_.output.length == s.children.head.output.length) && !s.resolved =>
val newChildren: Seq[LogicalPlan] = buildNewChildrenWithWiderTypes(s.children)
s.makeCopy(Array(newChildren))
}
/** Build new children with the widest types for each attribute among all the children */
private def buildNewChildrenWithWiderTypes(children: Seq[LogicalPlan]): Seq[LogicalPlan] = {
require(children.forall(_.output.length == children.head.output.length))
// Get a sequence of data types, each of which is the widest type of this specific attribute
// in all the children
val targetTypes: Seq[DataType] =
getWidestTypes(children, attrIndex = 0, mutable.Queue[DataType]())
if (targetTypes.nonEmpty) {
// Add an extra Project if the targetTypes are different from the original types.
children.map(widenTypes(_, targetTypes))
} else {
// Unable to find a target type to widen, then just return the original set.
children
}
}
/** Get the widest type for each attribute in all the children */
@tailrec private def getWidestTypes(
children: Seq[LogicalPlan],
attrIndex: Int,
castedTypes: mutable.Queue[DataType]): Seq[DataType] = {
// Return the result after the widen data types have been found for all the children
if (attrIndex >= children.head.output.length) return castedTypes.toSeq
// For the attrIndex-th attribute, find the widest type
findWiderCommonType(children.map(_.output(attrIndex).dataType)) match {
// If unable to find an appropriate widen type for this column, return an empty Seq
case None => Seq.empty[DataType]
// Otherwise, record the result in the queue and find the type for the next column
case Some(widenType) =>
castedTypes.enqueue(widenType)
getWidestTypes(children, attrIndex + 1, castedTypes)
}
}
/** Given a plan, add an extra project on top to widen some columns' data types. */
private def widenTypes(plan: LogicalPlan, targetTypes: Seq[DataType]): LogicalPlan = {
val casted = plan.output.zip(targetTypes).map {
case (e, dt) if e.dataType != dt => Alias(Cast(e, dt), e.name)()
case (e, _) => e
}
Project(casted, plan)
}
}
/**
* Promotes strings that appear in arithmetic expressions.
*/
object PromoteStrings extends Rule[LogicalPlan] {
private def castExpr(expr: Expression, targetType: DataType): Expression = {
(expr.dataType, targetType) match {
case (NullType, dt) => Literal.create(null, targetType)
case (l, dt) if (l != dt) => Cast(expr, targetType)
case _ => expr
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case a @ BinaryArithmetic(left @ StringType(), right) =>
a.makeCopy(Array(Cast(left, DoubleType), right))
case a @ BinaryArithmetic(left, right @ StringType()) =>
a.makeCopy(Array(left, Cast(right, DoubleType)))
// For equality between string and timestamp we cast the string to a timestamp
// so that things like rounding of subsecond precision does not affect the comparison.
case p @ Equality(left @ StringType(), right @ TimestampType()) =>
p.makeCopy(Array(Cast(left, TimestampType), right))
case p @ Equality(left @ TimestampType(), right @ StringType()) =>
p.makeCopy(Array(left, Cast(right, TimestampType)))
case p @ BinaryComparison(left, right)
if findCommonTypeForBinaryComparison(left.dataType, right.dataType).isDefined =>
val commonType = findCommonTypeForBinaryComparison(left.dataType, right.dataType).get
p.makeCopy(Array(castExpr(left, commonType), castExpr(right, commonType)))
case Sum(e @ StringType()) => Sum(Cast(e, DoubleType))
case Average(e @ StringType()) => Average(Cast(e, DoubleType))
case StddevPop(e @ StringType()) => StddevPop(Cast(e, DoubleType))
case StddevSamp(e @ StringType()) => StddevSamp(Cast(e, DoubleType))
case VariancePop(e @ StringType()) => VariancePop(Cast(e, DoubleType))
case VarianceSamp(e @ StringType()) => VarianceSamp(Cast(e, DoubleType))
case Skewness(e @ StringType()) => Skewness(Cast(e, DoubleType))
case Kurtosis(e @ StringType()) => Kurtosis(Cast(e, DoubleType))
}
}
/**
* Handles type coercion for both IN expression with subquery and IN
* expressions without subquery.
* 1. In the first case, find the common type by comparing the left hand side (LHS)
* expression types against corresponding right hand side (RHS) expression derived
* from the subquery expression's plan output. Inject appropriate casts in the
* LHS and RHS side of IN expression.
*
* 2. In the second case, convert the value and in list expressions to the
* common operator type by looking at all the argument types and finding
* the closest one that all the arguments can be cast to. When no common
* operator type is found the original expression will be returned and an
* Analysis Exception will be raised at the type checking phase.
*/
object InConversion extends Rule[LogicalPlan] {
private def flattenExpr(expr: Expression): Seq[Expression] = {
expr match {
// Multi columns in IN clause is represented as a CreateNamedStruct.
// flatten the named struct to get the list of expressions.
case cns: CreateNamedStruct => cns.valExprs
case expr => Seq(expr)
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
// Handle type casting required between value expression and subquery output
// in IN subquery.
case i @ In(a, Seq(ListQuery(sub, children, exprId)))
if !i.resolved && flattenExpr(a).length == sub.output.length =>
// LHS is the value expression of IN subquery.
val lhs = flattenExpr(a)
// RHS is the subquery output.
val rhs = sub.output
val commonTypes = lhs.zip(rhs).flatMap { case (l, r) =>
findCommonTypeForBinaryComparison(l.dataType, r.dataType)
.orElse(findTightestCommonType(l.dataType, r.dataType))
}
// The number of columns/expressions must match between LHS and RHS of an
// IN subquery expression.
if (commonTypes.length == lhs.length) {
val castedRhs = rhs.zip(commonTypes).map {
case (e, dt) if e.dataType != dt => Alias(Cast(e, dt), e.name)()
case (e, _) => e
}
val castedLhs = lhs.zip(commonTypes).map {
case (e, dt) if e.dataType != dt => Cast(e, dt)
case (e, _) => e
}
// Before constructing the In expression, wrap the multi values in LHS
// in a CreatedNamedStruct.
val newLhs = castedLhs match {
case Seq(lhs) => lhs
case _ => CreateStruct(castedLhs)
}
In(newLhs, Seq(ListQuery(Project(castedRhs, sub), children, exprId)))
} else {
i
}
case i @ In(a, b) if b.exists(_.dataType != a.dataType) =>
findWiderCommonType(i.children.map(_.dataType)) match {
case Some(finalDataType) => i.withNewChildren(i.children.map(Cast(_, finalDataType)))
case None => i
}
}
}
/**
* Changes numeric values to booleans so that expressions like true = 1 can be evaluated.
*/
object BooleanEquality extends Rule[LogicalPlan] {
private val trueValues = Seq(1.toByte, 1.toShort, 1, 1L, Decimal.ONE)
private val falseValues = Seq(0.toByte, 0.toShort, 0, 0L, Decimal.ZERO)
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
// Hive treats (true = 1) as true and (false = 0) as true,
// all other cases are considered as false.
// We may simplify the expression if one side is literal numeric values
// TODO: Maybe these rules should go into the optimizer.
case EqualTo(bool @ BooleanType(), Literal(value, _: NumericType))
if trueValues.contains(value) => bool
case EqualTo(bool @ BooleanType(), Literal(value, _: NumericType))
if falseValues.contains(value) => Not(bool)
case EqualTo(Literal(value, _: NumericType), bool @ BooleanType())
if trueValues.contains(value) => bool
case EqualTo(Literal(value, _: NumericType), bool @ BooleanType())
if falseValues.contains(value) => Not(bool)
case EqualNullSafe(bool @ BooleanType(), Literal(value, _: NumericType))
if trueValues.contains(value) => And(IsNotNull(bool), bool)
case EqualNullSafe(bool @ BooleanType(), Literal(value, _: NumericType))
if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
case EqualNullSafe(Literal(value, _: NumericType), bool @ BooleanType())
if trueValues.contains(value) => And(IsNotNull(bool), bool)
case EqualNullSafe(Literal(value, _: NumericType), bool @ BooleanType())
if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
case EqualTo(left @ BooleanType(), right @ NumericType()) =>
EqualTo(Cast(left, right.dataType), right)
case EqualTo(left @ NumericType(), right @ BooleanType()) =>
EqualTo(left, Cast(right, left.dataType))
case EqualNullSafe(left @ BooleanType(), right @ NumericType()) =>
EqualNullSafe(Cast(left, right.dataType), right)
case EqualNullSafe(left @ NumericType(), right @ BooleanType()) =>
EqualNullSafe(left, Cast(right, left.dataType))
}
}
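  // For illustration, with the rule above a predicate like `bool_col = 1` simplifies to
  // `bool_col` and `bool_col = 0` to `NOT bool_col`. Against a non-literal numeric, one side is
  // cast instead: EqualTo(boolExpr, intExpr) becomes EqualTo(Cast(boolExpr, IntegerType), intExpr)
  // (boolExpr and intExpr being placeholder expressions of boolean and integer type).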
/**
* This ensure that the types for various functions are as expected.
*/
object FunctionArgumentConversion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case a @ CreateArray(children) if !haveSameType(children) =>
val types = children.map(_.dataType)
findWiderCommonType(types) match {
case Some(finalDataType) => CreateArray(children.map(Cast(_, finalDataType)))
case None => a
}
case m @ CreateMap(children) if m.keys.length == m.values.length &&
(!haveSameType(m.keys) || !haveSameType(m.values)) =>
val newKeys = if (haveSameType(m.keys)) {
m.keys
} else {
val types = m.keys.map(_.dataType)
findWiderCommonType(types) match {
case Some(finalDataType) => m.keys.map(Cast(_, finalDataType))
case None => m.keys
}
}
val newValues = if (haveSameType(m.values)) {
m.values
} else {
val types = m.values.map(_.dataType)
findWiderCommonType(types) match {
case Some(finalDataType) => m.values.map(Cast(_, finalDataType))
case None => m.values
}
}
CreateMap(newKeys.zip(newValues).flatMap { case (k, v) => Seq(k, v) })
// Promote SUM, SUM DISTINCT and AVERAGE to largest types to prevent overflows.
case s @ Sum(e @ DecimalType()) => s // Decimal is already the biggest.
case Sum(e @ IntegralType()) if e.dataType != LongType => Sum(Cast(e, LongType))
case Sum(e @ FractionalType()) if e.dataType != DoubleType => Sum(Cast(e, DoubleType))
case s @ Average(e @ DecimalType()) => s // Decimal is already the biggest.
case Average(e @ IntegralType()) if e.dataType != LongType =>
Average(Cast(e, LongType))
case Average(e @ FractionalType()) if e.dataType != DoubleType =>
Average(Cast(e, DoubleType))
// Hive lets you do aggregation of timestamps... for some reason
case Sum(e @ TimestampType()) => Sum(Cast(e, DoubleType))
case Average(e @ TimestampType()) => Average(Cast(e, DoubleType))
// Coalesce should return the first non-null value, which could be any column
// from the list. So we need to make sure the return type is deterministic and
// compatible with every child column.
case c @ Coalesce(es) if !haveSameType(es) =>
val types = es.map(_.dataType)
findWiderCommonType(types) match {
case Some(finalDataType) => Coalesce(es.map(Cast(_, finalDataType)))
case None => c
}
// When finding wider type for `Greatest` and `Least`, we should handle decimal types even if
// we need to truncate, but we should not promote one side to string if the other side is
      // string.
case g @ Greatest(children) if !haveSameType(children) =>
val types = children.map(_.dataType)
findWiderTypeWithoutStringPromotion(types) match {
case Some(finalDataType) => Greatest(children.map(Cast(_, finalDataType)))
case None => g
}
case l @ Least(children) if !haveSameType(children) =>
val types = children.map(_.dataType)
findWiderTypeWithoutStringPromotion(types) match {
case Some(finalDataType) => Least(children.map(Cast(_, finalDataType)))
case None => l
}
case NaNvl(l, r) if l.dataType == DoubleType && r.dataType == FloatType =>
NaNvl(l, Cast(r, DoubleType))
case NaNvl(l, r) if l.dataType == FloatType && r.dataType == DoubleType =>
NaNvl(Cast(l, DoubleType), r)
case NaNvl(l, r) if r.dataType == NullType => NaNvl(l, Cast(r, l.dataType))
}
}
/**
* Hive only performs integral division with the DIV operator. The arguments to / are always
* converted to fractional types.
*/
object Division extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes that have not been resolved yet,
      // as this is an extra rule which should be applied last.
case e if !e.childrenResolved => e
// Decimal and Double remain the same
case d: Divide if d.dataType == DoubleType => d
case d: Divide if d.dataType.isInstanceOf[DecimalType] => d
case Divide(left, right) if isNumericOrNull(left) && isNumericOrNull(right) =>
Divide(Cast(left, DoubleType), Cast(right, DoubleType))
}
private def isNumericOrNull(ex: Expression): Boolean = {
// We need to handle null types in case a query contains null literals.
ex.dataType.isInstanceOf[NumericType] || ex.dataType == NullType
}
}
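  // For illustration, with the rule above an integral division such as
  // Divide(Literal(1), Literal(2)) is rewritten to
  // Divide(Cast(Literal(1), DoubleType), Cast(Literal(2), DoubleType)) and evaluates to 0.5.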
/**
* Coerces the type of different branches of a CASE WHEN statement to a common type.
*/
object CaseWhenCoercion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case c: CaseWhen if c.childrenResolved && !c.valueTypesEqual =>
val maybeCommonType = findWiderCommonType(c.valueTypes)
maybeCommonType.map { commonType =>
var changed = false
val newBranches = c.branches.map { case (condition, value) =>
if (value.dataType.sameType(commonType)) {
(condition, value)
} else {
changed = true
(condition, Cast(value, commonType))
}
}
val newElseValue = c.elseValue.map { value =>
if (value.dataType.sameType(commonType)) {
value
} else {
changed = true
Cast(value, commonType)
}
}
if (changed) CaseWhen(newBranches, newElseValue) else c
}.getOrElse(c)
}
}
/**
* Coerces the type of different branches of If statement to a common type.
*/
object IfCoercion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case e if !e.childrenResolved => e
// Find tightest common type for If, if the true value and false value have different types.
case i @ If(pred, left, right) if left.dataType != right.dataType =>
findWiderTypeForTwo(left.dataType, right.dataType).map { widestType =>
val newLeft = if (left.dataType == widestType) left else Cast(left, widestType)
val newRight = if (right.dataType == widestType) right else Cast(right, widestType)
If(pred, newLeft, newRight)
}.getOrElse(i) // If there is no applicable conversion, leave expression unchanged.
case If(Literal(null, NullType), left, right) =>
If(Literal.create(null, BooleanType), left, right)
case If(pred, left, right) if pred.dataType == NullType =>
If(Cast(pred, BooleanType), left, right)
}
}
/**
* Turns Add/Subtract of DateType/TimestampType/StringType and CalendarIntervalType
* to TimeAdd/TimeSub
*/
object DateTimeOperations extends Rule[LogicalPlan] {
private val acceptedTypes = Seq(DateType, TimestampType, StringType)
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case Add(l @ CalendarIntervalType(), r) if acceptedTypes.contains(r.dataType) =>
Cast(TimeAdd(r, l), r.dataType)
case Add(l, r @ CalendarIntervalType()) if acceptedTypes.contains(l.dataType) =>
Cast(TimeAdd(l, r), l.dataType)
case Subtract(l, r @ CalendarIntervalType()) if acceptedTypes.contains(l.dataType) =>
Cast(TimeSub(l, r), l.dataType)
}
}
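  // For illustration, with the rule above an expression such as Add(dateExpr, intervalExpr),
  // where dateExpr has DateType and intervalExpr has CalendarIntervalType (both names are
  // placeholders), is rewritten to Cast(TimeAdd(dateExpr, intervalExpr), DateType).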
/**
* Casts types according to the expected input types for [[Expression]]s.
*/
object ImplicitTypeCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case b @ BinaryOperator(left, right) if left.dataType != right.dataType =>
findTightestCommonType(left.dataType, right.dataType).map { commonType =>
if (b.inputType.acceptsType(commonType)) {
// If the expression accepts the tightest common type, cast to that.
val newLeft = if (left.dataType == commonType) left else Cast(left, commonType)
val newRight = if (right.dataType == commonType) right else Cast(right, commonType)
b.withNewChildren(Seq(newLeft, newRight))
} else {
// Otherwise, don't do anything with the expression.
b
}
}.getOrElse(b) // If there is no applicable conversion, leave expression unchanged.
case e: ImplicitCastInputTypes if e.inputTypes.nonEmpty =>
val children: Seq[Expression] = e.children.zip(e.inputTypes).map { case (in, expected) =>
// If we cannot do the implicit cast, just use the original input.
implicitCast(in, expected).getOrElse(in)
}
e.withNewChildren(children)
case e: ExpectsInputTypes if e.inputTypes.nonEmpty =>
// Convert NullType into some specific target type for ExpectsInputTypes that don't do
// general implicit casting.
val children: Seq[Expression] = e.children.zip(e.inputTypes).map { case (in, expected) =>
if (in.dataType == NullType && !expected.acceptsType(NullType)) {
Literal.create(null, expected.defaultConcreteType)
} else {
in
}
}
e.withNewChildren(children)
}
/**
* Given an expected data type, try to cast the expression and return the cast expression.
*
* If the expression already fits the input type, we simply return the expression itself.
* If the expression has an incompatible type that cannot be implicitly cast, return None.
*/
def implicitCast(e: Expression, expectedType: AbstractDataType): Option[Expression] = {
implicitCast(e.dataType, expectedType).map { dt =>
if (dt == e.dataType) e else Cast(e, dt)
}
}
private def implicitCast(inType: DataType, expectedType: AbstractDataType): Option[DataType] = {
// Note that ret is nullable to avoid typing a lot of Some(...) in this local scope.
// We wrap immediately an Option after this.
@Nullable val ret: DataType = (inType, expectedType) match {
// If the expected type is already a parent of the input type, no need to cast.
case _ if expectedType.acceptsType(inType) => inType
// Cast null type (usually from null literals) into target types
case (NullType, target) => target.defaultConcreteType
// If the function accepts any numeric type and the input is a string, we follow the hive
// convention and cast that input into a double
case (StringType, NumericType) => NumericType.defaultConcreteType
// Implicit cast among numeric types. When we reach here, input type is not acceptable.
// If input is a numeric type but not decimal, and we expect a decimal type,
// cast the input to decimal.
case (d: NumericType, DecimalType) => DecimalType.forType(d)
// For any other numeric types, implicitly cast to each other, e.g. long -> int, int -> long
case (_: NumericType, target: NumericType) => target
// Implicit cast between date time types
case (DateType, TimestampType) => TimestampType
case (TimestampType, DateType) => DateType
// Implicit cast from/to string
case (StringType, DecimalType) => DecimalType.SYSTEM_DEFAULT
case (StringType, target: NumericType) => target
case (StringType, DateType) => DateType
case (StringType, TimestampType) => TimestampType
case (StringType, BinaryType) => BinaryType
// Cast any atomic type to string.
case (any: AtomicType, StringType) if any != StringType => StringType
// When we reach here, input type is not acceptable for any types in this type collection,
// try to find the first one we can implicitly cast.
case (_, TypeCollection(types)) =>
types.flatMap(implicitCast(inType, _)).headOption.orNull
// Implicit cast between array types.
//
// Compare the nullabilities of the from type and the to type, check whether the cast of
// the nullability is resolvable by the following rules:
// 1. If the nullability of the to type is true, the cast is always allowed;
// 2. If the nullability of the to type is false, and the nullability of the from type is
// true, the cast is never allowed;
// 3. If the nullabilities of both the from type and the to type are false, the cast is
// allowed only when Cast.forceNullable(fromType, toType) is false.
case (ArrayType(fromType, fn), ArrayType(toType: DataType, true)) =>
implicitCast(fromType, toType).map(ArrayType(_, true)).orNull
case (ArrayType(fromType, true), ArrayType(toType: DataType, false)) => null
case (ArrayType(fromType, false), ArrayType(toType: DataType, false))
if !Cast.forceNullable(fromType, toType) =>
implicitCast(fromType, toType).map(ArrayType(_, false)).orNull
case _ => null
}
Option(ret)
}
}
}
|
bOOm-X/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
|
Scala
|
apache-2.0
| 35,045 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, Date, Timestamp}
import java.time.{Instant, LocalDate}
import scala.collection.mutable.ArrayBuilder
import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.util.{DateFormatter, DateTimeUtils, TimestampFormatter}
import org.apache.spark.sql.connector.catalog.TableChange
import org.apache.spark.sql.connector.catalog.TableChange._
import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* :: DeveloperApi ::
* A database type definition coupled with the jdbc type needed to send null
* values to the database.
* @param databaseTypeDefinition The database type definition
* @param jdbcNullType The jdbc type (as defined in java.sql.Types) used to
* send a null value to the database.
*/
@DeveloperApi
case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
/**
* :: DeveloperApi ::
* Encapsulates everything (extensions, workarounds, quirks) to handle the
* SQL dialect of a certain database or jdbc driver.
* Lots of databases define types that aren't explicitly supported
* by the JDBC spec. Some JDBC drivers also report inaccurate
* information---for instance, BIT(n{@literal >}1) being reported as a BIT type is quite
* common, even though BIT in JDBC is meant for single-bit values. Also, there
* does not appear to be a standard name for an unbounded string or binary
* type; we use BLOB and CLOB by default but override with database-specific
* alternatives when these are absent or do not behave correctly.
*
* Currently, the only thing done by the dialect is type mapping.
* `getCatalystType` is used when reading from a JDBC table and `getJDBCType`
* is used when writing to a JDBC table. If `getCatalystType` returns `null`,
* the default type handling is used for the given JDBC type. Similarly,
* if `getJDBCType` returns `(null, None)`, the default type handling is used
* for the given Catalyst type.
*/
@DeveloperApi
abstract class JdbcDialect extends Serializable with Logging {
/**
* Check if this dialect instance can handle a certain jdbc url.
* @param url the jdbc url.
* @return True if the dialect can be applied on the given jdbc url.
* @throws NullPointerException if the url is null.
*/
def canHandle(url : String): Boolean
/**
* Get the custom datatype mapping for the given jdbc meta information.
* @param sqlType The sql type (see java.sql.Types)
* @param typeName The sql type name (e.g. "BIGINT UNSIGNED")
* @param size The size of the type.
* @param md Result metadata associated with this type.
* @return The actual DataType (subclasses of [[org.apache.spark.sql.types.DataType]])
* or null if the default type mapping should be used.
*/
def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = None
/**
* Retrieve the jdbc / sql type for a given datatype.
* @param dt The datatype (e.g. [[org.apache.spark.sql.types.StringType]])
* @return The new JdbcType if there is an override for this DataType
*/
def getJDBCType(dt: DataType): Option[JdbcType] = None
/**
* Quotes the identifier. This is used to put quotes around the identifier in case the column
* name is a reserved keyword, or in case it contains characters that require quotes (e.g. space).
*/
def quoteIdentifier(colName: String): String = {
s""""$colName""""
}
/**
* Get the SQL query that should be used to find if the given table exists. Dialects can
* override this method to return a query that works best in a particular database.
* @param table The name of the table.
* @return The SQL query to use for checking the table.
*/
def getTableExistsQuery(table: String): String = {
s"SELECT * FROM $table WHERE 1=0"
}
/**
* The SQL query that should be used to discover the schema of a table. It only needs to
* ensure that the result set has the same schema as the table, such as by calling
* "SELECT * ...". Dialects can override this method to return a query that works best in a
* particular database.
* @param table The name of the table.
* @return The SQL query to use for discovering the schema.
*/
@Since("2.1.0")
def getSchemaQuery(table: String): String = {
s"SELECT * FROM $table WHERE 1=0"
}
/**
* The SQL query that should be used to truncate a table. Dialects can override this method to
* return a query that is suitable for a particular database. For PostgreSQL, for instance,
* a different query is used to prevent "TRUNCATE" affecting other tables.
* @param table The table to truncate
* @return The SQL query to use for truncating a table
*/
@Since("2.3.0")
def getTruncateQuery(table: String): String = {
getTruncateQuery(table, isCascadingTruncateTable)
}
/**
* The SQL query that should be used to truncate a table. Dialects can override this method to
* return a query that is suitable for a particular database. For PostgreSQL, for instance,
* a different query is used to prevent "TRUNCATE" affecting other tables.
* @param table The table to truncate
* @param cascade Whether or not to cascade the truncation
* @return The SQL query to use for truncating a table
*/
@Since("2.4.0")
def getTruncateQuery(
table: String,
cascade: Option[Boolean] = isCascadingTruncateTable): String = {
s"TRUNCATE TABLE $table"
}
/**
* Override connection specific properties to run before a select is made. This is in place to
* allow dialects that need special treatment to optimize behavior.
* @param connection The connection object
* @param properties The connection properties. This is passed through from the relation.
*/
def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
}
/**
* Escape special characters in SQL string literals.
* @param value The string to be escaped.
* @return Escaped string.
*/
@Since("2.3.0")
protected[jdbc] def escapeSql(value: String): String =
if (value == null) null else StringUtils.replace(value, "'", "''")
/**
* Converts value to SQL expression.
* @param value The value to be converted.
* @return Converted value.
*/
@Since("2.3.0")
def compileValue(value: Any): Any = value match {
case stringValue: String => s"'${escapeSql(stringValue)}'"
case timestampValue: Timestamp => "'" + timestampValue + "'"
case timestampValue: Instant =>
val timestampFormatter = TimestampFormatter.getFractionFormatter(
DateTimeUtils.getZoneId(SQLConf.get.sessionLocalTimeZone))
s"'${timestampFormatter.format(timestampValue)}'"
case dateValue: Date => "'" + dateValue + "'"
case dateValue: LocalDate =>
val dateFormatter = DateFormatter(DateTimeUtils.getZoneId(SQLConf.get.sessionLocalTimeZone))
s"'${dateFormatter.format(dateValue)}'"
case arrayValue: Array[Any] => arrayValue.map(compileValue).mkString(", ")
case _ => value
}
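  // Illustrative output sketch (the values are made up, not authoritative):
  //   compileValue("O'Brien")             yields  'O''Brien'   (embedded quote escaped)
  //   compileValue(Array[Any]("a", "b"))  yields  'a', 'b'     (e.g. for an IN list)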
/**
   * Return Some[true] iff `TRUNCATE TABLE` causes cascading by default.
* Some[true] : TRUNCATE TABLE causes cascading.
* Some[false] : TRUNCATE TABLE does not cause cascading.
* None: The behavior of TRUNCATE TABLE is unknown (default).
*/
def isCascadingTruncateTable(): Option[Boolean] = None
/**
* Rename an existing table.
*
* @param oldTable The existing table.
* @param newTable New name of the table.
* @return The SQL statement to use for renaming the table.
*/
def renameTable(oldTable: String, newTable: String): String = {
s"ALTER TABLE $oldTable RENAME TO $newTable"
}
/**
* Alter an existing table.
*
* @param tableName The name of the table to be altered.
* @param changes Changes to apply to the table.
* @return The SQL statements to use for altering the table.
*/
def alterTable(
tableName: String,
changes: Seq[TableChange],
dbMajorVersion: Int): Array[String] = {
val updateClause = ArrayBuilder.make[String]
for (change <- changes) {
change match {
case add: AddColumn if add.fieldNames.length == 1 =>
val dataType = JdbcUtils.getJdbcType(add.dataType(), this).databaseTypeDefinition
val name = add.fieldNames
updateClause += getAddColumnQuery(tableName, name(0), dataType)
case rename: RenameColumn if rename.fieldNames.length == 1 =>
val name = rename.fieldNames
updateClause += getRenameColumnQuery(tableName, name(0), rename.newName, dbMajorVersion)
case delete: DeleteColumn if delete.fieldNames.length == 1 =>
val name = delete.fieldNames
updateClause += getDeleteColumnQuery(tableName, name(0))
case updateColumnType: UpdateColumnType if updateColumnType.fieldNames.length == 1 =>
val name = updateColumnType.fieldNames
val dataType = JdbcUtils.getJdbcType(updateColumnType.newDataType(), this)
.databaseTypeDefinition
updateClause += getUpdateColumnTypeQuery(tableName, name(0), dataType)
case updateNull: UpdateColumnNullability if updateNull.fieldNames.length == 1 =>
val name = updateNull.fieldNames
updateClause += getUpdateColumnNullabilityQuery(tableName, name(0), updateNull.nullable())
case _ =>
throw new AnalysisException(s"Unsupported TableChange $change in JDBC catalog.")
}
}
updateClause.result()
}
def getAddColumnQuery(tableName: String, columnName: String, dataType: String): String =
s"ALTER TABLE $tableName ADD COLUMN ${quoteIdentifier(columnName)} $dataType"
def getRenameColumnQuery(
tableName: String,
columnName: String,
newName: String,
dbMajorVersion: Int): String =
s"ALTER TABLE $tableName RENAME COLUMN ${quoteIdentifier(columnName)} TO" +
s" ${quoteIdentifier(newName)}"
def getDeleteColumnQuery(tableName: String, columnName: String): String =
s"ALTER TABLE $tableName DROP COLUMN ${quoteIdentifier(columnName)}"
def getUpdateColumnTypeQuery(
tableName: String,
columnName: String,
newDataType: String): String =
s"ALTER TABLE $tableName ALTER COLUMN ${quoteIdentifier(columnName)} $newDataType"
def getUpdateColumnNullabilityQuery(
tableName: String,
columnName: String,
isNullable: Boolean): String = {
val nullable = if (isNullable) "NULL" else "NOT NULL"
s"ALTER TABLE $tableName ALTER COLUMN ${quoteIdentifier(columnName)} SET $nullable"
}
def getTableCommentQuery(table: String, comment: String): String = {
s"COMMENT ON TABLE $table IS '$comment'"
}
def getSchemaCommentQuery(schema: String, comment: String): String = {
s"COMMENT ON SCHEMA ${quoteIdentifier(schema)} IS '$comment'"
}
def removeSchemaCommentQuery(schema: String): String = {
s"COMMENT ON SCHEMA ${quoteIdentifier(schema)} IS NULL"
}
/**
   * Gets a dialect-specific exception, classifies it, and wraps it in an `AnalysisException`.
* @param message The error message to be placed to the returned exception.
* @param e The dialect specific exception.
* @return `AnalysisException` or its sub-class.
*/
def classifyException(message: String, e: Throwable): AnalysisException = {
new AnalysisException(message, cause = Some(e))
}
}
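/**
 * A minimal sketch (not part of Spark) of how a custom dialect could look. The object name,
 * the "jdbc:exampledb" URL prefix and the TINYTEXT mapping below are illustrative assumptions.
 */
private object ExampleDialect extends JdbcDialect {
  // Claim URLs of a hypothetical database product.
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:exampledb")
  // Map a vendor-specific unbounded text type to Catalyst's StringType.
  override def getCatalystType(
      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
    if (typeName.equalsIgnoreCase("TINYTEXT")) Some(StringType) else None
  }
  // Quote identifiers with backticks instead of the default double quotes.
  override def quoteIdentifier(colName: String): String = s"`$colName`"
}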
/**
* :: DeveloperApi ::
* Registry of dialects that apply to every new jdbc `org.apache.spark.sql.DataFrame`.
*
* If multiple matching dialects are registered then all matching ones will be
* tried in reverse order. A user-added dialect will thus be applied first,
* overwriting the defaults.
*
* @note All new dialects are applied to new jdbc DataFrames only. Make
* sure to register your dialects first.
*/
@DeveloperApi
object JdbcDialects {
/**
* Register a dialect for use on all new matching jdbc `org.apache.spark.sql.DataFrame`.
* Reading an existing dialect will cause a move-to-front.
*
* @param dialect The new dialect.
*/
def registerDialect(dialect: JdbcDialect) : Unit = {
dialects = dialect :: dialects.filterNot(_ == dialect)
}
/**
* Unregister a dialect. Does nothing if the dialect is not registered.
*
* @param dialect The jdbc dialect.
*/
def unregisterDialect(dialect : JdbcDialect) : Unit = {
dialects = dialects.filterNot(_ == dialect)
}
private[this] var dialects = List[JdbcDialect]()
registerDialect(MySQLDialect)
registerDialect(PostgresDialect)
registerDialect(DB2Dialect)
registerDialect(MsSqlServerDialect)
registerDialect(DerbyDialect)
registerDialect(OracleDialect)
registerDialect(TeradataDialect)
registerDialect(H2Dialect)
/**
* Fetch the JdbcDialect class corresponding to a given database url.
*/
def get(url: String): JdbcDialect = {
val matchingDialects = dialects.filter(_.canHandle(url))
matchingDialects.length match {
case 0 => NoopDialect
case 1 => matchingDialects.head
case _ => new AggregatedDialect(matchingDialects)
}
}
}
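// Usage sketch (assuming a custom dialect such as the illustrative ExampleDialect above):
//   JdbcDialects.registerDialect(ExampleDialect)
//   JdbcDialects.get("jdbc:exampledb://host/db")   // resolves to ExampleDialect
//   JdbcDialects.get("jdbc:unknown://host/db")     // falls back to NoopDialect below
//   JdbcDialects.unregisterDialect(ExampleDialect)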
/**
* NOOP dialect object, always returning the neutral element.
*/
private object NoopDialect extends JdbcDialect {
override def canHandle(url : String): Boolean = true
}
|
witgo/spark
|
sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
|
Scala
|
apache-2.0
| 14,283 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.recommendation
import java.{util => ju}
import java.io.IOException
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.util.{Sorting, Try}
import scala.util.hashing.byteswap64
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.apache.spark.{Dependency, Partitioner, ShuffleDependency, SparkContext}
import org.apache.spark.annotation.{DeveloperApi, Experimental, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.mllib.linalg.CholeskyDecomposition
import org.apache.spark.mllib.optimization.NNLS
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, FloatType, IntegerType, StructType}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.{OpenHashMap, OpenHashSet, SortDataFormat, Sorter}
import org.apache.spark.util.random.XORShiftRandom
/**
* Common params for ALS and ALSModel.
*/
private[recommendation] trait ALSModelParams extends Params with HasPredictionCol {
/**
* Param for the column name for user ids.
* Default: "user"
* @group param
*/
val userCol = new Param[String](this, "userCol", "column name for user ids")
/** @group getParam */
def getUserCol: String = $(userCol)
/**
* Param for the column name for item ids.
* Default: "item"
* @group param
*/
val itemCol = new Param[String](this, "itemCol", "column name for item ids")
/** @group getParam */
def getItemCol: String = $(itemCol)
}
/**
* Common params for ALS.
*/
private[recommendation] trait ALSParams extends ALSModelParams with HasMaxIter with HasRegParam
with HasPredictionCol with HasCheckpointInterval with HasSeed {
/**
* Param for rank of the matrix factorization (>= 1).
* Default: 10
* @group param
*/
val rank = new IntParam(this, "rank", "rank of the factorization", ParamValidators.gtEq(1))
/** @group getParam */
def getRank: Int = $(rank)
/**
* Param for number of user blocks (>= 1).
* Default: 10
* @group param
*/
val numUserBlocks = new IntParam(this, "numUserBlocks", "number of user blocks",
ParamValidators.gtEq(1))
/** @group getParam */
def getNumUserBlocks: Int = $(numUserBlocks)
/**
* Param for number of item blocks (>= 1).
* Default: 10
* @group param
*/
val numItemBlocks = new IntParam(this, "numItemBlocks", "number of item blocks",
ParamValidators.gtEq(1))
/** @group getParam */
def getNumItemBlocks: Int = $(numItemBlocks)
/**
* Param to decide whether to use implicit preference.
* Default: false
* @group param
*/
val implicitPrefs = new BooleanParam(this, "implicitPrefs", "whether to use implicit preference")
/** @group getParam */
def getImplicitPrefs: Boolean = $(implicitPrefs)
/**
* Param for the alpha parameter in the implicit preference formulation (>= 0).
* Default: 1.0
* @group param
*/
val alpha = new DoubleParam(this, "alpha", "alpha for implicit preference",
ParamValidators.gtEq(0))
/** @group getParam */
def getAlpha: Double = $(alpha)
/**
* Param for the column name for ratings.
* Default: "rating"
* @group param
*/
val ratingCol = new Param[String](this, "ratingCol", "column name for ratings")
/** @group getParam */
def getRatingCol: String = $(ratingCol)
/**
* Param for whether to apply nonnegativity constraints.
* Default: false
* @group param
*/
val nonnegative = new BooleanParam(
this, "nonnegative", "whether to use nonnegative constraint for least squares")
/** @group getParam */
def getNonnegative: Boolean = $(nonnegative)
/**
* Param for StorageLevel for intermediate datasets. Pass in a string representation of
* [[StorageLevel]]. Cannot be "NONE".
* Default: "MEMORY_AND_DISK".
*
* @group expertParam
*/
val intermediateStorageLevel = new Param[String](this, "intermediateStorageLevel",
"StorageLevel for intermediate datasets. Cannot be 'NONE'. Default: 'MEMORY_AND_DISK'.",
(s: String) => Try(StorageLevel.fromString(s)).isSuccess && s != "NONE")
/** @group expertGetParam */
def getIntermediateStorageLevel: String = $(intermediateStorageLevel)
/**
* Param for StorageLevel for ALS model factors. Pass in a string representation of
* [[StorageLevel]].
* Default: "MEMORY_AND_DISK".
*
* @group expertParam
*/
val finalStorageLevel = new Param[String](this, "finalStorageLevel",
"StorageLevel for ALS model factors. Default: 'MEMORY_AND_DISK'.",
(s: String) => Try(StorageLevel.fromString(s)).isSuccess)
/** @group expertGetParam */
def getFinalStorageLevel: String = $(finalStorageLevel)
setDefault(rank -> 10, maxIter -> 10, regParam -> 0.1, numUserBlocks -> 10, numItemBlocks -> 10,
implicitPrefs -> false, alpha -> 1.0, userCol -> "user", itemCol -> "item",
ratingCol -> "rating", nonnegative -> false, checkpointInterval -> 10,
intermediateStorageLevel -> "MEMORY_AND_DISK", finalStorageLevel -> "MEMORY_AND_DISK")
/**
* Validates and transforms the input schema.
*
* @param schema input schema
* @return output schema
*/
protected def validateAndTransformSchema(schema: StructType): StructType = {
SchemaUtils.checkColumnType(schema, $(userCol), IntegerType)
SchemaUtils.checkColumnType(schema, $(itemCol), IntegerType)
val ratingType = schema($(ratingCol)).dataType
require(ratingType == FloatType || ratingType == DoubleType)
SchemaUtils.appendColumn(schema, $(predictionCol), FloatType)
}
}
/**
* :: Experimental ::
* Model fitted by ALS.
*
* @param rank rank of the matrix factorization model
* @param userFactors a DataFrame that stores user factors in two columns: `id` and `features`
* @param itemFactors a DataFrame that stores item factors in two columns: `id` and `features`
*/
@Experimental
@Since("1.3.0")
class ALSModel private[ml] (
@Since("1.4.0") override val uid: String,
@Since("1.4.0") val rank: Int,
@transient val userFactors: DataFrame,
@transient val itemFactors: DataFrame)
extends Model[ALSModel] with ALSModelParams with MLWritable {
/** @group setParam */
@Since("1.4.0")
def setUserCol(value: String): this.type = set(userCol, value)
/** @group setParam */
@Since("1.4.0")
def setItemCol(value: String): this.type = set(itemCol, value)
/** @group setParam */
@Since("1.3.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
    // Register a UDF for DataFrame, and then
    // create a new column, named after $(predictionCol), by running the predict UDF.
val predict = udf { (userFeatures: Seq[Float], itemFeatures: Seq[Float]) =>
if (userFeatures != null && itemFeatures != null) {
blas.sdot(rank, userFeatures.toArray, 1, itemFeatures.toArray, 1)
} else {
Float.NaN
}
}
dataset
.join(userFactors, dataset($(userCol)) === userFactors("id"), "left")
.join(itemFactors, dataset($(itemCol)) === itemFactors("id"), "left")
.select(dataset("*"),
predict(userFactors("features"), itemFactors("features")).as($(predictionCol)))
}
@Since("1.3.0")
override def transformSchema(schema: StructType): StructType = {
SchemaUtils.checkColumnType(schema, $(userCol), IntegerType)
SchemaUtils.checkColumnType(schema, $(itemCol), IntegerType)
SchemaUtils.appendColumn(schema, $(predictionCol), FloatType)
}
@Since("1.5.0")
override def copy(extra: ParamMap): ALSModel = {
val copied = new ALSModel(uid, rank, userFactors, itemFactors)
copyValues(copied, extra).setParent(parent)
}
@Since("1.6.0")
override def write: MLWriter = new ALSModel.ALSModelWriter(this)
}
@Since("1.6.0")
object ALSModel extends MLReadable[ALSModel] {
@Since("1.6.0")
override def read: MLReader[ALSModel] = new ALSModelReader
@Since("1.6.0")
override def load(path: String): ALSModel = super.load(path)
private[ALSModel] class ALSModelWriter(instance: ALSModel) extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val extraMetadata = "rank" -> instance.rank
DefaultParamsWriter.saveMetadata(instance, path, sc, Some(extraMetadata))
val userPath = new Path(path, "userFactors").toString
instance.userFactors.write.format("parquet").save(userPath)
val itemPath = new Path(path, "itemFactors").toString
instance.itemFactors.write.format("parquet").save(itemPath)
}
}
private class ALSModelReader extends MLReader[ALSModel] {
/** Checked against metadata when loading model */
private val className = classOf[ALSModel].getName
override def load(path: String): ALSModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
implicit val format = DefaultFormats
val rank = (metadata.metadata \ "rank").extract[Int]
val userPath = new Path(path, "userFactors").toString
val userFactors = sqlContext.read.format("parquet").load(userPath)
val itemPath = new Path(path, "itemFactors").toString
val itemFactors = sqlContext.read.format("parquet").load(itemPath)
val model = new ALSModel(metadata.uid, rank, userFactors, itemFactors)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
}
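// Persistence sketch (the path below is illustrative):
//   model.write.overwrite().save("/tmp/als-model")
//   val restored = ALSModel.load("/tmp/als-model")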
/**
* :: Experimental ::
* Alternating Least Squares (ALS) matrix factorization.
*
* ALS attempts to estimate the ratings matrix `R` as the product of two lower-rank matrices,
* `X` and `Y`, i.e. `X * Yt = R`. Typically these approximations are called 'factor' matrices.
* The general approach is iterative. During each iteration, one of the factor matrices is held
* constant, while the other is solved for using least squares. The newly-solved factor matrix is
* then held constant while solving for the other factor matrix.
*
* This is a blocked implementation of the ALS factorization algorithm that groups the two sets
* of factors (referred to as "users" and "products") into blocks and reduces communication by only
* sending one copy of each user vector to each product block on each iteration, and only for the
* product blocks that need that user's feature vector. This is achieved by pre-computing some
* information about the ratings matrix to determine the "out-links" of each user (which blocks of
* products it will contribute to) and "in-link" information for each product (which of the feature
* vectors it receives from each user block it will depend on). This allows us to send only an
* array of feature vectors between each user block and product block, and have the product block
* find the users' ratings and update the products based on these messages.
*
* For implicit preference data, the algorithm used is based on
* "Collaborative Filtering for Implicit Feedback Datasets", available at
* [[http://dx.doi.org/10.1109/ICDM.2008.22]], adapted for the blocked approach used here.
*
* Essentially instead of finding the low-rank approximations to the rating matrix `R`,
* this finds the approximations for a preference matrix `P` where the elements of `P` are 1 if
 * r > 0 and 0 if r <= 0. The ratings then act as 'confidence' values related to the strength of
 * indicated user preferences rather than explicit ratings given to items.
*/
@Experimental
@Since("1.3.0")
class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] with ALSParams
with DefaultParamsWritable {
import org.apache.spark.ml.recommendation.ALS.Rating
@Since("1.4.0")
def this() = this(Identifiable.randomUID("als"))
/** @group setParam */
@Since("1.3.0")
def setRank(value: Int): this.type = set(rank, value)
/** @group setParam */
@Since("1.3.0")
def setNumUserBlocks(value: Int): this.type = set(numUserBlocks, value)
/** @group setParam */
@Since("1.3.0")
def setNumItemBlocks(value: Int): this.type = set(numItemBlocks, value)
/** @group setParam */
@Since("1.3.0")
def setImplicitPrefs(value: Boolean): this.type = set(implicitPrefs, value)
/** @group setParam */
@Since("1.3.0")
def setAlpha(value: Double): this.type = set(alpha, value)
/** @group setParam */
@Since("1.3.0")
def setUserCol(value: String): this.type = set(userCol, value)
/** @group setParam */
@Since("1.3.0")
def setItemCol(value: String): this.type = set(itemCol, value)
/** @group setParam */
@Since("1.3.0")
def setRatingCol(value: String): this.type = set(ratingCol, value)
/** @group setParam */
@Since("1.3.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("1.3.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("1.3.0")
def setRegParam(value: Double): this.type = set(regParam, value)
/** @group setParam */
@Since("1.3.0")
def setNonnegative(value: Boolean): this.type = set(nonnegative, value)
/** @group setParam */
@Since("1.4.0")
def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
/** @group setParam */
@Since("1.3.0")
def setSeed(value: Long): this.type = set(seed, value)
/** @group expertSetParam */
@Since("2.0.0")
def setIntermediateStorageLevel(value: String): this.type = {
set(intermediateStorageLevel, value)
}
/** @group expertSetParam */
@Since("2.0.0")
def setFinalStorageLevel(value: String): this.type = {
set(finalStorageLevel, value)
}
/**
* Sets both numUserBlocks and numItemBlocks to the specific value.
*
* @group setParam
*/
@Since("1.3.0")
def setNumBlocks(value: Int): this.type = {
setNumUserBlocks(value)
setNumItemBlocks(value)
this
}
@Since("2.0.0")
override def fit(dataset: Dataset[_]): ALSModel = {
import dataset.sparkSession.implicits._
val r = if ($(ratingCol) != "") col($(ratingCol)).cast(FloatType) else lit(1.0f)
val ratings = dataset
.select(col($(userCol)).cast(IntegerType), col($(itemCol)).cast(IntegerType), r)
.rdd
.map { row =>
Rating(row.getInt(0), row.getInt(1), row.getFloat(2))
}
val instrLog = Instrumentation.create(this, ratings)
instrLog.logParams(rank, numUserBlocks, numItemBlocks, implicitPrefs, alpha,
userCol, itemCol, ratingCol, predictionCol, maxIter,
regParam, nonnegative, checkpointInterval, seed)
val (userFactors, itemFactors) = ALS.train(ratings, rank = $(rank),
numUserBlocks = $(numUserBlocks), numItemBlocks = $(numItemBlocks),
maxIter = $(maxIter), regParam = $(regParam), implicitPrefs = $(implicitPrefs),
alpha = $(alpha), nonnegative = $(nonnegative),
intermediateRDDStorageLevel = StorageLevel.fromString($(intermediateStorageLevel)),
finalRDDStorageLevel = StorageLevel.fromString($(finalStorageLevel)),
checkpointInterval = $(checkpointInterval), seed = $(seed))
val userDF = userFactors.toDF("id", "features")
val itemDF = itemFactors.toDF("id", "features")
val model = new ALSModel(uid, $(rank), userDF, itemDF).setParent(this)
instrLog.logSuccess(model)
copyValues(model)
}
@Since("1.3.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
@Since("1.5.0")
override def copy(extra: ParamMap): ALS = defaultCopy(extra)
}
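// Typical usage sketch (the DataFrame and column names are illustrative; the input must
// contain integer user/item ids and a numeric rating column):
//   val als = new ALS()
//     .setMaxIter(10)
//     .setRegParam(0.1)
//     .setUserCol("userId")
//     .setItemCol("movieId")
//     .setRatingCol("rating")
//   val model = als.fit(training)
//   val predictions = model.transform(test)   // adds $(predictionCol), "prediction" by default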
/**
* :: DeveloperApi ::
* An implementation of ALS that supports generic ID types, specialized for Int and Long. This is
* exposed as a developer API for users who do need other ID types. But it is not recommended
* because it increases the shuffle size and memory requirement during training. For simplicity,
* users and items must have the same type. The number of distinct users/items should be smaller
* than 2 billion.
*/
@DeveloperApi
object ALS extends DefaultParamsReadable[ALS] with Logging {
/**
* :: DeveloperApi ::
* Rating class for better code readability.
*/
@DeveloperApi
case class Rating[@specialized(Int, Long) ID](user: ID, item: ID, rating: Float)
@Since("1.6.0")
override def load(path: String): ALS = super.load(path)
/** Trait for least squares solvers applied to the normal equation. */
private[recommendation] trait LeastSquaresNESolver extends Serializable {
/** Solves a least squares problem with regularization (possibly with other constraints). */
def solve(ne: NormalEquation, lambda: Double): Array[Float]
}
/** Cholesky solver for least square problems. */
private[recommendation] class CholeskySolver extends LeastSquaresNESolver {
/**
* Solves a least squares problem with L2 regularization:
*
* min norm(A x - b)^2^ + lambda * norm(x)^2^
*
* @param ne a [[NormalEquation]] instance that contains AtA, Atb, and n (number of instances)
* @param lambda regularization constant
* @return the solution x
*/
override def solve(ne: NormalEquation, lambda: Double): Array[Float] = {
val k = ne.k
// Add scaled lambda to the diagonals of AtA.
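      // AtA is stored in packed upper-triangular (column-major) order, so its diagonal
      // entries sit at indices 0, 2, 5, 9, ... (successive gaps of 2, 3, 4, ...), which is
      // exactly what the `i += j; j += 1` stepping below walks over.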
var i = 0
var j = 2
while (i < ne.triK) {
ne.ata(i) += lambda
i += j
j += 1
}
CholeskyDecomposition.solve(ne.ata, ne.atb)
val x = new Array[Float](k)
i = 0
while (i < k) {
x(i) = ne.atb(i).toFloat
i += 1
}
ne.reset()
x
}
}
/** NNLS solver. */
private[recommendation] class NNLSSolver extends LeastSquaresNESolver {
private var rank: Int = -1
private var workspace: NNLS.Workspace = _
private var ata: Array[Double] = _
private var initialized: Boolean = false
private def initialize(rank: Int): Unit = {
if (!initialized) {
this.rank = rank
workspace = NNLS.createWorkspace(rank)
ata = new Array[Double](rank * rank)
initialized = true
} else {
require(this.rank == rank)
}
}
/**
* Solves a nonnegative least squares problem with L2 regularization:
*
* min_x_ norm(A x - b)^2^ + lambda * n * norm(x)^2^
* subject to x >= 0
*/
override def solve(ne: NormalEquation, lambda: Double): Array[Float] = {
val rank = ne.k
initialize(rank)
fillAtA(ne.ata, lambda)
val x = NNLS.solve(ata, ne.atb, workspace)
ne.reset()
x.map(x => x.toFloat)
}
    /**
     * Given the upper triangular part of AtA in the packed order used by [[NormalEquation]],
     * computes the full symmetric square matrix that it represents, stores it into `ata`,
     * and adds `lambda` to its diagonal.
     */
    private def fillAtA(triAtA: Array[Double], lambda: Double): Unit = {
var i = 0
var pos = 0
var a = 0.0
while (i < rank) {
var j = 0
while (j <= i) {
a = triAtA(pos)
ata(i * rank + j) = a
ata(j * rank + i) = a
pos += 1
j += 1
}
ata(i * rank + i) += lambda
i += 1
}
}
}
/**
* Representing a normal equation to solve the following weighted least squares problem:
*
* minimize \sum,,i,, c,,i,, (a,,i,,^T^ x - b,,i,,)^2^ + lambda * x^T^ x.
*
* Its normal equation is given by
*
* \sum,,i,, c,,i,, (a,,i,, a,,i,,^T^ x - b,,i,, a,,i,,) + lambda * x = 0.
*/
private[recommendation] class NormalEquation(val k: Int) extends Serializable {
/** Number of entries in the upper triangular part of a k-by-k matrix. */
val triK = k * (k + 1) / 2
/** A^T^ * A */
val ata = new Array[Double](triK)
/** A^T^ * b */
val atb = new Array[Double](k)
private val da = new Array[Double](k)
private val upper = "U"
private def copyToDouble(a: Array[Float]): Unit = {
var i = 0
while (i < k) {
da(i) = a(i)
i += 1
}
}
/** Adds an observation. */
def add(a: Array[Float], b: Double, c: Double = 1.0): this.type = {
require(c >= 0.0)
require(a.length == k)
copyToDouble(a)
blas.dspr(upper, k, c, da, 1, ata)
if (b != 0.0) {
blas.daxpy(k, c * b, da, 1, atb, 1)
}
this
}
/** Merges another normal equation object. */
def merge(other: NormalEquation): this.type = {
require(other.k == k)
blas.daxpy(ata.length, 1.0, other.ata, 1, ata, 1)
blas.daxpy(atb.length, 1.0, other.atb, 1, atb, 1)
this
}
/** Resets everything to zero, which should be called after each solve. */
def reset(): Unit = {
ju.Arrays.fill(ata, 0.0)
ju.Arrays.fill(atb, 0.0)
}
}
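  // Sketch of how the solvers above consume a NormalEquation (the values are illustrative):
  //   val ne = new NormalEquation(2)
  //   ne.add(Array(1.0f, 0.0f), b = 1.0)
  //   ne.add(Array(0.0f, 1.0f), b = 2.0, c = 3.0)          // weighted observation
  //   val x = new CholeskySolver().solve(ne, lambda = 0.1)  // solving also resets ne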
/**
* :: DeveloperApi ::
* Implementation of the ALS algorithm.
*/
@DeveloperApi
def train[ID: ClassTag]( // scalastyle:ignore
ratings: RDD[Rating[ID]],
rank: Int = 10,
numUserBlocks: Int = 10,
numItemBlocks: Int = 10,
maxIter: Int = 10,
regParam: Double = 1.0,
implicitPrefs: Boolean = false,
alpha: Double = 1.0,
nonnegative: Boolean = false,
intermediateRDDStorageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK,
finalRDDStorageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK,
checkpointInterval: Int = 10,
seed: Long = 0L)(
implicit ord: Ordering[ID]): (RDD[(ID, Array[Float])], RDD[(ID, Array[Float])]) = {
require(intermediateRDDStorageLevel != StorageLevel.NONE,
"ALS is not designed to run without persisting intermediate RDDs.")
val sc = ratings.sparkContext
val userPart = new ALSPartitioner(numUserBlocks)
val itemPart = new ALSPartitioner(numItemBlocks)
val userLocalIndexEncoder = new LocalIndexEncoder(userPart.numPartitions)
val itemLocalIndexEncoder = new LocalIndexEncoder(itemPart.numPartitions)
val solver = if (nonnegative) new NNLSSolver else new CholeskySolver
val blockRatings = partitionRatings(ratings, userPart, itemPart)
.persist(intermediateRDDStorageLevel)
val (userInBlocks, userOutBlocks) =
makeBlocks("user", blockRatings, userPart, itemPart, intermediateRDDStorageLevel)
// materialize blockRatings and user blocks
userOutBlocks.count()
val swappedBlockRatings = blockRatings.map {
case ((userBlockId, itemBlockId), RatingBlock(userIds, itemIds, localRatings)) =>
((itemBlockId, userBlockId), RatingBlock(itemIds, userIds, localRatings))
}
val (itemInBlocks, itemOutBlocks) =
makeBlocks("item", swappedBlockRatings, itemPart, userPart, intermediateRDDStorageLevel)
// materialize item blocks
itemOutBlocks.count()
val seedGen = new XORShiftRandom(seed)
var userFactors = initialize(userInBlocks, rank, seedGen.nextLong())
var itemFactors = initialize(itemInBlocks, rank, seedGen.nextLong())
var previousCheckpointFile: Option[String] = None
val shouldCheckpoint: Int => Boolean = (iter) =>
sc.checkpointDir.isDefined && checkpointInterval != -1 && (iter % checkpointInterval == 0)
val deletePreviousCheckpointFile: () => Unit = () =>
previousCheckpointFile.foreach { file =>
try {
val checkpointFile = new Path(file)
checkpointFile.getFileSystem(sc.hadoopConfiguration).delete(checkpointFile, true)
} catch {
case e: IOException =>
logWarning(s"Cannot delete checkpoint file $file:", e)
}
}
if (implicitPrefs) {
for (iter <- 1 to maxIter) {
userFactors.setName(s"userFactors-$iter").persist(intermediateRDDStorageLevel)
val previousItemFactors = itemFactors
itemFactors = computeFactors(userFactors, userOutBlocks, itemInBlocks, rank, regParam,
userLocalIndexEncoder, implicitPrefs, alpha, solver)
previousItemFactors.unpersist()
itemFactors.setName(s"itemFactors-$iter").persist(intermediateRDDStorageLevel)
// TODO: Generalize PeriodicGraphCheckpointer and use it here.
val deps = itemFactors.dependencies
if (shouldCheckpoint(iter)) {
itemFactors.checkpoint() // itemFactors gets materialized in computeFactors
}
val previousUserFactors = userFactors
userFactors = computeFactors(itemFactors, itemOutBlocks, userInBlocks, rank, regParam,
itemLocalIndexEncoder, implicitPrefs, alpha, solver)
if (shouldCheckpoint(iter)) {
ALS.cleanShuffleDependencies(sc, deps)
deletePreviousCheckpointFile()
previousCheckpointFile = itemFactors.getCheckpointFile
}
previousUserFactors.unpersist()
}
} else {
for (iter <- 0 until maxIter) {
itemFactors = computeFactors(userFactors, userOutBlocks, itemInBlocks, rank, regParam,
userLocalIndexEncoder, solver = solver)
if (shouldCheckpoint(iter)) {
val deps = itemFactors.dependencies
itemFactors.checkpoint()
itemFactors.count() // checkpoint item factors and cut lineage
ALS.cleanShuffleDependencies(sc, deps)
deletePreviousCheckpointFile()
previousCheckpointFile = itemFactors.getCheckpointFile
}
userFactors = computeFactors(itemFactors, itemOutBlocks, userInBlocks, rank, regParam,
itemLocalIndexEncoder, solver = solver)
}
}
val userIdAndFactors = userInBlocks
.mapValues(_.srcIds)
.join(userFactors)
.mapPartitions({ items =>
items.flatMap { case (_, (ids, factors)) =>
ids.view.zip(factors)
}
// Preserve the partitioning because IDs are consistent with the partitioners in userInBlocks
// and userFactors.
}, preservesPartitioning = true)
.setName("userFactors")
.persist(finalRDDStorageLevel)
val itemIdAndFactors = itemInBlocks
.mapValues(_.srcIds)
.join(itemFactors)
.mapPartitions({ items =>
items.flatMap { case (_, (ids, factors)) =>
ids.view.zip(factors)
}
}, preservesPartitioning = true)
.setName("itemFactors")
.persist(finalRDDStorageLevel)
if (finalRDDStorageLevel != StorageLevel.NONE) {
userIdAndFactors.count()
itemFactors.unpersist()
itemIdAndFactors.count()
userInBlocks.unpersist()
userOutBlocks.unpersist()
itemInBlocks.unpersist()
itemOutBlocks.unpersist()
blockRatings.unpersist()
}
(userIdAndFactors, itemIdAndFactors)
}
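  // Low-level usage sketch (names are illustrative; most callers should prefer the ALS
  // Estimator defined above):
  //   val ratings: RDD[Rating[Int]] = ...
  //   val (userFactors, itemFactors) =
  //     ALS.train(ratings, rank = 20, maxIter = 15, regParam = 0.05, seed = 42L)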
/**
* Factor block that stores factors (Array[Float]) in an Array.
*/
private type FactorBlock = Array[Array[Float]]
/**
* Out-link block that stores, for each dst (item/user) block, which src (user/item) factors to
* send. For example, outLinkBlock(0) contains the local indices (not the original src IDs) of the
* src factors in this block to send to dst block 0.
*/
private type OutBlock = Array[Array[Int]]
/**
* In-link block for computing src (user/item) factors. This includes the original src IDs
* of the elements within this block as well as encoded dst (item/user) indices and corresponding
* ratings. The dst indices are in the form of (blockId, localIndex), which are not the original
* dst IDs. To compute src factors, we expect receiving dst factors that match the dst indices.
* For example, if we have an in-link record
*
* {srcId: 0, dstBlockId: 2, dstLocalIndex: 3, rating: 5.0},
*
* and assume that the dst factors are stored as dstFactors: Map[Int, Array[Array[Float]]], which
* is a blockId to dst factors map, the corresponding dst factor of the record is dstFactor(2)(3).
*
* We use a CSC-like (compressed sparse column) format to store the in-link information. So we can
* compute src factors one after another using only one normal equation instance.
*
* @param srcIds src ids (ordered)
* @param dstPtrs dst pointers. Elements in range [dstPtrs(i), dstPtrs(i+1)) of dst indices and
* ratings are associated with srcIds(i).
* @param dstEncodedIndices encoded dst indices
* @param ratings ratings
* @see [[LocalIndexEncoder]]
*/
private[recommendation] case class InBlock[@specialized(Int, Long) ID: ClassTag](
srcIds: Array[ID],
dstPtrs: Array[Int],
dstEncodedIndices: Array[Int],
ratings: Array[Float]) {
/** Size of the block. */
def size: Int = ratings.length
require(dstEncodedIndices.length == size)
require(dstPtrs.length == srcIds.length + 1)
}
/**
* Initializes factors randomly given the in-link blocks.
*
* @param inBlocks in-link blocks
* @param rank rank
* @return initialized factor blocks
*/
private def initialize[ID](
inBlocks: RDD[(Int, InBlock[ID])],
rank: Int,
seed: Long): RDD[(Int, FactorBlock)] = {
    // Choose a unit vector uniformly at random from the unit sphere by sampling each
    // element as Normal(0,1) and then normalizing the vector.
    // This appears to create factorizations that have a slightly better reconstruction
    // (<1%) compared to picking elements uniformly at random in [0,1].
inBlocks.map { case (srcBlockId, inBlock) =>
val random = new XORShiftRandom(byteswap64(seed ^ srcBlockId))
val factors = Array.fill(inBlock.srcIds.length) {
val factor = Array.fill(rank)(random.nextGaussian().toFloat)
val nrm = blas.snrm2(rank, factor, 1)
blas.sscal(rank, 1.0f / nrm, factor, 1)
factor
}
(srcBlockId, factors)
}
}
/**
* A rating block that contains src IDs, dst IDs, and ratings, stored in primitive arrays.
*/
private[recommendation] case class RatingBlock[@specialized(Int, Long) ID: ClassTag](
srcIds: Array[ID],
dstIds: Array[ID],
ratings: Array[Float]) {
/** Size of the block. */
def size: Int = srcIds.length
require(dstIds.length == srcIds.length)
require(ratings.length == srcIds.length)
}
/**
* Builder for [[RatingBlock]]. [[mutable.ArrayBuilder]] is used to avoid boxing/unboxing.
*/
private[recommendation] class RatingBlockBuilder[@specialized(Int, Long) ID: ClassTag]
extends Serializable {
private val srcIds = mutable.ArrayBuilder.make[ID]
private val dstIds = mutable.ArrayBuilder.make[ID]
private val ratings = mutable.ArrayBuilder.make[Float]
var size = 0
/** Adds a rating. */
def add(r: Rating[ID]): this.type = {
size += 1
srcIds += r.user
dstIds += r.item
ratings += r.rating
this
}
/** Merges another [[RatingBlockBuilder]]. */
def merge(other: RatingBlock[ID]): this.type = {
size += other.srcIds.length
srcIds ++= other.srcIds
dstIds ++= other.dstIds
ratings ++= other.ratings
this
}
/** Builds a [[RatingBlock]]. */
def build(): RatingBlock[ID] = {
RatingBlock[ID](srcIds.result(), dstIds.result(), ratings.result())
}
}
/**
* Partitions raw ratings into blocks.
*
* @param ratings raw ratings
* @param srcPart partitioner for src IDs
* @param dstPart partitioner for dst IDs
* @return an RDD of rating blocks in the form of ((srcBlockId, dstBlockId), ratingBlock)
*/
private def partitionRatings[ID: ClassTag](
ratings: RDD[Rating[ID]],
srcPart: Partitioner,
dstPart: Partitioner): RDD[((Int, Int), RatingBlock[ID])] = {
/* The implementation produces the same result as the following but generates less objects.
ratings.map { r =>
((srcPart.getPartition(r.user), dstPart.getPartition(r.item)), r)
}.aggregateByKey(new RatingBlockBuilder)(
seqOp = (b, r) => b.add(r),
combOp = (b0, b1) => b0.merge(b1.build()))
.mapValues(_.build())
*/
val numPartitions = srcPart.numPartitions * dstPart.numPartitions
ratings.mapPartitions { iter =>
val builders = Array.fill(numPartitions)(new RatingBlockBuilder[ID])
iter.flatMap { r =>
val srcBlockId = srcPart.getPartition(r.user)
val dstBlockId = dstPart.getPartition(r.item)
val idx = srcBlockId + srcPart.numPartitions * dstBlockId
val builder = builders(idx)
builder.add(r)
if (builder.size >= 2048) { // 2048 * (3 * 4) = 24k
builders(idx) = new RatingBlockBuilder
Iterator.single(((srcBlockId, dstBlockId), builder.build()))
} else {
Iterator.empty
}
} ++ {
builders.view.zipWithIndex.filter(_._1.size > 0).map { case (block, idx) =>
val srcBlockId = idx % srcPart.numPartitions
val dstBlockId = idx / srcPart.numPartitions
((srcBlockId, dstBlockId), block.build())
}
}
}.groupByKey().mapValues { blocks =>
val builder = new RatingBlockBuilder[ID]
blocks.foreach(builder.merge)
builder.build()
}.setName("ratingBlocks")
}
/**
* Builder for uncompressed in-blocks of (srcId, dstEncodedIndex, rating) tuples.
*
* @param encoder encoder for dst indices
*/
private[recommendation] class UncompressedInBlockBuilder[@specialized(Int, Long) ID: ClassTag](
encoder: LocalIndexEncoder)(
implicit ord: Ordering[ID]) {
private val srcIds = mutable.ArrayBuilder.make[ID]
private val dstEncodedIndices = mutable.ArrayBuilder.make[Int]
private val ratings = mutable.ArrayBuilder.make[Float]
/**
* Adds a dst block of (srcId, dstLocalIndex, rating) tuples.
*
* @param dstBlockId dst block ID
* @param srcIds original src IDs
* @param dstLocalIndices dst local indices
* @param ratings ratings
*/
def add(
dstBlockId: Int,
srcIds: Array[ID],
dstLocalIndices: Array[Int],
ratings: Array[Float]): this.type = {
val sz = srcIds.length
require(dstLocalIndices.length == sz)
require(ratings.length == sz)
this.srcIds ++= srcIds
this.ratings ++= ratings
var j = 0
while (j < sz) {
this.dstEncodedIndices += encoder.encode(dstBlockId, dstLocalIndices(j))
j += 1
}
this
}
/** Builds a [[UncompressedInBlock]]. */
def build(): UncompressedInBlock[ID] = {
new UncompressedInBlock(srcIds.result(), dstEncodedIndices.result(), ratings.result())
}
}
/**
* A block of (srcId, dstEncodedIndex, rating) tuples stored in primitive arrays.
*/
private[recommendation] class UncompressedInBlock[@specialized(Int, Long) ID: ClassTag](
val srcIds: Array[ID],
val dstEncodedIndices: Array[Int],
val ratings: Array[Float])(
implicit ord: Ordering[ID]) {
    /** Size of the block. */
def length: Int = srcIds.length
/**
* Compresses the block into an [[InBlock]]. The algorithm is the same as converting a
* sparse matrix from coordinate list (COO) format into compressed sparse column (CSC) format.
* Sorting is done using Spark's built-in Timsort to avoid generating too many objects.
*/
def compress(): InBlock[ID] = {
val sz = length
assert(sz > 0, "Empty in-link block should not exist.")
sort()
val uniqueSrcIdsBuilder = mutable.ArrayBuilder.make[ID]
val dstCountsBuilder = mutable.ArrayBuilder.make[Int]
var preSrcId = srcIds(0)
uniqueSrcIdsBuilder += preSrcId
var curCount = 1
var i = 1
var j = 0
while (i < sz) {
val srcId = srcIds(i)
if (srcId != preSrcId) {
uniqueSrcIdsBuilder += srcId
dstCountsBuilder += curCount
preSrcId = srcId
j += 1
curCount = 0
}
curCount += 1
i += 1
}
dstCountsBuilder += curCount
val uniqueSrcIds = uniqueSrcIdsBuilder.result()
      val numUniqueSrcIds = uniqueSrcIds.length
      val dstCounts = dstCountsBuilder.result()
      val dstPtrs = new Array[Int](numUniqueSrcIds + 1)
      var sum = 0
      i = 0
      while (i < numUniqueSrcIds) {
sum += dstCounts(i)
i += 1
dstPtrs(i) = sum
}
InBlock(uniqueSrcIds, dstPtrs, dstEncodedIndices, ratings)
}
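    // Illustrative example: if the sorted srcIds are [7, 7, 9], compress() yields
    // InBlock(srcIds = [7, 9], dstPtrs = [0, 2, 3], ...), i.e. the dst entries of src 7
    // occupy positions [0, 2) and those of src 9 occupy [2, 3).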
private def sort(): Unit = {
val sz = length
// Since there might be interleaved log messages, we insert a unique id for easy pairing.
val sortId = Utils.random.nextInt()
logDebug(s"Start sorting an uncompressed in-block of size $sz. (sortId = $sortId)")
val start = System.nanoTime()
val sorter = new Sorter(new UncompressedInBlockSort[ID])
sorter.sort(this, 0, length, Ordering[KeyWrapper[ID]])
val duration = (System.nanoTime() - start) / 1e9
logDebug(s"Sorting took $duration seconds. (sortId = $sortId)")
}
}
/**
* A wrapper that holds a primitive key.
*
* @see [[UncompressedInBlockSort]]
*/
private class KeyWrapper[@specialized(Int, Long) ID: ClassTag](
implicit ord: Ordering[ID]) extends Ordered[KeyWrapper[ID]] {
var key: ID = _
override def compare(that: KeyWrapper[ID]): Int = {
ord.compare(key, that.key)
}
def setKey(key: ID): this.type = {
this.key = key
this
}
}
/**
* [[SortDataFormat]] of [[UncompressedInBlock]] used by [[Sorter]].
*/
private class UncompressedInBlockSort[@specialized(Int, Long) ID: ClassTag](
implicit ord: Ordering[ID])
extends SortDataFormat[KeyWrapper[ID], UncompressedInBlock[ID]] {
override def newKey(): KeyWrapper[ID] = new KeyWrapper()
override def getKey(
data: UncompressedInBlock[ID],
pos: Int,
reuse: KeyWrapper[ID]): KeyWrapper[ID] = {
if (reuse == null) {
new KeyWrapper().setKey(data.srcIds(pos))
} else {
reuse.setKey(data.srcIds(pos))
}
}
override def getKey(
data: UncompressedInBlock[ID],
pos: Int): KeyWrapper[ID] = {
getKey(data, pos, null)
}
private def swapElements[@specialized(Int, Float) T](
data: Array[T],
pos0: Int,
pos1: Int): Unit = {
val tmp = data(pos0)
data(pos0) = data(pos1)
data(pos1) = tmp
}
override def swap(data: UncompressedInBlock[ID], pos0: Int, pos1: Int): Unit = {
swapElements(data.srcIds, pos0, pos1)
swapElements(data.dstEncodedIndices, pos0, pos1)
swapElements(data.ratings, pos0, pos1)
}
override def copyRange(
src: UncompressedInBlock[ID],
srcPos: Int,
dst: UncompressedInBlock[ID],
dstPos: Int,
length: Int): Unit = {
System.arraycopy(src.srcIds, srcPos, dst.srcIds, dstPos, length)
System.arraycopy(src.dstEncodedIndices, srcPos, dst.dstEncodedIndices, dstPos, length)
System.arraycopy(src.ratings, srcPos, dst.ratings, dstPos, length)
}
override def allocate(length: Int): UncompressedInBlock[ID] = {
new UncompressedInBlock(
new Array[ID](length), new Array[Int](length), new Array[Float](length))
}
override def copyElement(
src: UncompressedInBlock[ID],
srcPos: Int,
dst: UncompressedInBlock[ID],
dstPos: Int): Unit = {
dst.srcIds(dstPos) = src.srcIds(srcPos)
dst.dstEncodedIndices(dstPos) = src.dstEncodedIndices(srcPos)
dst.ratings(dstPos) = src.ratings(srcPos)
}
}
/**
* Creates in-blocks and out-blocks from rating blocks.
*
* @param prefix prefix for in/out-block names
* @param ratingBlocks rating blocks
* @param srcPart partitioner for src IDs
* @param dstPart partitioner for dst IDs
* @return (in-blocks, out-blocks)
*/
private def makeBlocks[ID: ClassTag](
prefix: String,
ratingBlocks: RDD[((Int, Int), RatingBlock[ID])],
srcPart: Partitioner,
dstPart: Partitioner,
storageLevel: StorageLevel)(
implicit srcOrd: Ordering[ID]): (RDD[(Int, InBlock[ID])], RDD[(Int, OutBlock)]) = {
val inBlocks = ratingBlocks.map {
case ((srcBlockId, dstBlockId), RatingBlock(srcIds, dstIds, ratings)) =>
// The implementation is a faster version of
// val dstIdToLocalIndex = dstIds.toSet.toSeq.sorted.zipWithIndex.toMap
val start = System.nanoTime()
val dstIdSet = new OpenHashSet[ID](1 << 20)
dstIds.foreach(dstIdSet.add)
val sortedDstIds = new Array[ID](dstIdSet.size)
var i = 0
var pos = dstIdSet.nextPos(0)
while (pos != -1) {
sortedDstIds(i) = dstIdSet.getValue(pos)
pos = dstIdSet.nextPos(pos + 1)
i += 1
}
assert(i == dstIdSet.size)
Sorting.quickSort(sortedDstIds)
val dstIdToLocalIndex = new OpenHashMap[ID, Int](sortedDstIds.length)
i = 0
while (i < sortedDstIds.length) {
dstIdToLocalIndex.update(sortedDstIds(i), i)
i += 1
}
logDebug(
"Converting to local indices took " + (System.nanoTime() - start) / 1e9 + " seconds.")
val dstLocalIndices = dstIds.map(dstIdToLocalIndex.apply)
(srcBlockId, (dstBlockId, srcIds, dstLocalIndices, ratings))
}.groupByKey(new ALSPartitioner(srcPart.numPartitions))
.mapValues { iter =>
val builder =
new UncompressedInBlockBuilder[ID](new LocalIndexEncoder(dstPart.numPartitions))
iter.foreach { case (dstBlockId, srcIds, dstLocalIndices, ratings) =>
builder.add(dstBlockId, srcIds, dstLocalIndices, ratings)
}
builder.build().compress()
}.setName(prefix + "InBlocks")
.persist(storageLevel)
val outBlocks = inBlocks.mapValues { case InBlock(srcIds, dstPtrs, dstEncodedIndices, _) =>
val encoder = new LocalIndexEncoder(dstPart.numPartitions)
val activeIds = Array.fill(dstPart.numPartitions)(mutable.ArrayBuilder.make[Int])
var i = 0
val seen = new Array[Boolean](dstPart.numPartitions)
while (i < srcIds.length) {
var j = dstPtrs(i)
ju.Arrays.fill(seen, false)
while (j < dstPtrs(i + 1)) {
val dstBlockId = encoder.blockId(dstEncodedIndices(j))
if (!seen(dstBlockId)) {
activeIds(dstBlockId) += i // add the local index in this out-block
seen(dstBlockId) = true
}
j += 1
}
i += 1
}
activeIds.map { x =>
x.result()
}
}.setName(prefix + "OutBlocks")
.persist(storageLevel)
(inBlocks, outBlocks)
}
/**
* Compute dst factors by constructing and solving least square problems.
*
* @param srcFactorBlocks src factors
* @param srcOutBlocks src out-blocks
* @param dstInBlocks dst in-blocks
* @param rank rank
* @param regParam regularization constant
* @param srcEncoder encoder for src local indices
* @param implicitPrefs whether to use implicit preference
* @param alpha the alpha constant in the implicit preference formulation
* @param solver solver for least squares problems
* @return dst factors
*/
private def computeFactors[ID](
srcFactorBlocks: RDD[(Int, FactorBlock)],
srcOutBlocks: RDD[(Int, OutBlock)],
dstInBlocks: RDD[(Int, InBlock[ID])],
rank: Int,
regParam: Double,
srcEncoder: LocalIndexEncoder,
implicitPrefs: Boolean = false,
alpha: Double = 1.0,
solver: LeastSquaresNESolver): RDD[(Int, FactorBlock)] = {
val numSrcBlocks = srcFactorBlocks.partitions.length
val YtY = if (implicitPrefs) Some(computeYtY(srcFactorBlocks, rank)) else None
val srcOut = srcOutBlocks.join(srcFactorBlocks).flatMap {
case (srcBlockId, (srcOutBlock, srcFactors)) =>
srcOutBlock.view.zipWithIndex.map { case (activeIndices, dstBlockId) =>
(dstBlockId, (srcBlockId, activeIndices.map(idx => srcFactors(idx))))
}
}
val merged = srcOut.groupByKey(new ALSPartitioner(dstInBlocks.partitions.length))
dstInBlocks.join(merged).mapValues {
case (InBlock(dstIds, srcPtrs, srcEncodedIndices, ratings), srcFactors) =>
val sortedSrcFactors = new Array[FactorBlock](numSrcBlocks)
srcFactors.foreach { case (srcBlockId, factors) =>
sortedSrcFactors(srcBlockId) = factors
}
val dstFactors = new Array[Array[Float]](dstIds.length)
var j = 0
val ls = new NormalEquation(rank)
while (j < dstIds.length) {
ls.reset()
if (implicitPrefs) {
ls.merge(YtY.get)
}
var i = srcPtrs(j)
var numExplicits = 0
while (i < srcPtrs(j + 1)) {
val encoded = srcEncodedIndices(i)
val blockId = srcEncoder.blockId(encoded)
val localIndex = srcEncoder.localIndex(encoded)
val srcFactor = sortedSrcFactors(blockId)(localIndex)
val rating = ratings(i)
if (implicitPrefs) {
// Extension to the original paper to handle b < 0. confidence is a function of |b|
// instead so that it is never negative. c1 is confidence - 1.0.
val c1 = alpha * math.abs(rating)
// For rating <= 0, the corresponding preference is 0. So the term below is only added
// for rating > 0. Because YtY is already added, we need to adjust the scaling here.
if (rating > 0) {
numExplicits += 1
ls.add(srcFactor, (c1 + 1.0) / c1, c1)
}
} else {
ls.add(srcFactor, rating)
numExplicits += 1
}
i += 1
}
// Weight lambda by the number of explicit ratings based on the ALS-WR paper.
dstFactors(j) = solver.solve(ls, numExplicits * regParam)
j += 1
}
dstFactors
}
}
/**
* Computes the Gramian matrix of user or item factors, which is only used in implicit preference.
* Caching of the input factors is handled in [[ALS#train]].
*/
private def computeYtY(factorBlocks: RDD[(Int, FactorBlock)], rank: Int): NormalEquation = {
factorBlocks.values.aggregate(new NormalEquation(rank))(
seqOp = (ne, factors) => {
factors.foreach(ne.add(_, 0.0))
ne
},
combOp = (ne1, ne2) => ne1.merge(ne2))
}
/**
* Encoder for storing (blockId, localIndex) into a single integer.
*
* We use the leading bits (including the sign bit) to store the block id and the rest to store
* the local index. This is based on the assumption that users/items are approximately evenly
* partitioned. With this assumption, we should be able to encode two billion distinct values.
*
* @param numBlocks number of blocks
*/
private[recommendation] class LocalIndexEncoder(numBlocks: Int) extends Serializable {
require(numBlocks > 0, s"numBlocks must be positive but found $numBlocks.")
private[this] final val numLocalIndexBits =
math.min(java.lang.Integer.numberOfLeadingZeros(numBlocks - 1), 31)
private[this] final val localIndexMask = (1 << numLocalIndexBits) - 1
/** Encodes a (blockId, localIndex) into a single integer. */
def encode(blockId: Int, localIndex: Int): Int = {
require(blockId < numBlocks)
require((localIndex & ~localIndexMask) == 0)
(blockId << numLocalIndexBits) | localIndex
}
/** Gets the block id from an encoded index. */
@inline
def blockId(encoded: Int): Int = {
encoded >>> numLocalIndexBits
}
/** Gets the local index from an encoded index. */
@inline
def localIndex(encoded: Int): Int = {
encoded & localIndexMask
}
}
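  // Worked example (assuming numBlocks = 10): numberOfLeadingZeros(9) = 28, so the low
  // 28 bits hold the local index; encode(3, 5) == (3 << 28) | 5, from which blockId
  // recovers 3 and localIndex recovers 5.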
/**
* Partitioner used by ALS. We require that getPartition is a projection. That is, for any key k,
* we have getPartition(getPartition(k)) = getPartition(k). Since the default HashPartitioner
* satisfies this requirement, we simply use a type alias here.
*/
private[recommendation] type ALSPartitioner = org.apache.spark.HashPartitioner
/**
   * Private function to clean up all of the shuffle files from the dependencies and their parents.
*/
private[spark] def cleanShuffleDependencies[T](
sc: SparkContext,
deps: Seq[Dependency[_]],
blocking: Boolean = false): Unit = {
// If there is no reference tracking we skip clean up.
sc.cleaner.foreach { cleaner =>
/**
* Clean the shuffles & all of its parents.
*/
def cleanEagerly(dep: Dependency[_]): Unit = {
if (dep.isInstanceOf[ShuffleDependency[_, _, _]]) {
val shuffleId = dep.asInstanceOf[ShuffleDependency[_, _, _]].shuffleId
cleaner.doCleanupShuffle(shuffleId, blocking)
}
val rdd = dep.rdd
val rddDeps = rdd.dependencies
if (rdd.getStorageLevel == StorageLevel.NONE && rddDeps != null) {
rddDeps.foreach(cleanEagerly)
}
}
deps.foreach(cleanEagerly)
}
}
}
|
xieguobin/Spark_2.0.0_cn1
|
ml/recommendation/ALS.scala
|
Scala
|
apache-2.0
| 49,989 |
package com.signalcollect.dcop.scalability.conflicts
import com.signalcollect.dcop.benchmark.BenchmarkModes
import com.signalcollect.ExecutionConfiguration
import com.signalcollect.dcop.benchmark.BenchmarkConfiguration
import com.signalcollect.dcop.evaluation.maxsum.MaxSumAlgorithm
import com.signalcollect.configuration.ExecutionMode
import com.signalcollect.dcop.evaluation.maxsum.MaxSumConflictAggregationOperation
import com.signalcollect.dcop.evaluation.dsa.DSAConflictAggregationOperation
import com.signalcollect.dcop.evaluation.dsa.DSAAlgorithm
import com.signalcollect.dcop.evaluation.candidates.DSAVariant
import com.signalcollect.dcop.evaluation.bestresponse.BRAlgorithm
import com.signalcollect.dcop.evaluation.bestresponse.BRConflictAggregationOperation
import com.signalcollect.dcop.io.ResultWriter
import com.signalcollect.dcop.graphs.FactorGraphProvider
import com.signalcollect.dcop.io.FileGraphReader
import com.signalcollect.dcop.scalability.DistributedBenchmarkExecutable
import com.signalcollect.dcop.scalability.AlgorithmType
import com.signalcollect.dcop.graphs.DSAGraphProvider
import com.signalcollect.dcop.graphs.BRGraphProvider
import com.signalcollect.dcop.io.DropboxResultHandler
object SyncConflictsOverStepsScaling extends App {
/*
* general properties
*/
val fileName = "graphs/scaling/synthetic/graph300.txt"
val graphName = "graph300"
val isAdopt = false
val graphSize = 300
val steps = 2
val benchmarkMode = BenchmarkModes.SyncConflictsOverSteps
//------------------------------------------------
/*
* Properties for Graph loading
*/
// val factorGraphProvider = new FactorGraphProvider(new FileGraphReader, fileName)
val dsaaGraphProvider = new DSAGraphProvider(graphSize,0.45, DSAVariant.A,fileName, isAdopt)
// val dsabGraphProvider = new DSAGraphProvider(graphSize,0.45, DSAVariant.B,fileName, isAdopt)
// val brGraphProvider = new BRGraphProvider(graphSize,0.6,fileName, isAdopt)
//------------------------------------------------
// /*
// * properties for MaxSum
// */
// val maxSumName = "MaxSum"
// val MSexecutionConfig = ExecutionConfiguration.withExecutionMode(ExecutionMode.Synchronous).withCollectThreshold(0).withSignalThreshold(0).withStepsLimit(1)
// val MSbenchmarkConfig = new BenchmarkConfiguration(MSexecutionConfig, fileName, isAdopt, steps, new MaxSumConflictAggregationOperation, benchmarkMode)
// val maxSumExecutable = new DistributedBenchmarkExecutable("SyncMaxSum",
// MSexecutionConfig,
// MSbenchmarkConfig,
// factorGraphProvider,
// AlgorithmType.MS,
// null)
/*
* properties for DSA-A and DSA-B
*/
val dsaAname = "DSAA"
val dsaBname = "DSAB"
val DSAexecutionConfig = ExecutionConfiguration.withExecutionMode(ExecutionMode.Synchronous).withCollectThreshold(0).withSignalThreshold(0).withStepsLimit(1)
val DSAbenchmarkConfig = new BenchmarkConfiguration(DSAexecutionConfig,fileName,isAdopt,steps,new DSAConflictAggregationOperation,benchmarkMode)
val dsaAexecutable = new DistributedBenchmarkExecutable("DSA-A",
DSAexecutionConfig,
DSAbenchmarkConfig,
dsaaGraphProvider,
AlgorithmType.DSAA,
null,
graphSize,
0.45)
// val dsaBexecutable = new DistributedBenchmarkExecutable("DSA-B",
// DSAexecutionConfig,
// DSAbenchmarkConfig,
// dsabGraphProvider,
// AlgorithmType.DSAB,
// null,
// graphSize,
// 0.45)
//
//
// /*
// * properties for Best-Response
// */
// val brName = "BestResponse"
// val BRexecutionConfig = ExecutionConfiguration.withExecutionMode(ExecutionMode.Synchronous).withCollectThreshold(0).withSignalThreshold(0).withStepsLimit(1)
// val BRbenchmarkConfig = new BenchmarkConfiguration(BRexecutionConfig,fileName,isAdopt,steps,new BRConflictAggregationOperation,benchmarkMode)
// val brExecutable = new DistributedBenchmarkExecutable("SyncMaxSum",
// BRexecutionConfig,
// BRbenchmarkConfig,
// brGraphProvider,
// AlgorithmType.BR,
// null,
// graphSize,
// 0.6)
/*
* result Containers
*/
var maxSumConflicts: List[Tuple2[Long, Int]] = null
var dsaAconflicts: List[Tuple2[Long, Int]] = null
var dsaBconflicts: List[Tuple2[Long, Int]] = null
var bestResponseConflicts: List[Tuple2[Long, Int]] = null
// /*
// * run evaluation for MaxSum:
// */
// println("Evaluating Max-Sum...")
// maxSumConflicts = maxSumExecutable.run.asInstanceOf[List[Tuple2[Long, Int]]]
// println("Max-Sum evaluted.")
// printConflictList(maxSumConflicts)
// handleResult(maxSumConflicts, benchmarkMode)
// println("-----------------------")
/*
* run evaluation for DSA-A
*/
println("Evaluating DSA-A...")
dsaAconflicts = dsaAexecutable.run.asInstanceOf[List[Tuple2[Long, Int]]]
println("DSA-A evaluated.")
printConflictList(dsaAconflicts)
handleResult(dsaAconflicts, benchmarkMode)
println("-----------------------")
// /*
// * run evaluation for DSA-B
// */
// println("Evaluating DSA-B...")
// dsaBconflicts = dsaBexecutable.run.asInstanceOf[List[Tuple2[Long, Int]]]
// println("DSA-B evaluated.")
// printConflictList(dsaBconflicts)
// handleResult(dsaBconflicts, benchmarkMode)
// println("-----------------------")
//
// /*
// * run evaluation for Best-Response
// */
// println("Evaluating Best-Response...")
// bestResponseConflicts = brExecutable.run.asInstanceOf[List[Tuple2[Long, Int]]]
// println("Best-Response evaluated.")
// printConflictList(bestResponseConflicts)
// handleResult(bestResponseConflicts, benchmarkMode)
// println("-----------------------")
System.exit(0)
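// Writes results to a local file via ResultWriter (not invoked in this run; results are pushed to Dropbox through handleResult below).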
def storeResultsToFile(results: Any, algorithm: String) = {
val resultWriter = new ResultWriter(benchmarkMode, graphName, algorithm, results)
resultWriter.write()
}
def printConflictList(list: List[Tuple2[Long, Int]]) = {
list.foreach { el =>
println(el._1 + " - " + el._2)
}
}
def handleResult(results : Any, mode : BenchmarkModes.Value) = {
val dbx = new DropboxResultHandler("graph300SyncConflictsSteps", "scaling/steps/conflicts",mode)
dbx.handleResult(results)
}
}
|
gmazlami/dcop-maxsum
|
src/main/scala/com/signalcollect/dcop/scalability/conflicts/SyncConflictsOverStepsScaling.scala
|
Scala
|
apache-2.0
| 6,492 |
/*
* Copyright 2018 Vladimir Konstantinov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.illfaku.korro.config
import com.github.illfaku.korro.util.configOptions
import com.typesafe.config.Config
/**
* Configuration for the router that sets the actor at the specified actor path as the handler of matched requests.<br>
* It can be sent to the server actor to add a new route.<br>
* To remove all routes of an actor from the router, you should send a `RouteConfig` with the needed actor ref/path and
* `predicate = RequestPredicate.False`.
*
* @param actor Destination actor.
* @param predicate Predicate to test requests against.
* @param instructions Set of instructions for request handling.
*/
case class RouteConfig(
actor: RouteActor,
predicate: RequestPredicate = RequestPredicate.True,
instructions: List[HttpInstruction] = Nil
)
object RouteConfig {
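// Builds a RouteConfig from a Typesafe Config block: an "actor" path is required, while "predicate" and "instructions" are optional.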
def extract(config: Config): Option[RouteConfig] = {
config.findString("actor") map { actorPath =>
RouteConfig(
RouteActorPath(actorPath),
config.findConfig("predicate").map(RequestPredicate.extract)
.orElse(config.findString("predicate").map(RequestPredicate.parse))
.getOrElse(RequestPredicate.True),
config.findConfig("instructions").map(HttpInstruction.extract).getOrElse(Nil)
)
}
}
}
|
oxy-development/korro
|
src/main/scala/com/github/illfaku/korro/config/RouteConfig.scala
|
Scala
|
apache-2.0
| 1,834 |
import scala.actors.Futures._
import nimrod.util._
val inv = opts.flag("inverse","Use the corpus in inverse mode")
val maxSize = opts.intValue("m","Maximum length",5)
val acceptanceRate = opts.doubleValue("a","The required percentage of alignment",0.75)
val alignFile = opts.roFile("alignFile","The file containing symmetrized alignments")
val corpusFile = opts.roFile("corpus","The corpus file")
val bothFile = opts.woFile("pairs","The file to write the accepted alignments to")
val foreignFile = opts.woFile("foreign","The file to write foreign frequencies to")
val translationFile = opts.woFile("translation","The file to write translation frequencies to")
opts.verify
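// Parses a symmetrized alignment line such as "0-0 1-2 2-1" into (left, right) token index pairs.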
def alignFromString(astr : String) : Seq[(Int,Int)] = (astr split "\\\\s+" filter (_ != "") map {
aelem => aelem split "-" match {
case Array(a1,a2) => a1.toInt -> a2.toInt
}
}).toSeq
case class Alignment(val align : Seq[(Int,Int)]) {
lazy val lMin = left.min
lazy val lMax = left.max
lazy val lSize = left.toSet.size
lazy val rMin = right.min
lazy val rMax = right.max
lazy val rSize = right.toSet.size
lazy val left = align.map(_._1)
lazy val right = align.map(_._2)
def lText(words : Array[String]) = {
words.slice(lMin,lMax+1).mkString(" ")
}
def rText(words : Array[String]) = {
words.slice(rMin,rMax+1).mkString(" ")
}
def normalize = Alignment(align map {
case (l,r) => (l - lMin,r - rMin)
})
}
def sliceByAlign(strs : Array[String], align : Seq[Int]) = strs.slice(align.min,align.max+1).mkString(" ")
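// An alignment is kept only if both sides span at most maxSize tokens and the aligned tokens cover at least acceptanceRate of each side's span.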
def goodAlign(a : Alignment) = a.lSize <= maxSize && a.rSize <= maxSize && a.lSize.toDouble / (a.lMax - a.lMin) >= acceptanceRate && a.rSize.toDouble / (a.rMax - a.rMin) >= acceptanceRate
namedTask("Simple phrase extraction") {
val phraseFreq = new util.DiskCounter[(String,String)]()
val foreignFreq = new util.DiskCounter[String]()
val transFreq = new util.DiskCounter[String]()
val alignIn = opts.openInput(alignFile).getLines
val corpusIn = opts.openInput(corpusFile).getLines
(alignIn zip corpusIn).toStream.par.foreach {
case (aline,cline) => {
val leftAligns = collection.mutable.Set[Seq[Int]]()
val rightAligns = collection.mutable.Set[Seq[Int]]()
val clines = cline split " \\\\|\\\\|\\\\| "
if(clines.size != 2 || (aline matches "\\\\s+")) {
System.err.println("Bad line: " + cline)
} else {
val fSent = (if(inv) { clines(0) } else { clines(1) }) split " "
val tSent = (if(inv) { clines(1) } else { clines(0) }) split " "
val aligns = alignFromString(aline)
val allAligns = (0 until aligns.size).toStream flatMap ( a => {
(a+1 until aligns.size).toStream map {
b => Alignment(aligns.slice(a,b))
}
})
val goodAligns = allAligns filter { goodAlign(_) }
val texts = goodAligns map { a => (a.lText(fSent),a.rText(tSent)) }
for(a <- goodAligns) {
phraseFreq.inc((a.lText(fSent),a.rText(tSent)))
leftAligns += a.left
rightAligns += a.right
}
for(a <- leftAligns) {
foreignFreq.inc(sliceByAlign(fSent,a))
}
for(a <- rightAligns) {
transFreq.inc(sliceByAlign(tSent,a))
}
}
}
}
{
val out = opts.openOutput(bothFile)
for(((f,t),s) <- phraseFreq.values) {
out.println("%s ||| %s ||| %d" format (f,t,s))
}
out.flush
out.close
}
{
val out = opts.openOutput(foreignFile)
for((f,s) <- foreignFreq.values) {
out.println("%s ||| %d" format (f,s))
}
out.flush
out.close
}
{
val out = opts.openOutput(translationFile)
for((t,s) <- transFreq.values) {
out.println("%s ||| %d" format (t,s))
}
out.flush
out.close
}
phraseFreq.close
foreignFreq.close
transFreq.close
}
|
jmccrae/nimrod
|
scripts/simple-phrase-extractor.scala
|
Scala
|
apache-2.0
| 3,855 |
package coursier.cli.install
object ShellUtil {
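  // Best-effort guess of the current shell's rc file, based on the last path segment of the SHELL environment variable.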
def rcFileOpt: Option[String] =
Option(System.getenv("SHELL")).map(_.split('/').last).flatMap {
case "zsh" => Some("~/.zshrc")
case "bash" => Some("~/.bashrc")
case _ => None
}
}
|
alexarchambault/coursier
|
modules/cli/src/main/scala/coursier/cli/install/ShellUtil.scala
|
Scala
|
apache-2.0
| 264 |
package com.twitter.finatra.benchmarks
import com.twitter.finagle.httpx.{Method, Request, Response}
import com.twitter.finatra.http.internal.routing.{Route, RoutingService}
import com.twitter.inject.requestscope.{FinagleRequestScope, FinagleRequestScopeFilter}
import com.twitter.util.Future
import org.openjdk.jmh.annotations._
@State(Scope.Thread)
class FinagleRequestScopeBenchmark {
def defaultCallback(request: Request) = {
Future.value(Response())
}
val route = Route(
name = "groups",
method = Method.Get,
path = "/groups/",
callback = defaultCallback,
annotations = Seq(),
requestClass = classOf[Request],
responseClass = classOf[Response])
val routingController = new RoutingService(routes = Seq(route))
val getRequest = Request("/groups/")
val finagleRequestScope = new FinagleRequestScope()
val finagleRequestScopeFilter =
new FinagleRequestScopeFilter[Request, Response](finagleRequestScope)
val filtersAndService =
finagleRequestScopeFilter andThen
routingController
@Benchmark
def timeServiceWithRequestScopeFilter() = {
filtersAndService.apply(getRequest)
}
}
|
deanh/finatra
|
benchmarks/src/main/scala/com/twitter/finatra/benchmarks/FinagleRequestScopeBenchmark.scala
|
Scala
|
apache-2.0
| 1,157 |
package cs220
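// Minimal thread example: each HelloThread prints its greeting from its own thread once started.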
case class HelloThread(s: String) extends Thread {
override def run = println(s"Hello from a thread: $s!")
}
object ThreadExample {
def thread(s: String) = HelloThread(s)
def main(args: Array[String]) = {
thread("Hazzah!").start
}
}
|
umass-cs-220/week-11-parallel
|
code/threads/src/main/scala/cs220/ThreadExample01.scala
|
Scala
|
apache-2.0
| 262 |
package uk.gov.gds.ier.transaction.overseas.nino
import uk.gov.gds.ier.transaction.overseas.InprogressOverseas
import uk.gov.gds.ier.step.StepTemplate
trait NinoMustache extends StepTemplate[InprogressOverseas] {
val title = "What is your National Insurance number?"
case class NinoModel(
question:Question,
nino: Field,
noNinoReason: Field
) extends MustacheData
val mustache = MustacheTemplate("overseas/nino") { (form, post) =>
implicit val progressForm = form
NinoModel(
question = Question(
postUrl = post.url,
errorMessages = form.globalErrors.map{ _.message },
title = title
),
nino = TextField(
key = keys.nino.nino
),
noNinoReason = TextField(
key = keys.nino.noNinoReason
)
)
}
}
|
michaeldfallen/ier-frontend
|
app/uk/gov/gds/ier/transaction/overseas/nino/NinoMustache.scala
|
Scala
|
mit
| 814 |
class A
{
implicit def e: E = new E
def x(i: Int)(implicit y: E): String = ""
}
class E
|
twitter-forks/sbt
|
sbt/src/sbt-test/source-dependencies/implicit-params/A.scala
|
Scala
|
bsd-3-clause
| 89 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis
import controls.ViewTrait
import org.dom4j.Element
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.common.ValidationException
/**
* Trait representing a variable element, whether in the model or in the view.
*/
trait VariableAnalysisTrait extends SimpleElementAnalysis with VariableTrait {
variableSelf ⇒
import VariableAnalysis._
// Variable name and value
val name = element.attributeValue(NAME_QNAME)
if (name eq null)
throw new ValidationException(s"`${element.getQualifiedName}` element must have a `name` attribute", ElementAnalysis.createLocationData(element))
// Lazy because accessing scopeModel
private lazy val nestedAnalysis =
valueOrSequenceElement(element) map { valueElement ⇒
new SimpleElementAnalysis(staticStateContext, valueElement, Some(variableSelf), None, getChildElementScope(valueElement)) {
nestedSelf ⇒
override protected def computeValueAnalysis =
valueOrSelectAttribute(element) match {
case Some(value) ⇒ Some(analyzeXPath(nestedSelf.getChildrenContext, value))
case None ⇒ Some(StringAnalysis()) // TODO: store constant value?
}
// If in same scope as xf:var, in-scope variables are the same as xxf:var because we don't
// want the variable defined by xf:var to be in-scope for xxf:value. Otherwise, use
// default algorithm.
// TODO: This is bad architecture as we duplicate the logic in ViewTrait.
override lazy val inScopeVariables =
if (variableSelf.scope == nestedSelf.scope)
variableSelf.inScopeVariables
else
getRootVariables ++ nestedSelf.treeInScopeVariables
override protected def getRootVariables = variableSelf match {
case _: ViewTrait ⇒ nestedSelf.model match { case Some(model) ⇒ model.variablesMap; case None ⇒ Map() }
case _ ⇒ Map()
}
}
}
// Scope of xf:var OR nested xxf:value if present
lazy val (hasNestedValue, valueScope, valueNamespaceMapping, valueStaticId) = nestedAnalysis match {
case Some(nestedAnalysis) ⇒
(true, nestedAnalysis.scope, nestedAnalysis.namespaceMapping, nestedAnalysis.staticId)
case None ⇒
(false, scope, namespaceMapping, staticId)
}
def variableAnalysis = getValueAnalysis
override def computeValueAnalysis =
nestedAnalysis match {
case Some(nestedAnalysis) ⇒
// Value is provided by nested xxf:value/@value
nestedAnalysis.analyzeXPath()
nestedAnalysis.getValueAnalysis
case None ⇒
// No nested xxf:value element
valueOrSelectAttribute(element) match {
case Some(value) ⇒ Some(analyzeXPath(getChildrenContext, value))
case _ ⇒ Some(StringAnalysis()) // TODO: store constant value?
}
}
}
object VariableAnalysis {
def valueOrSelectAttributeJava(element: Element) =
valueOrSelectAttribute(element).orNull
def valueOrSelectAttribute(element: Element) =
Option(element.attributeValue(VALUE_QNAME)) orElse Option(element.attributeValue(SELECT_QNAME))
def valueOrSequenceElementJava(element: Element) =
valueOrSequenceElement(element).orNull
def valueOrSequenceElement(element: Element) =
Option(element.element(XXFORMS_VALUE_QNAME)) orElse Option(element.element(XXFORMS_SEQUENCE_QNAME))
// See https://github.com/orbeon/orbeon-forms/issues/1104 and https://github.com/orbeon/orbeon-forms/issues/1132
def variableScopesModelVariables(v: VariableAnalysisTrait) =
v.isInstanceOf[ViewTrait] || v.model != (v.parent flatMap (_.model))
}
|
wesley1001/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/analysis/VariableAnalysisTrait.scala
|
Scala
|
lgpl-2.1
| 4,372 |
package de.gwdg.europeanaqa.spark.saturation
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.sum
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.functions.first
import org.apache.spark.sql.functions.regexp_replace
import org.apache.spark.sql.types.DoubleType
import org.apache.spark.sql.types.IntegerType
object SaturationWithHistogramForAllCollections {
def main(args: Array[String]): Unit = {
val log = org.apache.log4j.LogManager.getLogger("SaturationWithHistogramForAllCollections")
val spark = SparkSession.builder.appName("SaturationWithHistogramForAll").getOrCreate()
import spark.implicits._
val configMap : Map[String, String] = spark.conf.getAll
for ((key, value) <- configMap) {
log.info(s"key: $key, value: $value")
}
val inputFile = args(0)
val outputFile = args(1)
log.info("reading the data")
val dataWithoutHeader = spark.read.
option("header", "false").
option("inferSchema", "true").
format("csv").
load(inputFile)
log.info("setting names")
val ids = Seq("id")
val collectionIds = Seq("dataset", "dataProvider")
val individualFieldNames = Seq(
"dc_title", "dcterms_alternative", "dc_description", "dc_creator", "dc_publisher",
"dc_contributor", "dc_type", "dc_identifier", "dc_language", "dc_coverage",
"dcterms_temporal", "dcterms_spatial", "dc_subject", "dc_date", "dcterms_created",
"dcterms_issued", "dcterms_extent", "dcterms_medium", "dcterms_provenance", "dcterms_hasPart",
"dcterms_isPartOf", "dc_format", "dc_source", "dc_rights", "dc_relation",
"edm_year", "edm_userTag", "dcterms_conformsTo", "dcterms_hasFormat", "dcterms_hasVersion",
"dcterms_isFormatOf", "dcterms_isReferencedBy", "dcterms_isReplacedBy", "dcterms_isRequiredBy",
"dcterms_isVersionOf", "dcterms_references", "dcterms_replaces", "dcterms_requires",
"dcterms_tableOfContents", "edm_currentLocation", "edm_hasMet", "edm_hasType", "edm_incorporates",
"edm_isDerivativeOf", "edm_isRelatedTo", "edm_isRepresentationOf", "edm_isSimilarTo",
"edm_isSuccessorOf", "edm_realizes", "edm_wasPresentAt"
)
val individualFields = individualFieldNames.
flatMap(i => Seq(s"provider_$i", s"europeana_$i")).
flatMap(i => Seq(s"${i}_taggedLiterals", s"${i}_languages", s"${i}_literalsPerLanguage"))
val genericFields = Seq(
"NumberOfLanguagesPerPropertyInProviderProxy",
"NumberOfLanguagesPerPropertyInEuropeanaProxy",
"NumberOfLanguagesPerPropertyInObject",
"TaggedLiteralsInProviderProxy",
"TaggedLiteralsInEuropeanaProxy",
"DistinctLanguageCountInProviderProxy",
"DistinctLanguageCountInEuropeanaProxy",
"TaggedLiteralsInObject",
"DistinctLanguageCountInObject",
"TaggedLiteralsPerLanguageInProviderProxy",
"TaggedLiteralsPerLanguageInEuropeanaProxy",
"TaggedLiteralsPerLanguageInObject"
)
val names = ids ++ collectionIds ++ individualFields ++ genericFields
val selectedNames = collectionIds ++ individualFields ++ genericFields
val data = dataWithoutHeader.toDF(names: _*).select(selectedNames.map(col): _*)
data.cache()
log.info("reading the data: done")
data.printSchema()
def toLongForm(df: DataFrame): DataFrame = {
val schema = df.schema
df.flatMap(row => {
val metric = row.getString(0)
(1 until row.size).map(i => {
(metric, schema.fieldNames(i), row.getString(i).toDouble)
})
}).toDF("metric", "field", "value")
}
def getDouble(first: Row): Double = {
if (first.schema.fields(0).dataType.equals(DoubleType)) {
first.getDouble(0)
} else {
first.getInt(0).toDouble
}
}
def getMedianFromHistogram(histogram: DataFrame, l: Long): Double = {
var first = histogram.filter($"start" <= l && $"end" >= l)
.select("label")
.first()
getDouble(first)
}
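// Median from the cumulative histogram: with the isUneven flag set, take the middle element; otherwise average the two elements around the middle.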
def calculateMedian(histogram: DataFrame, isUneven: Boolean, total: Long): Double = {
var l : Long = -1
var r : Long = -1
var median : Double = -1.0
if (isUneven) {
// log.info("- is uneven")
l = (total / 2)
r = l
median = getMedianFromHistogram(histogram, l)
} else {
// log.info("- is even")
l = (total / 2) - 1
r = l + 1
var lval = getMedianFromHistogram(histogram, l)
var rval = getMedianFromHistogram(histogram, r)
median = (lval + rval) / 2
}
return median
}
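// Builds a cumulative histogram of the given column: per-label counts plus running "start"/"end" offsets computed with a window sum.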
def createHistogram(input: DataFrame, fieldName: String): DataFrame = {
input
.groupBy(fieldName)
.count()
.toDF("label", "count")
.orderBy("label")
.withColumn("group", functions.lit(1))
.withColumn("end", sum("count")
.over(Window.partitionBy("group").orderBy($"label")))
.withColumn("start", (col("end") - col("count")))
}
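// Pivots the long-form statistics into one row per field with count, median, zerosPerc, mean, stddev, min and max columns, ordered by the derived main/source/type keys.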
def createWideDF(data: DataFrame): DataFrame = {
data.
filter(col("field") =!= "fake").
groupBy("field").
pivot("metric", Seq("count", "median", "zerosPerc", "mean", "stddev", "min", "max")).
agg(first("value")).
withColumn("source", regexp_replace(regexp_replace($"field", "europeana_.*", "b"), "provider_.*", "a")).
withColumn("type", regexp_replace(regexp_replace(regexp_replace($"field", ".*_taggedLiterals$", "a"), ".*_languages", "b"), ".*_literalsPerLanguage", "c")).
withColumn("main", regexp_replace($"field", "^(provider|europeana)_(.*)_(taggedLiterals|languages|literalsPerLanguage)$", "$2")).
orderBy("main", "source", "type").
select("field", "count", "median", "zerosPerc", "mean", "stddev", "min", "max")
}
// val tls = data.schema.fieldNames.filter(x => (x.startsWith("provider_") && x.endsWith("_taggedLiterals")))
// data.schema.fieldNames.filter(startsWith("provider_") && endsWith("_taggedLiterals"))
// data.select()
// provider_xxxx_taggedLiterals
// europeana_xxxx_taggedLiterals
def iterateFields(filtered: DataFrame, id: String) : Unit = {
var total = filtered.count()
var isUneven = (total == 1) || ((total / 2) == 1)
var stat2 = Seq(("fake", "fake", 0.0)).toDF("metric", "field", "value")
var numberOfFields = filtered.schema.fieldNames.size
for (i <- 2 to (filtered.schema.fieldNames.size - 1)) {
var median : Double = -1.0
var zerosPerc : Double = -1.0
var fieldName = filtered.schema.fieldNames(i);
var dataType = filtered.schema.fields(i).dataType;
log.info(s"# $i/$numberOfFields - calculating the median for $fieldName ($dataType) ")
var filterField = fieldName
if (filterField.endsWith("_languages"))
filterField = filterField.replace("_languages", "_taggedLiterals")
else if (filterField.endsWith("_literalsPerLanguage"))
filterField = filterField.replace("_literalsPerLanguage", "_taggedLiterals")
// log.info(s"- filterField: $filterField")
var existing = filtered.filter(col(filterField) > -1).select(fieldName)
total = existing.count()
isUneven = (total == 1) || ((total / 2) == 1)
var half = total / 2;
// log.info(s"- total: $total (half: $half) -> $isUneven")
if (total > 0) {
stat2 = stat2.union(toLongForm(existing.describe()))
var histogram = createHistogram(existing, fieldName)
median = calculateMedian(histogram, isUneven, total)
/*
var lowest = histogram.select("label").first();
if (dataType.equals(DoubleType))
log.info("- lowest: " + lowest.getDouble(0))
else
log.info("- lowest: " + lowest.getInt(0))
*/
var zeros = histogram.select("count").first().getLong(0)
zerosPerc = zeros * 100.0 / total
// log.info("- zerosPerc: " + zerosPerc)
// log.info("- median: " + median)
} else {
stat2 = stat2.union(Seq(
("count", fieldName, 0),
("mean", fieldName, 0),
("stddev", fieldName, 0),
("min", fieldName, 0),
("max", fieldName, 0)
).toDF("metric", "field", "value"))
}
// log.info(s"- $fieldName: $median (zeros: $zerosPerc%)")
stat2 = stat2.union(Seq(
("median", fieldName, median),
("zerosPerc", fieldName, zerosPerc)
).toDF("metric", "field", "value"))
}
val wideDf = createWideDF(stat2)
stat2.repartition(1).write.
option("header", "true").
mode(SaveMode.Overwrite).
csv(outputFile + "-" + id + "-longform")
wideDf.repartition(1).write.
option("header", "true").
mode(SaveMode.Overwrite).
csv(s"$outputFile-$id-csv")
spark.sparkContext.parallelize(List(wideDf.schema.fieldNames.mkString(","))).
repartition(1).
toDF().
write.
mode(SaveMode.Overwrite).
format("text").
save(outputFile + "-" + id + "-header")
log.info("write wideDf")
}
var datasets = data.groupBy("dataset").count().map{row => row.getInt(0)}.as[Int].collect
for (id <- datasets) {
log.info(s"processing dataset $id")
var filtered = data.filter($"dataset" === id);
filtered.cache()
var count = filtered.count()
println(s"size of $id: $count")
iterateFields(filtered, s"c-$id")
}
/*
var dataProviders = data.groupBy("dataProvider").count().map{row => row.getInt(0)}.as[Int].collect
for (id <- dataProviders) {
var filtered = data.filter($"dataProvider" === id);
filtered.cache()
var count = filtered.count()
println(s"$id: $count")
iterateFields(filtered, s"d-$id")
}
var cidsDids = data.groupBy("dataset", "dataProvider").count().map(row => (row.getInt(0), row.getInt(1))).collect
for (cid <- cidsDids) {
var c = cid._1
var d = cid._2
var filtered = data.filter($"dataset" === c && $"dataProvider" === d);
filtered.cache()
var count = filtered.count()
println(s"$c/$d: $count")
iterateFields(filtered, s"cd-$c-$d")
}
*/
}
}
|
pkiraly/europeana-qa-spark
|
scala/src/main/scala/de/gwdg/europeanaqa/spark/saturation/SaturationWithHistogramForAllCollections.scala
|
Scala
|
gpl-3.0
| 10,523 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events._
class TimesOnIntSpec extends FunSpec with SharedHelpers with TimesOnInt {
describe("The TimesOnInt trait") {
it("should allow people to repeat side effects a specified number of times") {
// Need to support this one, because someone may invoke times on an integer variable.
// Probably need to support 0 times as well, but should throw IAE if negative is passed.
var i = 0
0 times { i += 1 }
assert(i === 0)
1 times { i += 1 }
assert(i === 1)
2 times { i += 1 }
assert(i === 3)
3 times { i += 1 }
assert(i === 6)
4 times { i += 1 }
assert(i === 10)
90 times { i += 1 }
assert(i === 100)
}
it("should throw IllegalArgumentException if times is invoked on a negative integer") {
var i = 0
intercept[IllegalArgumentException] {
-1 times { i += 1 }
}
assert(i === 0)
}
}
}
|
hubertp/scalatest
|
src/test/scala/org/scalatest/TimesOnIntSpec.scala
|
Scala
|
apache-2.0
| 1,570 |
package me.jeffmay.neo4j.client.ws
import me.jeffmay.util.RunHooks
import play.api.libs.ws.ning.NingWSClient
import play.api.libs.ws.{WSClient, WSRequest}
object TestWSClient {
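  // Lazily starts a single NingWSClient and registers a shutdown hook so the underlying connection pool is closed on JVM exit.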
private lazy val client: NingWSClient = synchronized {
val started = NingWSClient()
RunHooks.addShutdownHook("TestNeo4jWSClient.close()") {
this.close()
}
started
}
object TestWS extends WSClient {
override def underlying[T]: T = client.asInstanceOf[T]
override def url(url: String): WSRequest = new DebugWSRequest(client.url(url), "TestNeo4jWSClient.TestWS")
override def close(): Unit = ()
}
def close(): Unit = {
println("Closing connection pool for TestNeo4jWSClient.TestWS")
client.close()
}
}
|
AudaxHealthInc/neo4j-scala-client
|
ws-test-util/src/main/scala/me/jeffmay/neo4j/client/ws/TestWSClient.scala
|
Scala
|
apache-2.0
| 736 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.jdbc
import java.sql.SQLException
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuilder
import org.apache.spark.internal.Logging
import org.apache.spark.sql.connector.catalog.{Identifier, NamespaceChange, SupportsNamespaces, Table, TableCatalog, TableChange}
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcOptionsInWrite, JDBCRDD, JdbcUtils}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
class JDBCTableCatalog extends TableCatalog with SupportsNamespaces with Logging {
private var catalogName: String = null
private var options: JDBCOptions = _
private var dialect: JdbcDialect = _
override def name(): String = {
require(catalogName != null, "The JDBC table catalog is not initialized")
catalogName
}
override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = {
assert(catalogName == null, "The JDBC table catalog is already initialized")
catalogName = name
val map = options.asCaseSensitiveMap().asScala.toMap
// The `JDBCOptions` checks the existence of the table option. This is required by JDBC v1, but
// JDBC V2 only knows the table option when loading a table. Here we put a table option with a
// fake value, so that it can pass the check of `JDBCOptions`.
this.options = new JDBCOptions(map + (JDBCOptions.JDBC_TABLE_NAME -> "__invalid_dbtable"))
dialect = JdbcDialects.get(this.options.url)
}
override def listTables(namespace: Array[String]): Array[Identifier] = {
checkNamespace(namespace)
JdbcUtils.withConnection(options) { conn =>
val schemaPattern = if (namespace.length == 1) namespace.head else null
val rs = conn.getMetaData
.getTables(null, schemaPattern, "%", Array("TABLE"));
new Iterator[Identifier] {
def hasNext = rs.next()
def next() = Identifier.of(namespace, rs.getString("TABLE_NAME"))
}.toArray
}
}
override def tableExists(ident: Identifier): Boolean = {
checkNamespace(ident.namespace())
val writeOptions = new JdbcOptionsInWrite(
options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident)))
JdbcUtils.classifyException(s"Failed table existence check: $ident", dialect) {
JdbcUtils.withConnection(options)(JdbcUtils.tableExists(_, writeOptions))
}
}
override def dropTable(ident: Identifier): Boolean = {
checkNamespace(ident.namespace())
JdbcUtils.withConnection(options) { conn =>
try {
JdbcUtils.dropTable(conn, getTableName(ident), options)
true
} catch {
case _: SQLException => false
}
}
}
override def renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = {
checkNamespace(oldIdent.namespace())
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.classifyException(s"Failed table renaming from $oldIdent to $newIdent", dialect) {
JdbcUtils.renameTable(conn, getTableName(oldIdent), getTableName(newIdent), options)
}
}
}
override def loadTable(ident: Identifier): Table = {
checkNamespace(ident.namespace())
val optionsWithTableName = new JDBCOptions(
options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident)))
try {
val schema = JDBCRDD.resolveTable(optionsWithTableName)
JDBCTable(ident, schema, optionsWithTableName)
} catch {
case _: SQLException => throw QueryCompilationErrors.noSuchTableError(ident)
}
}
override def createTable(
ident: Identifier,
schema: StructType,
partitions: Array[Transform],
properties: java.util.Map[String, String]): Table = {
checkNamespace(ident.namespace())
if (partitions.nonEmpty) {
throw QueryExecutionErrors.cannotCreateJDBCTableWithPartitionsError()
}
var tableOptions = options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident))
var tableComment: String = ""
var tableProperties: String = ""
if (!properties.isEmpty) {
properties.asScala.foreach {
case (k, v) => k match {
case TableCatalog.PROP_COMMENT => tableComment = v
case TableCatalog.PROP_PROVIDER =>
throw QueryCompilationErrors.cannotCreateJDBCTableUsingProviderError()
case TableCatalog.PROP_OWNER => // owner is ignored. It is default to current user name.
case TableCatalog.PROP_LOCATION =>
throw QueryCompilationErrors.cannotCreateJDBCTableUsingLocationError()
case _ => tableProperties = tableProperties + " " + s"$k $v"
}
}
}
if (tableComment != "") {
tableOptions = tableOptions + (JDBCOptions.JDBC_TABLE_COMMENT -> tableComment)
}
if (tableProperties != "") {
// table property is set in JDBC_CREATE_TABLE_OPTIONS, which will be appended
// to CREATE TABLE statement.
// E.g., "CREATE TABLE t (name string) ENGINE InnoDB DEFAULT CHARACTER SET utf8"
// Spark doesn't check if these table properties are supported by databases. If
// table property is invalid, database will fail the table creation.
tableOptions = tableOptions + (JDBCOptions.JDBC_CREATE_TABLE_OPTIONS -> tableProperties)
}
val writeOptions = new JdbcOptionsInWrite(tableOptions)
val caseSensitive = SQLConf.get.caseSensitiveAnalysis
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.classifyException(s"Failed table creation: $ident", dialect) {
JdbcUtils.createTable(conn, getTableName(ident), schema, caseSensitive, writeOptions)
}
}
JDBCTable(ident, schema, writeOptions)
}
override def alterTable(ident: Identifier, changes: TableChange*): Table = {
checkNamespace(ident.namespace())
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.classifyException(s"Failed table altering: $ident", dialect) {
JdbcUtils.alterTable(conn, getTableName(ident), changes, options)
}
loadTable(ident)
}
}
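  // A namespace exists when the JDBC metadata reports a schema with exactly this name.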
override def namespaceExists(namespace: Array[String]): Boolean = namespace match {
case Array(db) =>
JdbcUtils.withConnection(options) { conn =>
val rs = conn.getMetaData.getSchemas(null, db)
while (rs.next()) {
if (rs.getString(1) == db) return true;
}
false
}
case _ => false
}
override def listNamespaces(): Array[Array[String]] = {
JdbcUtils.withConnection(options) { conn =>
val schemaBuilder = ArrayBuilder.make[Array[String]]
val rs = conn.getMetaData.getSchemas()
while (rs.next()) {
schemaBuilder += Array(rs.getString(1))
}
schemaBuilder.result
}
}
override def listNamespaces(namespace: Array[String]): Array[Array[String]] = {
namespace match {
case Array() =>
listNamespaces()
case Array(_) if namespaceExists(namespace) =>
Array()
case _ =>
throw QueryCompilationErrors.noSuchNamespaceError(namespace)
}
}
override def loadNamespaceMetadata(namespace: Array[String]): util.Map[String, String] = {
namespace match {
case Array(db) =>
if (!namespaceExists(namespace)) {
throw QueryCompilationErrors.noSuchNamespaceError(Array(db))
}
mutable.HashMap[String, String]().asJava
case _ =>
throw QueryCompilationErrors.noSuchNamespaceError(namespace)
}
}
override def createNamespace(
namespace: Array[String],
metadata: util.Map[String, String]): Unit = namespace match {
case Array(db) if !namespaceExists(namespace) =>
var comment = ""
if (!metadata.isEmpty) {
metadata.asScala.foreach {
case (k, v) => k match {
case SupportsNamespaces.PROP_COMMENT => comment = v
case SupportsNamespaces.PROP_OWNER => // ignore
case SupportsNamespaces.PROP_LOCATION =>
throw QueryCompilationErrors.cannotCreateJDBCNamespaceUsingProviderError()
case _ =>
throw QueryCompilationErrors.cannotCreateJDBCNamespaceWithPropertyError(k)
}
}
}
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.classifyException(s"Failed create name space: $db", dialect) {
JdbcUtils.createNamespace(conn, options, db, comment)
}
}
case Array(_) =>
throw QueryCompilationErrors.namespaceAlreadyExistsError(namespace)
case _ =>
throw QueryExecutionErrors.invalidNamespaceNameError(namespace)
}
override def alterNamespace(namespace: Array[String], changes: NamespaceChange*): Unit = {
namespace match {
case Array(db) =>
changes.foreach {
case set: NamespaceChange.SetProperty =>
if (set.property() == SupportsNamespaces.PROP_COMMENT) {
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.createNamespaceComment(conn, options, db, set.value)
}
} else {
throw QueryCompilationErrors.cannotSetJDBCNamespaceWithPropertyError(set.property)
}
case unset: NamespaceChange.RemoveProperty =>
if (unset.property() == SupportsNamespaces.PROP_COMMENT) {
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.removeNamespaceComment(conn, options, db)
}
} else {
throw QueryCompilationErrors.cannotUnsetJDBCNamespaceWithPropertyError(unset.property)
}
case _ =>
throw QueryCompilationErrors.unsupportedJDBCNamespaceChangeInCatalogError(changes)
}
case _ =>
throw QueryCompilationErrors.noSuchNamespaceError(namespace)
}
}
override def dropNamespace(
namespace: Array[String],
cascade: Boolean): Boolean = namespace match {
case Array(db) if namespaceExists(namespace) =>
if (listTables(Array(db)).nonEmpty) {
throw QueryExecutionErrors.namespaceNotEmptyError(namespace)
}
JdbcUtils.withConnection(options) { conn =>
JdbcUtils.classifyException(s"Failed drop name space: $db", dialect) {
JdbcUtils.dropNamespace(conn, options, db)
true
}
}
case _ =>
throw QueryCompilationErrors.noSuchNamespaceError(namespace)
}
private def checkNamespace(namespace: Array[String]): Unit = {
// In JDBC there is no nested database/schema
if (namespace.length > 1) {
throw QueryCompilationErrors.noSuchNamespaceError(namespace)
}
}
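  // Builds the fully qualified table name by quoting each identifier part with the dialect and joining the parts with '.'.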
private def getTableName(ident: Identifier): String = {
(ident.namespace() :+ ident.name()).map(dialect.quoteIdentifier).mkString(".")
}
}
|
WeichenXu123/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalog.scala
|
Scala
|
apache-2.0
| 11,856 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{FlatSpec, Matchers}
import com.intel.analytics.bigdl.tensor.Tensor
import scala.collection.mutable.ArrayBuffer
@com.intel.analytics.bigdl.tags.Parallel
class LBFGSSpec extends FlatSpec with Matchers {
"torchLBFGS in regular batch test" should "perform well on rosenbrock function" in {
val x = Tensor[Double](2).fill(0)
val optm = new LBFGS[Double]
val result = optm.optimize(TestUtils.rosenBrock, x,
T("maxIter" -> 100, "learningRate" -> 1e-1))
val fx = result._2
println()
println("Rosenbrock test")
println()
println(s"x = $x")
println("fx = ")
for (i <- 1 to fx.length) {
println(s"$i ${fx(i - 1)}")
}
println()
println()
fx.last < 1e-6 should be(true)
}
"torchLBFGS in stochastic test" should "perform well on rosenbrock function" in {
val x = Tensor[Double](2).fill(0)
val optm = new LBFGS[Double]
val fx = new ArrayBuffer[Double]()
val config = T("maxIter" -> 1, "learningRate" -> 1e-1)
for (i <- 1 to 100) {
val result = optm.optimize(TestUtils.rosenBrock, x, config)
fx.append(result._2(0))
}
println()
println("Rosenbrock test")
println()
println(s"x = $x")
println("fx = ")
for (i <- 1 to fx.length) {
println(s"$i ${fx(i - 1)}")
}
println()
println()
fx.last < 1e-6 should be(true)
}
}
|
zhichao-li/BigDL
|
dl/src/test/scala/com/intel/analytics/bigdl/optim/LBFGSSpec.scala
|
Scala
|
apache-2.0
| 2,263 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.dl
import caffe.Caffe.LayerParameter;
import caffe.Caffe.NetParameter;
import caffe.Caffe.SolverParameter;
import org.apache.sysml.parser.LanguageException;
import org.apache.sysml.runtime.DMLRuntimeException;
import org.apache.sysml.api.ml.ScriptsUtils
import org.apache.sysml.runtime.matrix.MatrixCharacteristics
import org.apache.sysml.runtime.matrix.data.MatrixBlock
import scala.collection.JavaConversions._
import java.util.ArrayList
import caffe.Caffe.Phase
import caffe.Caffe
import java.util.HashSet
import org.apache.sysml.api.DMLScript
import java.io.File
import org.apache.spark.SparkContext
import org.apache.spark.ml.{ Model, Estimator }
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructType
import org.apache.spark.ml.param.{ Params, Param, ParamMap, DoubleParam }
import org.apache.sysml.runtime.matrix.MatrixCharacteristics
import org.apache.sysml.runtime.matrix.data.MatrixBlock
import org.apache.sysml.runtime.DMLRuntimeException
import org.apache.sysml.runtime.instructions.spark.utils.{ RDDConverterUtilsExt => RDDConverterUtils }
import org.apache.sysml.api.mlcontext._
import org.apache.sysml.api.mlcontext.ScriptFactory._
import org.apache.sysml.api.ml._
import java.util.Random
import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory
import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer
/***************************************************************************************
DESIGN OF CAFFE2DML:
1. Caffe2DML is designed to fit well into the mllearn framework. Hence, the key methods that were to be implemented are:
- `getTrainingScript` for the Estimator class.
- `getPredictionScript` for the Model class.
These methods should be the starting point of any developer to understand the DML generated for training and prediction respectively.
2. To simplify the DML generation in getTrainingScript and getPredictionScript method, we use DMLGenerator interface.
This interface generates DML string for common operations such as loops (such as if, for, while) as well as built-in functions (read, write), etc.
Also, this interface helps in "code reading" of this class :)
3. Here is an analogy for SystemML developers to think of various moving components of Caffe2DML:
- Like Dml.g4 in the org.apache.sysml.parser.dml package, caffe.proto in the src/main/proto/caffe directory
is used to generate classes to parse the input files.
Dml.g4 ---> antlr ---> DmlLexer.java, DmlListener.java, DmlParser.java
caffe.proto ---> protoc ---> target/generated-sources/caffe/Caffe.java
- Just like the classes generated by Dml.g4 are used to parse input DML file,
the target/generated-sources/caffe/Caffe.java class is used to parse the input caffe network/deploy prototxt and solver files.
- You can think of a .caffemodel file as a DML file with matrix values encoded in it (please see the example below).
So it is possible to read a .caffemodel file with the Caffe.java class. This is done in Utils.scala's readCaffeNet method.
X = matrix("1.2 3.5 0.999 7.123", rows=2, cols=2)
...
- Just like we convert the AST generated by antlr into our DMLProgram representation, we convert
caffe's abstraction into the below given mapping classes for layer, solver and learning rate.
These mapping classes maps the corresponding Caffe abstraction to the SystemML-NN library.
This greatly simplifies adding new layers into Caffe2DML:
trait CaffeLayer {
// Any layer that wants to reuse SystemML-NN has to override following methods that help in generating the DML for the given layer:
def sourceFileName:String;
def init(dmlScript:StringBuilder):Unit;
def forward(dmlScript:StringBuilder, isPrediction:Boolean):Unit;
def backward(dmlScript:StringBuilder, outSuffix:String):Unit;
...
}
trait CaffeSolver {
def sourceFileName:String;
def update(dmlScript:StringBuilder, layer:CaffeLayer):Unit;
def init(dmlScript:StringBuilder, layer:CaffeLayer):Unit;
}
4. To simplify the traversal of the network, we created a Network interface:
trait Network {
def getLayers(): List[String]
def getCaffeLayer(layerName:String):CaffeLayer
def getBottomLayers(layerName:String): Set[String]
def getTopLayers(layerName:String): Set[String]
def getLayerID(layerName:String): Int
}
5. One of the key design restrictions of Caffe2DML is that every layer is identified uniquely by its name.
This restriction simplifies the code significantly.
To shield against network files that violate this restriction, Caffe2DML performs rewrites in the CaffeNetwork class (search for conditions 1-5).
6. Caffe2DML also expects the layers to be in sorted order.
***************************************************************************************/
object Caffe2DML {
val LOG = LogFactory.getLog(classOf[Caffe2DML].getName())
// ------------------------------------------------------------------------
def layerDir = "nn/layers/"
def optimDir = "nn/optim/"
// Naming conventions:
val X = "X"; val y = "y"; val batchSize = "BATCH_SIZE"; val numImages = "num_images"; val numValidationImages = "num_validation"
val XVal = "X_val"; val yVal = "y_val"
val USE_NESTEROV_UDF = {
// Developer environment variable flag 'USE_NESTEROV_UDF' until codegen starts working.
// Then, we will remove this flag and also the class org.apache.sysml.udf.lib.SGDNesterovUpdate
val envFlagNesterovUDF = System.getenv("USE_NESTEROV_UDF")
envFlagNesterovUDF != null && envFlagNesterovUDF.toBoolean
}
}
class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,
val solver:CaffeSolver, val net:CaffeNetwork,
val lrPolicy:LearningRatePolicy, val numChannels:String, val height:String, val width:String) extends Estimator[Caffe2DMLModel]
with BaseSystemMLClassifier with DMLGenerator {
// --------------------------------------------------------------
// Invoked by Python, MLPipeline
def this(sc: SparkContext, solver1:Caffe.SolverParameter, networkPath:String, numChannels:String, height:String, width:String) {
this(sc, solver1, Utils.parseSolver(solver1),
new CaffeNetwork(networkPath, caffe.Caffe.Phase.TRAIN, numChannels, height, width),
new LearningRatePolicy(solver1), numChannels, height, width)
}
def this(sc: SparkContext, solver1:Caffe.SolverParameter, numChannels:String, height:String, width:String) {
this(sc, solver1, Utils.parseSolver(solver1), new CaffeNetwork(solver1.getNet, caffe.Caffe.Phase.TRAIN, numChannels, height, width),
new LearningRatePolicy(solver1), numChannels, height, width)
}
val uid:String = "caffe_classifier_" + (new Random).nextLong
override def copy(extra: org.apache.spark.ml.param.ParamMap): Estimator[Caffe2DMLModel] = {
val that = new Caffe2DML(sc, solverParam, solver, net, lrPolicy, numChannels, height, width)
copyValues(that, extra)
}
// Note: will update the y_mb as this will be called by Python mllearn
def fit(X_mb: MatrixBlock, y_mb: MatrixBlock): Caffe2DMLModel = {
mloutput = baseFit(X_mb, y_mb, sc)
new Caffe2DMLModel(this)
}
def fit(df: ScriptsUtils.SparkDataType): Caffe2DMLModel = {
mloutput = baseFit(df, sc)
new Caffe2DMLModel(this)
}
// --------------------------------------------------------------
// Returns true if the last 2 of the 4 dimensions are 1.
// The first dimension refers to the number of input datapoints.
// The second dimension refers to the number of classes.
def isClassification():Boolean = {
val outShape = getOutputShapeOfLastLayer
return outShape._2 == 1 && outShape._3 == 1
}
def getOutputShapeOfLastLayer():(Int, Int, Int) = {
val out = net.getCaffeLayer(net.getLayers().last).outputShape
(out._1.toInt, out._2.toInt, out._3.toInt)
}
// Used for simplifying transfer learning
private val layersToIgnore:HashSet[String] = new HashSet[String]()
def setWeightsToIgnore(layerName:String):Unit = layersToIgnore.add(layerName)
def setWeightsToIgnore(layerNames:ArrayList[String]):Unit = layersToIgnore.addAll(layerNames)
// Input parameters to prediction and scoring script
val inputs:java.util.HashMap[String, String] = new java.util.HashMap[String, String]()
def setInput(key: String, value:String):Unit = inputs.put(key, value)
customAssert(solverParam.getTestIterCount <= 1, "Multiple test_iter variables are not supported")
customAssert(solverParam.getMaxIter > 0, "Please set max_iter to a positive value")
customAssert(net.getLayers.filter(net.getCaffeLayer(_).isInstanceOf[IsLossLayer]).length == 1, "Expected exactly one loss layer")
// TODO: throw error or warning if user tries to set solver_mode == GPU instead of using setGPU method
// Method called by Python mllearn to visualize variable of certain layer
def visualizeLayer(layerName:String, varType:String, aggFn:String): Unit = visualizeLayer(net, layerName, varType, aggFn)
def getTrainAlgo():String = if(inputs.containsKey("$train_algo")) inputs.get("$train_algo") else "minibatch"
def getTestAlgo():String = if(inputs.containsKey("$test_algo")) inputs.get("$test_algo") else "minibatch"
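// Prints a per-layer summary table (name, type, output shape, weight/bias shapes, top/bottom connections) as a Spark DataFrame.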
def summary(sparkSession:org.apache.spark.sql.SparkSession):Unit = {
val header = Seq("Name", "Type", "Output", "Weight", "Bias", "Top", "Bottom")
val entries = net.getLayers.map(l => (l, net.getCaffeLayer(l))).map(l => {
val layer = l._2
(l._1, layer.param.getType,
"(, " + layer.outputShape._1 + ", " + layer.outputShape._2 + ", " + layer.outputShape._3 + ")",
if(layer.weightShape != null) "[" + layer.weightShape()(0) + " X " + layer.weightShape()(1) + "]" else "",
if(layer.biasShape != null) "[" + layer.biasShape()(0) + " X " + layer.biasShape()(1) + "]" else "",
layer.param.getTopList.mkString(","),
layer.param.getBottomList.mkString(",")
)
})
import sparkSession.implicits._
sc.parallelize(entries).toDF(header : _*).show(net.getLayers.size)
}
// ================================================================================================
// The method below parses the provided network and solver files and generates the DML training script.
def getTrainingScript(isSingleNode:Boolean):(Script, String, String) = {
val startTrainingTime = System.nanoTime()
reset // Reset the state of DML generator for training script.
// Flags passed by user
val DEBUG_TRAINING = if(inputs.containsKey("$debug")) inputs.get("$debug").toLowerCase.toBoolean else false
assign(tabDMLScript, "debug", if(DEBUG_TRAINING) "TRUE" else "FALSE")
appendHeaders(net, solver, true) // Appends DML corresponding to source and externalFunction statements.
readInputData(net, true) // Read X_full and y_full
// Initialize the layers and solvers. Reads weights and bias if $weights is set.
initWeights(net, solver, inputs.containsKey("$weights"), layersToIgnore)
// Split into training and validation set
// Initializes Caffe2DML.X, Caffe2DML.y, Caffe2DML.XVal, Caffe2DML.yVal and Caffe2DML.numImages
val shouldValidate = solverParam.getTestInterval > 0 && solverParam.getTestIterCount > 0 && solverParam.getTestIter(0) > 0
trainTestSplit(if(shouldValidate) solverParam.getTestIter(0) else 0)
// Set iteration-related variables such as max_epochs, num_iters_per_epoch, lr, etc.
setIterationVariables
val lossLayers = getLossLayers(net)
// ----------------------------------------------------------------------------
// Main logic
forBlock("e", "1", "max_epochs") {
getTrainAlgo.toLowerCase match {
case "minibatch" =>
forBlock("i", "1", "num_iters_per_epoch") {
getTrainingBatch(tabDMLScript)
tabDMLScript.append("iter = iter + 1\\n")
// -------------------------------------------------------
// Perform forward, backward and update on minibatch
forward; backward; update
// -------------------------------------------------------
displayLoss(lossLayers(0), shouldValidate)
performSnapshot
}
case "batch" => {
tabDMLScript.append("iter = iter + 1\\n")
// -------------------------------------------------------
// Perform forward, backward and update on entire dataset
forward; backward; update
// -------------------------------------------------------
displayLoss(lossLayers(0), shouldValidate)
performSnapshot
}
case "allreduce_parallel_batches" => {
// This setting uses the batch size provided by the user
if(!inputs.containsKey("$parallel_batches")) {
throw new RuntimeException("The parameter parallel_batches is required for allreduce_parallel_batches")
}
// The user specifies the number of parallel_batches.
// This ensures that the user of the generated script remembers to provide the command-line parameter $parallel_batches.
assign(tabDMLScript, "parallel_batches", "$parallel_batches")
assign(tabDMLScript, "group_batch_size", "parallel_batches*" + Caffe2DML.batchSize)
assign(tabDMLScript, "groups", "as.integer(ceil(" + Caffe2DML.numImages + "/group_batch_size))")
// Grab groups of mini-batches
forBlock("g", "1", "groups") {
tabDMLScript.append("iter = iter + 1\\n")
// Get next group of mini-batches
assign(tabDMLScript, "group_beg", "((g-1) * group_batch_size) %% " + Caffe2DML.numImages + " + 1")
assign(tabDMLScript, "group_end", "min(" + Caffe2DML.numImages + ", group_beg + group_batch_size - 1)")
assign(tabDMLScript, "X_group_batch", Caffe2DML.X + "[group_beg:group_end,]")
assign(tabDMLScript, "y_group_batch", Caffe2DML.y + "[group_beg:group_end,]")
initializeGradients("parallel_batches")
assign(tabDMLScript, "X_group_batch_size", nrow("X_group_batch"))
parForBlock("j", "1", "parallel_batches") {
// Get a mini-batch in this group
assign(tabDMLScript, "beg", "((j-1) * " + Caffe2DML.batchSize + ") %% nrow(X_group_batch) + 1")
assign(tabDMLScript, "end", "min(nrow(X_group_batch), beg + " + Caffe2DML.batchSize + " - 1)")
assign(tabDMLScript, "Xb", "X_group_batch[beg:end,]")
assign(tabDMLScript, "yb", "y_group_batch[beg:end,]")
forward; backward
flattenGradients
}
aggregateAggGradients
update
// -------------------------------------------------------
assign(tabDMLScript, "Xb", "X_group_batch")
assign(tabDMLScript, "yb", "y_group_batch")
displayLoss(lossLayers(0), shouldValidate)
performSnapshot
}
}
case "allreduce" => {
// This is distributed synchronous gradient descent
forBlock("i", "1", "num_iters_per_epoch") {
tabDMLScript.append("iter = iter + 1\\n")
// -------------------------------------------------------
// Perform forward, backward and update on minibatch in parallel
assign(tabDMLScript, "beg", "((i-1) * " + Caffe2DML.batchSize + ") %% " + Caffe2DML.numImages + " + 1")
assign(tabDMLScript, "end", " min(beg + " + Caffe2DML.batchSize + " - 1, " + Caffe2DML.numImages + ")")
assign(tabDMLScript, "X_group_batch", Caffe2DML.X + "[beg:end,]")
assign(tabDMLScript, "y_group_batch", Caffe2DML.y + "[beg:end,]")
assign(tabDMLScript, "X_group_batch_size", nrow("X_group_batch"))
tabDMLScript.append("local_batch_size = nrow(y_group_batch)\\n")
val localBatchSize = "local_batch_size"
initializeGradients(localBatchSize)
parForBlock("j", "1", localBatchSize) {
assign(tabDMLScript, "Xb", "X_group_batch[j,]")
assign(tabDMLScript, "yb", "y_group_batch[j,]")
forward; backward
flattenGradients
}
aggregateAggGradients
update
// -------------------------------------------------------
assign(tabDMLScript, "Xb", "X_group_batch")
assign(tabDMLScript, "yb", "y_group_batch")
displayLoss(lossLayers(0), shouldValidate)
performSnapshot
}
}
case _ => throw new DMLRuntimeException("Unsupported train algo:" + getTrainAlgo)
}
// After every epoch, update the learning rate
tabDMLScript.append("# Learning rate\\n")
lrPolicy.updateLearningRate(tabDMLScript)
}
// ----------------------------------------------------------------------------
// Check if this is necessary
if(doVisualize) tabDMLScript.append("print(" + asDMLString("Visualization counter:") + " + viz_counter)")
val trainingScript = tabDMLScript.toString()
// Print script generation time and the DML script on stdout
System.out.println("Time taken to generate training script from Caffe proto: " + ((System.nanoTime() - startTrainingTime)*1e-9) + " seconds." )
if(DEBUG_TRAINING) Utils.prettyPrintDMLScript(trainingScript)
// Set input/output variables and execute the script
val script = dml(trainingScript).in(inputs)
net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.out(l.weight))
net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.out(l.bias))
(script, "X_full", "y_full")
}
// ================================================================================================
// -------------------------------------------------------------------------------------------
// Helper functions to generate DML
// Initializes Caffe2DML.X, Caffe2DML.y, Caffe2DML.XVal, Caffe2DML.yVal and Caffe2DML.numImages
private def trainTestSplit(numValidationBatches:Int):Unit = {
if(numValidationBatches > 0) {
if(solverParam.getDisplay <= 0)
throw new DMLRuntimeException("Since test_iter and test_interval is greater than zero, you should set display to be greater than zero")
tabDMLScript.append(Caffe2DML.numValidationImages).append(" = " + numValidationBatches + " * " + Caffe2DML.batchSize + "\\n")
tabDMLScript.append("# Sanity check to ensure that validation set is not too large\\n")
val maxValidationSize = "ceil(0.3 * " + Caffe2DML.numImages + ")"
ifBlock(Caffe2DML.numValidationImages + " > " + maxValidationSize) {
assign(tabDMLScript, "max_test_iter", "floor(" + maxValidationSize + " / " + Caffe2DML.batchSize + ")")
tabDMLScript.append("stop(" +
dmlConcat(asDMLString("Too large validation size. Please reduce test_iter to "), "max_test_iter")
+ ")\\n")
}
val one = "1"
val rl = int_add(Caffe2DML.numValidationImages, one)
rightIndexing(tabDMLScript.append(Caffe2DML.X).append(" = "), "X_full", rl, Caffe2DML.numImages, null, null)
tabDMLScript.append("; ")
rightIndexing(tabDMLScript.append(Caffe2DML.y).append(" = "), "y_full", rl, Caffe2DML.numImages, null, null)
tabDMLScript.append("; ")
rightIndexing(tabDMLScript.append(Caffe2DML.XVal).append(" = "), "X_full", one, Caffe2DML.numValidationImages, null, null)
tabDMLScript.append("; ")
rightIndexing(tabDMLScript.append(Caffe2DML.yVal).append(" = "), "y_full", one, Caffe2DML.numValidationImages, null, null)
tabDMLScript.append("; ")
tabDMLScript.append(Caffe2DML.numImages).append(" = nrow(y)\\n")
}
else {
assign(tabDMLScript, Caffe2DML.X, "X_full")
assign(tabDMLScript, Caffe2DML.y, "y_full")
tabDMLScript.append(Caffe2DML.numImages).append(" = nrow(" + Caffe2DML.y + ")\\n")
}
}
// Append the DML to display training and validation loss
private def displayLoss(lossLayer:IsLossLayer, shouldValidate:Boolean):Unit = {
if(solverParam.getDisplay > 0) {
// Append the DML to compute training loss
if(!getTrainAlgo.toLowerCase.startsWith("allreduce")) {
// Training loss is only computed for non-allreduce algorithms (see the else branch below)
tabDMLScript.append("# Compute training loss & accuracy\\n")
ifBlock("iter %% " + solverParam.getDisplay + " == 0") {
assign(tabDMLScript, "loss", "0"); assign(tabDMLScript, "accuracy", "0")
lossLayer.computeLoss(dmlScript, numTabs)
assign(tabDMLScript, "training_loss", "loss"); assign(tabDMLScript, "training_accuracy", "accuracy")
tabDMLScript.append(print( dmlConcat( asDMLString("Iter:"), "iter",
asDMLString(", training loss:"), "training_loss", asDMLString(", training accuracy:"), "training_accuracy" )))
appendTrainingVisualizationBody(dmlScript, numTabs)
printClassificationReport
}
}
else {
Caffe2DML.LOG.info("Training loss is not printed for train_algo=" + getTrainAlgo)
}
if(shouldValidate) {
if( getTrainAlgo.toLowerCase.startsWith("allreduce") &&
getTestAlgo.toLowerCase.startsWith("allreduce")) {
Caffe2DML.LOG.warn("The setting: train_algo=" + getTrainAlgo + " and test_algo=" + getTestAlgo + " is not recommended. Consider changing test_algo=minibatch")
}
// Append the DML to compute validation loss
val numValidationBatches = if(solverParam.getTestIterCount > 0) solverParam.getTestIter(0) else 0
tabDMLScript.append("# Compute validation loss & accuracy\\n")
ifBlock("iter %% " + solverParam.getTestInterval + " == 0") {
assign(tabDMLScript, "loss", "0"); assign(tabDMLScript, "accuracy", "0")
getTestAlgo.toLowerCase match {
case "minibatch" => {
assign(tabDMLScript, "validation_loss", "0")
assign(tabDMLScript, "validation_accuracy", "0")
forBlock("iVal", "1", "num_iters_per_epoch") {
getValidationBatch(tabDMLScript)
forward; lossLayer.computeLoss(dmlScript, numTabs)
tabDMLScript.append("validation_loss = validation_loss + loss\\n")
tabDMLScript.append("validation_accuracy = validation_accuracy + accuracy\\n")
}
tabDMLScript.append("validation_accuracy = validation_accuracy / num_iters_per_epoch\\n")
}
case "batch" => {
assign(tabDMLScript, "Xb", Caffe2DML.XVal); assign(tabDMLScript, "yb", Caffe2DML.yVal)
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))
lossLayer.computeLoss(dmlScript, numTabs)
assign(tabDMLScript, "validation_loss", "loss"); assign(tabDMLScript, "validation_accuracy", "accuracy")
}
case "allreduce_parallel_batches" => {
// This setting uses the batch size provided by the user
if(!inputs.containsKey("$parallel_batches")) {
throw new RuntimeException("The parameter parallel_batches is required for allreduce_parallel_batches")
}
// The user specifies the number of parallel_batches
// This ensures that the user of the generated script remembers to provide the command-line parameter $parallel_batches
assign(tabDMLScript, "parallel_batches_val", "$parallel_batches")
assign(tabDMLScript, "group_batch_size_val", "parallel_batches_val*" + Caffe2DML.batchSize)
assign(tabDMLScript, "groups_val", "as.integer(ceil(" + Caffe2DML.numValidationImages + "/group_batch_size_val))")
assign(tabDMLScript, "validation_accuracy", "0")
assign(tabDMLScript, "validation_loss", "0")
// Grab groups of mini-batches
forBlock("g_val", "1", "groups_val") {
assign(tabDMLScript, "group_beg_val", "((g_val-1) * group_batch_size_val) %% " + Caffe2DML.numValidationImages + " + 1")
assign(tabDMLScript, "group_end_val", "min(" + Caffe2DML.numValidationImages + ", group_beg_val + group_batch_size_val - 1)")
assign(tabDMLScript, "X_group_batch_val", Caffe2DML.XVal + "[group_beg_val:group_end_val,]")
assign(tabDMLScript, "y_group_batch_val", Caffe2DML.yVal + "[group_beg_val:group_end_val,]")
assign(tabDMLScript, "group_validation_loss", matrix("0", "parallel_batches_val", "1"))
assign(tabDMLScript, "group_validation_accuracy", matrix("0", "parallel_batches_val", "1"))
// Run graph on each mini-batch in this group in parallel (ideally on multiple GPUs)
parForBlock("iVal", "1", "parallel_batches_val") {
assign(tabDMLScript, "beg_val", "((iVal-1) * " + Caffe2DML.batchSize + ") %% nrow(y_group_batch_val) + 1")
assign(tabDMLScript, "end_val", "min(nrow(y_group_batch_val), beg_val + " + Caffe2DML.batchSize + " - 1)")
assign(tabDMLScript, "Xb", "X_group_batch_val[beg_val:end_val,]")
assign(tabDMLScript, "yb", "y_group_batch_val[beg_val:end_val,]")
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))
lossLayer.computeLoss(dmlScript, numTabs)
assign(tabDMLScript, "group_validation_loss[iVal,1]", "loss")
assign(tabDMLScript, "group_validation_accuracy[iVal,1]", "accuracy")
}
assign(tabDMLScript, "validation_loss", "validation_loss + sum(group_validation_loss)")
assign(tabDMLScript, "validation_accuracy", "validation_accuracy + sum(group_validation_accuracy)")
}
assign(tabDMLScript, "validation_accuracy", "validation_accuracy/groups_val")
}
case "allreduce" => {
// This setting does not use the batch size for validation and allows the parfor optimizer to select the plan
// by minimizing the memory requirement (i.e. batch size = 1)
assign(tabDMLScript, "group_validation_loss", matrix("0", Caffe2DML.numValidationImages, "1"))
assign(tabDMLScript, "group_validation_accuracy", matrix("0", Caffe2DML.numValidationImages, "1"))
parForBlock("iVal", "1", Caffe2DML.numValidationImages) {
assign(tabDMLScript, "Xb", Caffe2DML.XVal + "[iVal,]")
assign(tabDMLScript, "yb", Caffe2DML.yVal + "[iVal,]")
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))
lossLayer.computeLoss(dmlScript, numTabs)
assign(tabDMLScript, "group_validation_loss[iVal,1]", "loss")
assign(tabDMLScript, "group_validation_accuracy[iVal,1]", "accuracy")
}
assign(tabDMLScript, "validation_loss", "sum(group_validation_loss)")
assign(tabDMLScript, "validation_accuracy", "mean(group_validation_accuracy)")
}
case _ => throw new DMLRuntimeException("Unsupported test algo:" + getTestAlgo)
}
tabDMLScript.append(print( dmlConcat( asDMLString("Iter:"), "iter",
asDMLString(", validation loss:"), "validation_loss", asDMLString(", validation accuracy:"), "validation_accuracy" )))
appendValidationVisualizationBody(dmlScript, numTabs)
}
}
}
}
private def performSnapshot():Unit = {
if(solverParam.getSnapshot > 0) {
ifBlock("iter %% snapshot == 0") {
tabDMLScript.append("snapshot_dir= \\"" + solverParam.getSnapshotPrefix + "\\" + \\"/iter_\\" + iter + \\"/\\"\\n")
net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => tabDMLScript.append(write(l.weight, "snapshot_dir + \\"" + l.param.getName + "_weight.mtx\\"", "binary")))
net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => tabDMLScript.append(write(l.bias, "snapshot_dir + \\"" + l.param.getName + "_bias.mtx\\"", "binary")))
}
}
}
private def forward():Unit = {
tabDMLScript.append("# Perform forward pass\\n")
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))
}
private def backward():Unit = {
tabDMLScript.append("# Perform backward pass\\n")
net.getLayers.reverse.map(layer => net.getCaffeLayer(layer).backward(tabDMLScript, ""))
}
private def update():Unit = {
tabDMLScript.append("# Update the parameters\\n")
net.getLayers.map(layer => solver.update(tabDMLScript, net.getCaffeLayer(layer)))
}
private def initializeGradients(parallel_batches:String):Unit = {
tabDMLScript.append("# Data structure to store gradients computed in parallel\\n")
net.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {
if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + "_agg", matrix("0", parallel_batches, multiply(nrow(l.weight), ncol(l.weight))))
if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias + "_agg", matrix("0", parallel_batches, multiply(nrow(l.bias), ncol(l.bias))))
})
}
private def flattenGradients():Unit = {
tabDMLScript.append("# Flatten and store gradients for this parallel execution\\n")
// Note: We multiply by a weighting to allow for proper gradient averaging during the
// aggregation even with uneven batch sizes.
assign(tabDMLScript, "weighting", "nrow(Xb)/X_group_batch_size")
net.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {
if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + "_agg[j,]",
matrix(l.dWeight, "1", multiply(nrow(l.weight), ncol(l.weight))) + " * weighting")
if(l.shouldUpdateWeight) assign(tabDMLScript, l.dBias + "_agg[j,]",
matrix(l.dBias, "1", multiply(nrow(l.bias), ncol(l.bias))) + " * weighting")
})
}
private def aggregateAggGradients():Unit = {
tabDMLScript.append("# Aggregate the gradients\\n")
net.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {
if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight,
matrix(colSums(l.dWeight + "_agg"), nrow(l.weight), ncol(l.weight)))
if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias,
matrix(colSums(l.dBias + "_agg"), nrow(l.bias), ncol(l.bias)))
})
}
// Set iteration-related variables such as max_epochs, num_iters_per_epoch, lr, etc.
def setIterationVariables():Unit = {
getTrainAlgo.toLowerCase match {
case "batch" =>
assign(tabDMLScript, "max_epochs", solverParam.getMaxIter.toString)
case _ => {
ceilDivide(tabDMLScript, "num_iters_per_epoch", Caffe2DML.numImages, Caffe2DML.batchSize)
ceilDivide(tabDMLScript, "max_epochs", solverParam.getMaxIter.toString, "num_iters_per_epoch")
}
}
assign(tabDMLScript, "iter", "0")
assign(tabDMLScript, "lr", solverParam.getBaseLr.toString)
}
// -------------------------------------------------------------------------------------------
}
class Caffe2DMLModel(val numClasses:String, val sc: SparkContext, val solver:CaffeSolver,
val net:CaffeNetwork, val lrPolicy:LearningRatePolicy,
val estimator:Caffe2DML)
extends Model[Caffe2DMLModel] with HasMaxOuterIter with BaseSystemMLClassifierModel with DMLGenerator {
// --------------------------------------------------------------
// Invoked by Python, MLPipeline
val uid:String = "caffe_model_" + (new Random).nextLong
def this(estimator:Caffe2DML) = {
this(Utils.numClasses(estimator.net), estimator.sc, estimator.solver,
estimator.net,
// new CaffeNetwork(estimator.solverParam.getNet, caffe.Caffe.Phase.TEST, estimator.numChannels, estimator.height, estimator.width),
estimator.lrPolicy, estimator)
}
override def copy(extra: org.apache.spark.ml.param.ParamMap): Caffe2DMLModel = {
val that = new Caffe2DMLModel(numClasses, sc, solver, net, lrPolicy, estimator)
copyValues(that, extra)
}
// --------------------------------------------------------------
def modelVariables():List[String] = {
net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(_.weight) ++
net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(_.bias)
}
// ================================================================================================
// The method below parses the provided network and solver files and generates the DML prediction script.
def getPredictionScript(isSingleNode:Boolean): (Script, String) = {
val startPredictionTime = System.nanoTime()
reset // Reset the state of the DML generator before generating the prediction script.
val DEBUG_PREDICTION = if(estimator.inputs.containsKey("$debug")) estimator.inputs.get("$debug").toLowerCase.toBoolean else false
assign(tabDMLScript, "debug", if(DEBUG_PREDICTION) "TRUE" else "FALSE")
appendHeaders(net, solver, false) // Appends DML corresponding to source and externalFunction statements.
readInputData(net, false) // Read X_full and y_full
assign(tabDMLScript, "X", "X_full")
// Initialize the layers and solvers. Reads weights and bias if readWeights is true.
if(!estimator.inputs.containsKey("$weights") && estimator.mloutput == null)
throw new DMLRuntimeException("Cannot call predict/score without calling either fit or by providing weights")
val readWeights = estimator.inputs.containsKey("$weights") || estimator.mloutput != null
initWeights(net, solver, readWeights)
// Do not update mean and variance in batchnorm
updateMeanVarianceForBatchNorm(net, false)
val lossLayers = getLossLayers(net)
val lastLayerShape = estimator.getOutputShapeOfLastLayer
assign(tabDMLScript, "Prob", matrix("1", Caffe2DML.numImages, (lastLayerShape._1*lastLayerShape._2*lastLayerShape._3).toString))
estimator.getTestAlgo.toLowerCase match {
case "minibatch" => {
ceilDivide(tabDMLScript(), "num_iters", Caffe2DML.numImages, Caffe2DML.batchSize)
forBlock("i", "1", "num_iters") {
getTestBatch(tabDMLScript)
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
assign(tabDMLScript, "Prob[beg:end,]", lossLayers(0).out)
}
}
case "batch" => {
assign(tabDMLScript, "Xb", "X_full")
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
assign(tabDMLScript, "Prob", lossLayers(0).out)
}
case "allreduce_parallel_batches" => {
// This setting uses the batch size provided by the user
if(!estimator.inputs.containsKey("$parallel_batches")) {
throw new RuntimeException("The parameter parallel_batches is required for allreduce_parallel_batches")
}
// The user specifies the number of parallel_batches
// This ensures that the user of the generated script remembers to provide the command-line parameter $parallel_batches
assign(tabDMLScript, "parallel_batches", "$parallel_batches")
assign(tabDMLScript, "group_batch_size", "parallel_batches*" + Caffe2DML.batchSize)
assign(tabDMLScript, "groups", "as.integer(ceil(" + Caffe2DML.numImages + "/group_batch_size))")
// Grab groups of mini-batches
forBlock("g", "1", "groups") {
assign(tabDMLScript, "group_beg", "((g-1) * group_batch_size) %% " + Caffe2DML.numImages + " + 1")
assign(tabDMLScript, "group_end", "min(" + Caffe2DML.numImages + ", group_beg + group_batch_size - 1)")
assign(tabDMLScript, "X_group_batch", "X_full[group_beg:group_end,]")
// Run graph on each mini-batch in this group in parallel (ideally on multiple GPUs)
parForBlock("j", "1", "parallel_batches") {
assign(tabDMLScript, "beg", "((j-1) * " + Caffe2DML.batchSize + ") %% nrow(X_group_batch) + 1")
assign(tabDMLScript, "end", "min(nrow(X_group_batch), beg + " + Caffe2DML.batchSize + " - 1)")
assign(tabDMLScript, "Xb", "X_group_batch[beg:end,]")
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
assign(tabDMLScript, "Prob[beg:end,]", lossLayers(0).out)
}
}
}
case "allreduce" => {
// This setting does not use the batch size for scoring and allows the parfor optimizer to select the best plan
// by minimizing the memory requirement (i.e. batch size = 1)
parForBlock("i", "1", Caffe2DML.numImages) {
assign(tabDMLScript, "Xb", "X_full[i,]")
net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
assign(tabDMLScript, "Prob[i,]", lossLayers(0).out)
}
}
case _ => throw new DMLRuntimeException("Unsupported test algo:" + estimator.getTestAlgo)
}
if(estimator.inputs.containsKey("$output_activations")) {
if(estimator.getTestAlgo.toLowerCase.equals("batch")) {
net.getLayers.map(layer =>
tabDMLScript.append(write(net.getCaffeLayer(layer).out,
estimator.inputs.get("$output_activations") + "/" + net.getCaffeLayer(layer).param.getName + "_activations.mtx", "csv") + "\\n")
)
}
else {
throw new DMLRuntimeException("Incorrect usage of output_activations. It should be only used in batch mode.")
}
}
val predictionScript = dmlScript.toString()
System.out.println("Time taken to generate prediction script from Caffe proto:" + ((System.nanoTime() - startPredictionTime)*1e-9) + "secs." )
if(DEBUG_PREDICTION) Utils.prettyPrintDMLScript(predictionScript)
// Reset state of BatchNorm layer
updateMeanVarianceForBatchNorm(net, true)
val script = dml(predictionScript).out("Prob").in(estimator.inputs)
if(estimator.mloutput != null) {
// fit was called
net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.in(l.weight, estimator.mloutput.getMatrix(l.weight)))
net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.in(l.bias, estimator.mloutput.getMatrix(l.bias)))
}
(script, "X_full")
}
// ================================================================================================
def baseEstimator():BaseSystemMLEstimator = estimator
// Prediction
def transform(X: MatrixBlock): MatrixBlock = {
if(estimator.isClassification) {
Caffe2DML.LOG.debug("Prediction assuming classification")
baseTransform(X, sc, "Prob")
}
else {
Caffe2DML.LOG.debug("Prediction assuming segmentation")
val outShape = estimator.getOutputShapeOfLastLayer
baseTransform(X, sc, "Prob", outShape._1.toInt, outShape._2.toInt, outShape._3.toInt)
}
}
def transform_probability(X: MatrixBlock): MatrixBlock = {
if(estimator.isClassification) {
Caffe2DML.LOG.debug("Prediction of probability assuming classification")
baseTransformProbability(X, sc, "Prob")
}
else {
Caffe2DML.LOG.debug("Prediction of probability assuming segmentation")
val outShape = estimator.getOutputShapeOfLastLayer
baseTransformProbability(X, sc, "Prob", outShape._1.toInt, outShape._2.toInt, outShape._3.toInt)
}
}
def transform(df: ScriptsUtils.SparkDataType): DataFrame = {
if(estimator.isClassification) {
Caffe2DML.LOG.debug("Prediction assuming classification")
baseTransform(df, sc, "Prob", true)
}
else {
Caffe2DML.LOG.debug("Prediction assuming segmentation")
val outShape = estimator.getOutputShapeOfLastLayer
baseTransform(df, sc, "Prob", true, outShape._1.toInt, outShape._2.toInt, outShape._3.toInt)
}
}
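  // Usage sketch (hypothetical variable names; assumes the usual MLPipeline flow in which
  // fit(...) on the Caffe2DML estimator produced this model):
  //   val model: Caffe2DMLModel = estimator.fit(trainDf)
  //   val predictions: DataFrame = model.transform(testDf)   // per-class output read from "Prob"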
}
|
asurve/arvind-sysml2
|
src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
|
Scala
|
apache-2.0
| 40,652 |
package monocle.law.discipline
import monocle.Traversal
import monocle.law.TraversalLaws
import org.scalacheck.Prop._
import org.scalacheck.{Arbitrary, Prop}
import org.typelevel.discipline.Laws
import scalaz.Equal
import scalaz.std.list._
import scalaz.std.option._
object TraversalTests extends Laws {
def apply[S: Arbitrary : Equal, A: Arbitrary : Equal](traversal: Traversal[S, A])(implicit arbAA: Arbitrary[A => A]): RuleSet =
apply[S, A, Unit](_ => traversal)
def apply[S: Arbitrary : Equal, A: Arbitrary : Equal, I: Arbitrary](f: I => Traversal[S, A])(implicit arbAA: Arbitrary[A => A]): RuleSet = {
def laws(i: I): TraversalLaws[S, A] = new TraversalLaws(f(i))
new SimpleRuleSet("Traversal",
"headOption" -> forAll( (s: S, i: I) => laws(i).headOption(s)),
"get what you set" -> forAll( (s: S, f: A => A, i: I) => laws(i).modifyGetAll(s, f)),
"set idempotent" -> forAll( (s: S, a: A, i: I) => laws(i).setIdempotent(s, a)),
"modify id = id" -> forAll( (s: S, i: I) => laws(i).modifyIdentity(s)),
"compose modify" -> forAll( (s: S, f: A => A, g: A => A, i: I) => laws(i).composeModify(s, f, g))
)
}
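  // Usage sketch (assumes a Discipline-enabled test suite and the required Arbitrary/Equal
  // instances in scope; the names are illustrative, not part of this file):
  //   checkAll("list Traversal", TraversalTests(Traversal.fromTraverse[List, Int]))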
}
|
rperry/Monocle
|
law/shared/src/main/scala/monocle/law/discipline/TraversalTests.scala
|
Scala
|
mit
| 1,180 |
package com.lot.order.service
import akka.actor.Actor
import akka.actor.ActorLogging
import com.lot.exchange.Message.NewOrder
import com.lot.exchange.Message.ModifyOrder
import com.lot.exchange.Message.CancelOrder
import com.lot.order.model.Order
import scala.concurrent.duration._
import akka.util.Timeout
import scala.concurrent.Await
import akka.actor.ActorRef
import com.lot.security.model.PriceMessage
import com.lot.security.model.Price
import akka.pattern.ask
import scala.concurrent.ExecutionContext.Implicits.global
import com.lot.user.dao.UserDao
import com.lot.user.service.UserManagerMessages.BlockAmount
import com.lot.utils.GenericMessages.Success
import com.lot.utils.GenericMessages.Failure
import com.lot.exchange.Exchange
import org.joda.time.DateTime
import com.lot.order.dao.OrderDao
import akka.actor.ActorSystem
import com.lot.utils.ConfigurationModuleImpl
import akka.routing.FromConfig
import akka.actor.Props
import com.lot.user.service.UserManager
import com.lot.trade.service.SecurityManager
/**
 * This is where all pre-trade checks are made before the order is sent to the exchange, e.g.:
 * 1. The user has a sufficient account_balance
*
*/
class OrderPreCheck(securityManager: ActorRef, userManager: ActorRef) extends Actor with ActorLogging {
def receive = {
case NewOrder(order, at) => { handleNewOrder(order) }
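    // Note: CancelOrder messages currently reuse the same pre-check path as NewOrder
    // (handleNewOrder below); ModifyOrder is imported but falls through to the catch-all case.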
case CancelOrder(order, at) => { handleNewOrder(order) }
case _ => {}
}
/**
* Ensure that the amount required for the order is blocked in the user account
*/
def handleNewOrder(order: Order) = {
/*
* Load the price of the security from the securityManager
*/
implicit val timeout = Timeout(15 second)
/*
* Ask for the price
*/
val futurePrice = securityManager ? PriceMessage.Get(Price(order.security_id, 0.0))
futurePrice.map { priceMsg =>
/*
* We get a PriceMessage.Value which has a Price in it
*/
val price = priceMsg.asInstanceOf[PriceMessage.Value].price
val amount = price.price * order.quantity
val result = userManager ? BlockAmount(order.user_id, order.id.get, amount)
result.map { reply =>
log.debug(s"UserManager responded with $reply")
reply match {
case Success => {
/*
* Send it to the exchange for execution
*/
Exchange.exchanges.get(order.exchange).map { exchange =>
exchange ! NewOrder(order, new DateTime())
}
OrderDao.updatePreTradeStatus(order.copy(pre_trade_check_status="BlockAmountSuccess")).map { count =>
log.debug(s"Order updated $count")
}
}
case Failure => {
/*
* Mark order as BlockAmount failed
*/
OrderDao.updatePreTradeStatus(order.copy(pre_trade_check_status="BlockAmountFailed")).map { count =>
log.debug(s"Order updated $count")
}
}
}
}
}
}
}
/**
* Factory for creating OrderPreCheck actors with references to the securityManager and userManager
*/
object OrderPreCheck extends ConfigurationModuleImpl {
val system = ActorSystem("lot-om", config)
/*
* The actor that handles price update and broadcasting of prices
*/
val securityManager = SecurityManager.securityManager
/*
* Actor that handles blocking of the amounts to be charged
*/
val userManager = UserManager.userManager
val orderPreCheckRouter = system.actorOf(Props(classOf[OrderPreCheck], securityManager, userManager), "orderPreCheckRouter")
/**
* Factory method
*/
def apply() = {
orderPreCheckRouter
}
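  // Usage sketch (assumes an Order instance is in scope; outside an actor, `!` falls back to
  // Actor.noSender as the implicit sender):
  //   OrderPreCheck() ! NewOrder(order, new DateTime())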
}
|
thimmaiah/life_of_a_trade_scala
|
src/main/scala/com/lot/order/service/OrderPreCheck.scala
|
Scala
|
apache-2.0
| 3,700 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.cloudml.zen.ml.linalg
import com.github.cloudml.zen.ml.util.Logging
import com.github.fommil.netlib.{F2jBLAS, BLAS => NetlibBLAS}
import com.github.fommil.netlib.BLAS.{getInstance => NativeBLAS}
import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector}
/**
 * Copied from MLlib
*/
private[ml] object BLAS extends Serializable with Logging {
@transient private var _f2jBLAS: NetlibBLAS = _
@transient private var _nativeBLAS: NetlibBLAS = _
// For level-1 routines, we use Java implementation.
private def f2jBLAS: NetlibBLAS = {
if (_f2jBLAS == null) {
_f2jBLAS = new F2jBLAS
}
_f2jBLAS
}
/**
* y += a * x
*/
def axpy(a: Double, x: Vector, y: Vector): Unit = {
require(x.size == y.size)
y match {
case dy: DenseVector =>
x match {
case sx: SparseVector =>
axpy(a, sx, dy)
case dx: DenseVector =>
axpy(a, dx, dy)
case _ =>
throw new UnsupportedOperationException(
s"axpy doesn't support x type ${x.getClass}.")
}
case _ =>
throw new IllegalArgumentException(
s"axpy only supports adding to a dense vector but got type ${y.getClass}.")
}
}
/**
* y += a * x
*/
private def axpy(a: Double, x: DenseVector, y: DenseVector): Unit = {
val n = x.size
f2jBLAS.daxpy(n, a, x.values, 1, y.values, 1)
}
/**
* y += a * x
*/
private def axpy(a: Double, x: SparseVector, y: DenseVector): Unit = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val nnz = xIndices.size
if (a == 1.0) {
var k = 0
while (k < nnz) {
yValues(xIndices(k)) += xValues(k)
k += 1
}
} else {
var k = 0
while (k < nnz) {
yValues(xIndices(k)) += a * xValues(k)
k += 1
}
}
}
/**
* dot(x, y)
*/
def dot(x: Vector, y: Vector): Double = {
require(x.size == y.size,
"BLAS.dot(x: Vector, y:Vector) was given Vectors with non-matching sizes:" +
" x.size = " + x.size + ", y.size = " + y.size)
(x, y) match {
case (dx: DenseVector, dy: DenseVector) =>
dot(dx, dy)
case (sx: SparseVector, dy: DenseVector) =>
dot(sx, dy)
case (dx: DenseVector, sy: SparseVector) =>
dot(sy, dx)
case (sx: SparseVector, sy: SparseVector) =>
dot(sx, sy)
case _ =>
throw new IllegalArgumentException(s"dot doesn't support (${x.getClass}, ${y.getClass}).")
}
}
/**
* dot(x, y)
*/
private def dot(x: DenseVector, y: DenseVector): Double = {
val n = x.size
f2jBLAS.ddot(n, x.values, 1, y.values, 1)
}
/**
* dot(x, y)
*/
private def dot(x: SparseVector, y: DenseVector): Double = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val nnz = xIndices.size
var sum = 0.0
var k = 0
while (k < nnz) {
sum += xValues(k) * yValues(xIndices(k))
k += 1
}
sum
}
/**
* dot(x, y)
*/
private def dot(x: SparseVector, y: SparseVector): Double = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val yIndices = y.indices
val nnzx = xIndices.size
val nnzy = yIndices.size
var kx = 0
var ky = 0
var sum = 0.0
// y catching x
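    // Worked example (hypothetical data): for x.indices = [0, 3, 7] and y.indices = [3, 5, 7],
    // the inner loop advances ky past smaller y-indices, the cursors meet only at 3 and 7,
    // and the sum therefore has exactly two terms.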
while (kx < nnzx && ky < nnzy) {
val ix = xIndices(kx)
while (ky < nnzy && yIndices(ky) < ix) {
ky += 1
}
if (ky < nnzy && yIndices(ky) == ix) {
sum += xValues(kx) * yValues(ky)
ky += 1
}
kx += 1
}
sum
}
/**
* y = x
*/
def copy(x: Vector, y: Vector): Unit = {
val n = y.size
require(x.size == n)
y match {
case dy: DenseVector =>
x match {
case sx: SparseVector =>
val sxIndices = sx.indices
val sxValues = sx.values
val dyValues = dy.values
val nnz = sxIndices.size
var i = 0
var k = 0
while (k < nnz) {
val j = sxIndices(k)
while (i < j) {
dyValues(i) = 0.0
i += 1
}
dyValues(i) = sxValues(k)
i += 1
k += 1
}
while (i < n) {
dyValues(i) = 0.0
i += 1
}
case dx: DenseVector =>
Array.copy(dx.values, 0, dy.values, 0, n)
}
case _ =>
throw new IllegalArgumentException(s"y must be dense in copy but got ${y.getClass}")
}
}
/**
* x = a * x
*/
def scal(a: Double, x: Vector): Unit = {
x match {
case sx: SparseVector =>
f2jBLAS.dscal(sx.values.size, a, sx.values, 1)
case dx: DenseVector =>
f2jBLAS.dscal(dx.values.size, a, dx.values, 1)
case _ =>
throw new IllegalArgumentException(s"scal doesn't support vector type ${x.getClass}.")
}
}
// For level-3 routines, we use the native BLAS.
private def nativeBLAS: NetlibBLAS = {
if (_nativeBLAS == null) {
_nativeBLAS = NativeBLAS
}
_nativeBLAS
}
}
|
witgo/zen
|
ml/src/main/scala/com/github/cloudml/zen/ml/linalg/BLAS.scala
|
Scala
|
apache-2.0
| 6,010 |
package controllers
import contexts.{CreateInventoryOrderContext, UpdateInventoryOrderContext}
import jsons.InventoryOrderJson
import models.InventoryOrder
import org.joda.time.DateTime
import play.api.data.Form
import play.api.data.Forms._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
object InventoryOrdersController extends KiwiERPController {
def list = AuthorizedAction.async { implicit req =>
req.getQueryString("partsId") filter isId map { partsIdStr =>
Page(InventoryOrder.findAllByPartsId(partsIdStr.toLong))
} getOrElse {
Page(InventoryOrder.findAll)
} map { results =>
val (inventoryOrders, page) = results
Ok(InventoryOrderJson.index(inventoryOrders, page))
}
}
def create = AuthorizedAction.async(parse.urlFormEncoded) { implicit req =>
case class CreateForm(partsId: Long, supplierId: Long, quantity: Int, orderedDate: DateTime)
val form = Form(
mapping(
"partsId" -> longNumber(min = 1, max = MAX_LONG_NUMBER),
"supplierId" -> longNumber(min = 1, max = MAX_LONG_NUMBER),
"quantity" -> number(min = 1, max = MAX_NUMBER),
"orderedDate" -> jodaDate(pattern = DATETIME_PATTERN)
)(CreateForm.apply)(CreateForm.unapply))
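    // Example form payload (field names taken from the mapping above; the date format is whatever
    // DATETIME_PATTERN resolves to and is only assumed here):
    //   partsId=1&supplierId=2&quantity=10&orderedDate=2015-07-01 12:00:00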
form.bindFromRequestAndCheckErrors { f =>
CreateInventoryOrderContext(
f.partsId,
f.supplierId,
f.quantity,
f.orderedDate
) map { inventoryOrder =>
CreatedWithLocation(InventoryOrderJson.create(inventoryOrder))
}
}
}
def read(id: Long) = AuthorizedAction.async {
InventoryOrder.find(id) map { inventoryOrder =>
Ok(InventoryOrderJson.read(inventoryOrder))
}
}
def update(id: Long) = AuthorizedAction.async(parse.urlFormEncoded) { implicit req =>
case class UpdateForm(status: String, statusChangedDate: DateTime)
val form = Form(
mapping(
"status" -> nonEmptyText(minLength = 1, maxLength = 10),
"statusChangedDate" -> jodaDate(pattern = DATETIME_PATTERN)
)(UpdateForm.apply)(UpdateForm.unapply))
form.bindFromRequestAndCheckErrors { f =>
UpdateInventoryOrderContext(id, f.status, f.statusChangedDate) map (_ => NoContent)
}
}
def delete(id: Long) = AuthorizedAction.async {
InventoryOrder.destroy(id) map (_ => NoContent)
}
}
|
KIWIKIGMBH/kiwierp
|
kiwierp-backend/app/controllers/InventoryOrdersController.scala
|
Scala
|
mpl-2.0
| 2,337 |
package ch.uzh.ifi.pdeboer.pplib.process.stdlib
import ch.uzh.ifi.pdeboer.pplib.patterns.{FixPatchExecuter, FixVerifyFPDriver}
import ch.uzh.ifi.pdeboer.pplib.process.entities._
/**
* Created by pdeboer on 14/12/14.
*/
@PPLibProcess
class FixPatchProcess(params: Map[String, Any] = Map.empty) extends ProcessStub[List[Patch], List[Patch]](params) {
import ch.uzh.ifi.pdeboer.pplib.process.stdlib.FixPatchProcess._
override protected def run(dataToFix: List[Patch]): List[Patch] = {
val memoizer: ProcessMemoizer = getProcessMemoizer(dataToFix.hashCode() + "").getOrElse(new NoProcessMemoizer())
val allData = ALL_DATA.get ::: dataToFix.filter(d => !ALL_DATA.get.contains(d))
val indicesToFix: List[Int] = allData.zipWithIndex.filter(d => dataToFix.contains(d._1)).map(_._2)
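    // Illustrative example (hypothetical patches): with ALL_DATA = List(a, b, c, d) and
    // dataToFix = List(b, d), allData stays List(a, b, c, d) and indicesToFix = List(1, 3).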
val fixerProcess = FIXER_PROCESS.get
val targetParamToPassAllData = TARGET_PARAMETER_TO_PASS_ALL_DATA.get
if (targetParamToPassAllData.isDefined) {
fixerProcess.params += targetParamToPassAllData.get.key -> ALL_DATA.get
}
val driver = new FixVerifyFPDriver(fixerProcess, FIXER_BEFORE_AFTER_HANDLER.get)
val exec = new FixPatchExecuter(driver, allData, indicesToFix, PATCHES_TO_INCLUDE_BEFORE_AND_AFTER_MAIN.get, memoizer)
exec.allFixedPatches.map(_._2)
}
override def optionalParameters: List[ProcessParameter[_]] = List(ALL_DATA, TARGET_PARAMETER_TO_PASS_ALL_DATA,
FIXER_BEFORE_AFTER_HANDLER, PATCHES_TO_INCLUDE_BEFORE_AND_AFTER_MAIN)
override def expectedParametersBeforeRun: List[ProcessParameter[_]] = List(FIXER_PROCESS)
override def getCostCeiling(data: List[Patch]): Int = FIXER_PROCESS.get.create().getCostCeiling(data.head)
}
object FixPatchProcess {
val ALL_DATA = new ProcessParameter[List[Patch]]("allData", Some(List(Nil)))
val TARGET_PARAMETER_TO_PASS_ALL_DATA = new ProcessParameter[Option[ProcessParameter[List[Patch]]]]("targetParamToPassPatchesAllData", Some(List(Some(FixPatchProcess.ALL_DATA))))
val PATCHES_TO_INCLUDE_BEFORE_AND_AFTER_MAIN = new ProcessParameter[(Int, Int)]("patchesToIncludeBeforeAndAfterMain", Some(List((1, 1))))
val FIXER_PROCESS = new ProcessParameter[PassableProcessParam[CreateProcess[Patch, Patch]]]("fixerProcess", None)
val FIXER_BEFORE_AFTER_HANDLER = new ProcessParameter[FixVerifyFPDriver.FVFPDBeforeAfterHandler]("beforeAfterHandler", Some(List(FixVerifyFPDriver.DEFAULT_BEFORE_AFTER_HANDLER)))
}
|
uzh/PPLib
|
src/main/scala/ch/uzh/ifi/pdeboer/pplib/process/stdlib/FixPatchProcess.scala
|
Scala
|
mit
| 2,372 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import sbt.io.Path
object CommandStrings {
/** The name of the command that displays basic information about sbt and the build. */
val AboutCommand = "about"
val TasksCommand = "tasks"
val SettingsCommand = "settings"
val ProjectCommand = "project"
val ProjectsCommand = "projects"
val ShowCommand = "show"
val MultiTaskCommand = "all"
val BootCommand = "boot"
val EvalCommand = "eval"
val evalBrief = (EvalCommand + " <expression>", "Evaluates a Scala expression and prints the result and type.")
val evalDetailed =
EvalCommand + """ <expression>
Evaluates the given Scala expression and prints the result and type."""
@deprecated("Misnomer: was only for `show`. Use showBrief.", "0.13.2")
def actBrief = showBrief
@deprecated("Misnomer: was only for `show`. Use showDetailed.", "0.13.2")
def actDetailed = showDetailed
def actHelp = showHelp ++ multiTaskHelp
def multiTaskHelp = Help(MultiTaskCommand, (multiTaskSyntax, multiTaskBrief), multiTaskDetailed)
def multiTaskDetailed =
s"""$multiTaskSyntax
$multiTaskBrief"""
def multiTaskSyntax = s"""$MultiTaskCommand <task>+"""
def multiTaskBrief = """Executes all of the specified tasks concurrently."""
def showHelp = Help(ShowCommand, (s"$ShowCommand <key>", showBrief), showDetailed)
def showBrief = "Displays the result of evaluating the setting or task associated with 'key'."
def showDetailed =
s"""$ShowCommand <setting>
Displays the value of the specified setting.
$ShowCommand <task>
Evaluates the specified task and display the value returned by the task."""
val PluginsCommand = "plugins"
val PluginCommand = "plugin"
def pluginsBrief = "Lists currently available plugins."
def pluginsDetailed = pluginsBrief // TODO: expand
val LastCommand = "last"
val LastGrepCommand = "last-grep"
val ExportCommand = "export"
val ExportStream = "export"
val lastGrepBrief = (LastGrepCommand, "Shows lines from the last output for 'key' that match 'pattern'.")
val lastGrepDetailed =
LastGrepCommand + """ <pattern>
Displays lines from the logging of previous commands that match `pattern`.
""" + LastGrepCommand + """ <pattern> [key]
Displays lines from logging associated with `key` that match `pattern`. The key typically refers to a task (for example, test:compile). The logging that is displayed is restricted to the logging for that particular task.
<pattern> is a regular expression interpreted by java.util.Pattern. Matching text is highlighted (when highlighting is supported and enabled).
See also '""" + LastCommand + "'."
val lastBrief = (LastCommand, "Displays output from a previous command or the output from a specific task.")
val lastDetailed =
LastCommand + """
Prints the logging for the previous command, typically at a more verbose level.
""" + LastCommand + """ <key>
Prints the logging associated with the provided key. The key typically refers to a task (for example, test:compile). The logging that is displayed is restricted to the logging for that particular task.
See also '""" + LastGrepCommand + "'."
val exportBrief = (ExportCommand + " <tasks>+", "Executes tasks and displays the equivalent command lines.")
val exportDetailed =
s"""$ExportCommand [--last] <task>+
Runs the specified tasks and prints the equivalent command lines or other exportable information for those runs.
--last
Uses information from the previous execution
NOTES: These command lines are necessarily approximate. Usually tasks do not actually
execute the command line and the actual command line program may not be installed or
on the PATH. Incremental tasks will typically show the command line for an
incremental run and not for a full run. Many tasks have no direct command line
equivalent and will show nothing at all.
"""
val InspectCommand = "inspect"
val inspectBrief = (InspectCommand + " [uses|tree|definitions] <key>", "Prints the value for 'key', the defining scope, delegates, related definitions, and dependencies.")
val inspectDetailed = s"""
|$InspectCommand <key>
|
| For a plain setting, the value bound to the key argument is displayed using its toString method.
| Otherwise, the type of task ("Task" or "Input task") is displayed.
|
| "Dependencies" shows the settings that this setting depends on.
|
| "Reverse dependencies" shows the settings that depend on this setting.
|
| When a key is resolved to a value, it may not actually be defined in the requested scope.
| In this case, there is a defined search sequence.
| "Delegates" shows the scopes that are searched for the key.
| "Provided by" shows the scope that contained the value returned for the key.
|
| "Related" shows all of the scopes in which the key is defined.
|
|$InspectCommand tree <key>
|
| Displays `key` and its dependencies in a tree structure.
| For settings, the value bound to the setting is displayed and for tasks, the type of the task is shown.
|
|$InspectCommand uses <key>
|
| Displays the settings and tasks that directly depend on `key`.
|
|$InspectCommand definitions <key>
|
| Displays the scopes in which `key` is defined.
|
|$InspectCommand actual <key>
|
| Displays the actual dependencies used by `key`.
| This is useful because delegation means that a dependency can come from a scope other than the requested one.
| Using `inspect actual` will show exactly which scope is providing a value for a setting.
""".stripMargin.trim
val SetCommand = "set"
val setBrief = (s"$SetCommand [every] <setting>", "Evaluates a Setting and applies it to the current project.")
val setDetailed =
SetCommand + """ [every] <setting-expression>
Applies the given setting to the current project:
1) Constructs the expression provided as an argument by compiling and loading it.
2) Appends the new setting to the current project's settings.
3) Re-evaluates the build's settings.
This command does not rebuild the build definitions, plugins, or configurations.
It does not automatically persist the setting(s) either.
To persist the setting(s), run 'session save' or 'session save-all'.
If 'every' is specified, the setting is evaluated in the current context
and the resulting value is used in every scope. This overrides the value
bound to the key everywhere.
"""
def SessionCommand = "session"
def sessionBrief = (SessionCommand, "Manipulates session settings. For details, run 'help " + SessionCommand + "'.")
def settingsPreamble = commonPreamble("settings")
def tasksPreamble = commonPreamble("tasks") + """
Tasks produce values. Use the 'show' command to run the task and print the resulting value."""
def commonPreamble(label: String) = """
This is a list of %s defined for the current project.
It does not list the scopes the %<s are defined in; use the 'inspect' command for that.""".format(label)
def settingsBrief(label: String) = (label, "Lists the " + label + " defined for the current project.")
def settingsDetailed(label: String) =
"""
Syntax summary
%s [-(v|-vv|...|-V)] [<filter>]
%<s
Displays the main %<s defined directly or indirectly for the current project.
-v
Displays additional %<s. More 'v's increase the number of %<s displayed.
-V
displays all %<s
<filter>
Restricts the %<s that are displayed. The names of %<s are searched for an exact match against the filter, in which case only the description of the exact match is displayed. Otherwise, the filter is interpreted as a regular expression and all %<s whose name or description match the regular expression are displayed. Note that this is an additional filter on top of the %<s selected by the -v style switches, so you must specify -V to search all %<s. Use the %s command to search all commands, tasks, and settings at once.
""".format(label, BasicCommandStrings.HelpCommand)
def moreAvailableMessage(label: String, search: Boolean) =
"More %s may be %s by increasing verbosity. See '%s %s'.\\n".format(label, if (search) "searched" else "viewed", BasicCommandStrings.HelpCommand, label)
def aboutBrief = "Displays basic information about sbt and the build."
def aboutDetailed = aboutBrief
def projectBrief = (ProjectCommand, "Displays the current project or changes to the provided `project`.")
def projectDetailed =
ProjectCommand +
"""
Displays the name of the current project.
""" + ProjectCommand + """ name
Changes to the project with the provided name.
This command fails if there is no project with the given name.
""" + ProjectCommand + """ {uri}
Changes to the root project in the build defined by `uri`.
`uri` must have already been declared as part of the build, such as with Project.dependsOn.
""" + ProjectCommand + """ {uri}name
Changes to the project `name` in the build defined by `uri`.
`uri` must have already been declared as part of the build, such as with Project.dependsOn.
""" + ProjectCommand + """ /
Changes to the initial project.
""" + ProjectCommand + """ ..
Changes to the parent project of the current project.
If there is no parent project, the current project is unchanged.
Use n+1 dots to change to the nth parent.
For example, 'project ....' is equivalent to three consecutive 'project ..' commands."""
def projectsBrief = "Lists the names of available projects or temporarily adds/removes extra builds to the session."
def projectsDetailed =
ProjectsCommand + """
List the names of available builds and the projects defined in those builds.
""" + ProjectsCommand + """ add <URI>+
Adds the builds at the provided URIs to this session.
These builds may be selected using the """ + ProjectCommand + """ command.
Alternatively, tasks from these builds may be run using the explicit syntax {URI}project/task
""" + ProjectsCommand + """ remove <URI>+
Removes extra builds from this session.
Builds explicitly listed in the build definition are not affected by this command.
"""
def sbtrc = ".sbtrc"
def DefaultsCommand = "add-default-commands"
def DefaultsBrief = (DefaultsCommand, DefaultsDetailed)
def DefaultsDetailed = "Registers default built-in commands"
def Load = "load"
def LoadLabel = "a project"
def LoadCommand = "load-commands"
def LoadCommandLabel = "commands"
def LoadFailed = "load-failed"
def LoadProjectImpl = "loadp"
def LoadProject = "reload"
def LoadProjectBrief = (LoadProject, "(Re)loads the current project or changes to plugins project or returns from it.")
def LoadProjectDetailed = LoadProject +
s"""
\\t(Re)loads the project in the current directory.
$LoadProject plugins
\\t(Re)loads the plugins project (under project directory).
$LoadProject return
\\t(Re)loads the root project (and leaves the plugins project)."""
def InitCommand = "initialize"
def InitBrief = (InitCommand, "Initializes command processing.")
def InitDetailed =
InitCommand + """
Initializes command processing.
Runs the following commands.
defaults
Registers default commands.
< ~/.sbtrc
< .sbtrc
Runs commands from ~/.sbtrc and ./.sbtrc if they exist
"""
import java.io.File
import Path._
def sbtRCs(s: State): Seq[File] =
(Path.userHome / sbtrc) ::
(s.baseDir / sbtrc asFile) ::
Nil
val CrossCommand = "+"
val SwitchCommand = "++"
def crossHelp: Help = Help.more(CrossCommand, CrossDetailed)
def switchHelp: Help = Help.more(SwitchCommand, SwitchDetailed)
def CrossDetailed =
s"""$CrossCommand <command>
Runs <command> for each Scala version specified for cross-building.
For each string in `crossScalaVersions` in the current project, this command sets the
`scalaVersion` of all projects to that version, reloads the build, and
executes <command>. When finished, it reloads the build with the original
Scala version.
See also `help $SwitchCommand`
"""
def SwitchDetailed =
s"""$SwitchCommand <scala-version> [<command>]
Changes the Scala version and runs a command.
Sets the `scalaVersion` of all projects to <scala-version> and reloads the build.
If <command> is provided, it is then executed.
$SwitchCommand [<scala-version>=]<scala-home> [<command>]
Uses the Scala installation at <scala-home> by configuring the scalaHome setting for
all projects.
If <scala-version> is specified, it is used as the value of the scalaVersion setting.
This is important when using managed dependencies. This version will determine the
cross-version used as well as transitive dependencies.
If <command> is provided, it is then executed.
See also `help $CrossCommand`
"""
}
|
mdedetrich/sbt
|
main/src/main/scala/sbt/CommandStrings.scala
|
Scala
|
bsd-3-clause
| 12,762 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform;
import org.scalastyle.PositionError
import org.scalastyle.ScalariformChecker
import org.scalastyle.ScalastyleError
import VisitorHelper.visit
import VisitorHelper.traverse
import VisitorHelper.TreeVisit
import scalariform.lexer.Tokens.LBRACE
import scalariform.lexer.Tokens.RBRACE
import scalariform.parser.AstNode
import scalariform.parser.CompilationUnit
import scalariform.parser.TmplDef
abstract class AbstractClassChecker extends ScalariformChecker {
case class TmplClazz(t: TmplDef, subs: List[TmplClazz]) extends TreeVisit[TmplClazz]
final def verify(ast: CompilationUnit): List[ScalastyleError] = {
val it = for {
f <- visit[TmplDef, TmplClazz](map)(ast.immediateChildren(0));
t <- traverse(f, matches)
} yield {
PositionError(t.t.name.offset)
}
it.toList
}
def matches(t: TmplClazz): Boolean
private def map(t: TmplDef): List[TmplClazz] = List(TmplClazz(t, visit(map)(t.templateBodyOption)))
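  // Subclass sketch (illustrative only, not part of this file): a concrete checker supplies `matches`, e.g.
  //   class MyTmplChecker extends AbstractClassChecker {
  //     def matches(t: TmplClazz): Boolean = t.subs.nonEmpty
  //   }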
}
|
kahosato/scalastyle
|
src/main/scala/org/scalastyle/scalariform/AbstractClassChecker.scala
|
Scala
|
apache-2.0
| 1,720 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.sources.v2.reader._
/**
* Physical plan node for scanning a batch of data from a data source v2.
*/
case class BatchScanExec(
output: Seq[AttributeReference],
@transient scan: Scan) extends DataSourceV2ScanExecBase {
@transient lazy val batch = scan.toBatch
// TODO: unify the equal/hashCode implementation for all data source v2 query plans.
override def equals(other: Any): Boolean = other match {
case other: BatchScanExec => this.batch == other.batch
case _ => false
}
override def hashCode(): Int = batch.hashCode()
override lazy val partitions: Seq[InputPartition] = batch.planInputPartitions()
override lazy val readerFactory: PartitionReaderFactory = batch.createReaderFactory()
override lazy val inputRDD: RDD[InternalRow] = {
new DataSourceRDD(sparkContext, partitions, readerFactory, supportsBatch)
}
}
|
WindCanDie/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/BatchScanExec.scala
|
Scala
|
apache-2.0
| 1,878 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import org.joda.time.DateTime
import kafka.common.TopicAndPartition
import org.slf4j.LoggerFactory
import scala.collection.immutable.Queue
import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.util.{Try, Success, Failure}
import scalaz.{NonEmptyList, Validation}
/**
* @author hiral
*/
object ActorModel {
trait ActorRequest
trait ActorResponse
trait CommandRequest extends ActorRequest
trait CommandResponse extends ActorResponse
trait QueryRequest extends ActorRequest
trait QueryResponse extends ActorResponse
case class ActorErrorResponse(msg: String, throwableOption: Option[Throwable] = None) extends ActorResponse
sealed trait BVRequest extends QueryRequest
case object BVForceUpdate extends CommandRequest
case object BVGetTopicIdentities extends BVRequest
case object BVGetTopicConsumerMap extends BVRequest
case object BVGetConsumerIdentities extends BVRequest
case class BVGetView(id: Int) extends BVRequest
case object BVGetViews extends BVRequest
case class BVGetTopicMetrics(topic: String) extends BVRequest
case object BVGetBrokerMetrics extends BVRequest
case class BVView(topicPartitions: Map[TopicIdentity, IndexedSeq[Int]], clusterContext: ClusterContext,
metrics: Option[BrokerMetrics] = None,
messagesPerSecCountHistory: Option[Queue[BrokerMessagesPerSecCount]] = None,
stats: Option[BrokerClusterStats] = None) extends QueryResponse {
def numTopics : Int = topicPartitions.size
def numPartitions : Int = topicPartitions.values.foldLeft(0)((acc,i) => acc + i.size)
}
case class BVUpdateTopicMetricsForBroker(id: Int, metrics: IndexedSeq[(String,BrokerMetrics)]) extends CommandRequest
case class BVUpdateBrokerMetrics(id: Int, metric: BrokerMetrics) extends CommandRequest
case object CMGetView extends QueryRequest
case class CMGetTopicIdentity(topic: String) extends QueryRequest
case object CMGetClusterContext extends QueryRequest
case class CMView(topicsCount: Int, brokersCount: Int, clusterContext: ClusterContext) extends QueryResponse
case class CMGetConsumerIdentity(consumer: String) extends QueryRequest
case class CMGetConsumedTopicState(consumer: String, topic: String) extends QueryRequest
case class CMTopicIdentity(topicIdentity: Try[TopicIdentity]) extends QueryResponse
case class CMConsumerIdentity(consumerIdentity: Try[ConsumerIdentity]) extends QueryResponse
case class CMConsumedTopic(ctIdentity: Try[ConsumedTopicState]) extends QueryResponse
case object CMShutdown extends CommandRequest
case class CMCreateTopic(topic: String,
partitions: Int,
replicationFactor: Int,
config: Properties = new Properties) extends CommandRequest
case class CMAddTopicPartitions(topic: String,
brokers: Seq[Int],
partitions: Int,
partitionReplicaList: Map[Int, Seq[Int]],
readVersion: Int) extends CommandRequest
case class CMAddMultipleTopicsPartitions(topicsAndReplicas: Seq[(String, Map[Int, Seq[Int]])],
brokers: Seq[Int],
partitions: Int,
readVersions: Map[String,Int]) extends CommandRequest
case class CMUpdateTopicConfig(topic: String, config: Properties, readVersion: Int) extends CommandRequest
case class CMDeleteTopic(topic: String) extends CommandRequest
case class CMRunPreferredLeaderElection(topics: Set[String]) extends CommandRequest
case class CMRunReassignPartition(topics: Set[String]) extends CommandRequest
case class CMGeneratePartitionAssignments(topics: Set[String], brokers: Seq[Int]) extends CommandRequest
case class CMManualPartitionAssignments(assignments: List[(String, List[(Int, List[Int])])]) extends CommandRequest
//these are used by Logkafka
//##########
case class CMGetLogkafkaIdentity(hostname: String) extends QueryRequest
case class CMLogkafkaIdentity(logkafkaIdentity: Try[LogkafkaIdentity]) extends QueryResponse
case class CMCreateLogkafka(hostname: String,
log_path: String,
config: Properties = new Properties
) extends CommandRequest
case class CMUpdateLogkafkaConfig(hostname: String,
log_path: String,
config: Properties,
checkConfig: Boolean = true
) extends CommandRequest
case class CMDeleteLogkafka(hostname: String, log_path: String) extends CommandRequest
//##########
case class CMCommandResult(result: Try[ClusterContext]) extends CommandResponse
case class CMCommandResults(result: IndexedSeq[Try[Unit]]) extends CommandResponse
case class KCCreateTopic(topic: String,
brokers: Seq[Int],
partitions: Int,
replicationFactor:Int,
config: Properties) extends CommandRequest
case class KCAddTopicPartitions(topic: String,
brokers: Seq[Int],
partitions: Int,
partitionReplicaList: Map[Int, Seq[Int]],
readVersion: Int) extends CommandRequest
case class KCAddMultipleTopicsPartitions(topicsAndReplicas: Seq[(String, Map[Int, Seq[Int]])],
brokers: Seq[Int],
partitions: Int,
readVersions: Map[String, Int]) extends CommandRequest
case class KCUpdateTopicConfig(topic: String, config: Properties, readVersion: Int) extends CommandRequest
case class KCDeleteTopic(topic: String) extends CommandRequest
case class KCPreferredReplicaLeaderElection(topicAndPartition: Set[TopicAndPartition]) extends CommandRequest
case class KCReassignPartition(currentTopicIdentity: Map[String, TopicIdentity],
generatedTopicIdentity: Map[String, TopicIdentity]) extends CommandRequest
case class KCCommandResult(result: Try[Unit]) extends CommandResponse
case object KMGetActiveClusters extends QueryRequest
case object KMGetAllClusters extends QueryRequest
case class KMGetClusterConfig(clusterName: String) extends QueryRequest
case class KMClusterQueryRequest(clusterName: String, request: QueryRequest) extends QueryRequest
case class KMQueryResult(result: IndexedSeq[ClusterConfig]) extends QueryResponse
case class KMClusterConfigResult(result: Try[ClusterConfig]) extends QueryResponse
case class KMClusterList(active: IndexedSeq[ClusterConfig], pending : IndexedSeq[ClusterConfig]) extends QueryResponse
case object KMUpdateState extends CommandRequest
case object KMPruneClusters extends CommandRequest
case object KMShutdown extends CommandRequest
case object KMShutdownComplete extends CommandResponse
case class KMAddCluster(config: ClusterConfig) extends CommandRequest
case class KMUpdateCluster(config: ClusterConfig) extends CommandRequest
case class KMEnableCluster(clusterName: String) extends CommandRequest
case class KMDisableCluster(clusterName: String) extends CommandRequest
case class KMDeleteCluster(clusterName: String) extends CommandRequest
case class KMClusterCommandRequest(clusterName: String, request: CommandRequest) extends CommandRequest
case class KMCommandResult(result: Try[Unit]) extends CommandResponse
sealed trait KSRequest extends QueryRequest
case object KSGetTopics extends KSRequest
case object KSGetConsumers extends KSRequest
case class KSGetTopicConfig(topic: String) extends KSRequest
case class KSGetTopicDescription(topic: String) extends KSRequest
case class KSGetAllTopicDescriptions(lastUpdateMillis: Option[Long] = None) extends KSRequest
case class KSGetTopicDescriptions(topics: Set[String]) extends KSRequest
case class KSGetConsumerDescription(consumer: String) extends KSRequest
case class KSGetConsumedTopicDescription(consumer: String, topic: String) extends KSRequest
case class KSGetAllConsumerDescriptions(lastUpdateMillis: Option[Long] = None) extends KSRequest
case class KSGetConsumerDescriptions(consumers: Set[String]) extends KSRequest
case object KSGetTopicsLastUpdateMillis extends KSRequest
case object KSGetPreferredLeaderElection extends KSRequest
case object KSGetReassignPartition extends KSRequest
case class KSEndPreferredLeaderElection(millis: Long) extends CommandRequest
case class KSUpdatePreferredLeaderElection(millis: Long, json: String) extends CommandRequest
case class KSEndReassignPartition(millis: Long) extends CommandRequest
case class KSUpdateReassignPartition(millis: Long, json: String) extends CommandRequest
case object KSGetBrokers extends KSRequest
case class KSGetBrokerState(id: String) extends KSRequest
case class TopicList(list: IndexedSeq[String], deleteSet: Set[String], clusterContext: ClusterContext) extends QueryResponse
case class TopicConfig(topic: String, config: Option[(Int,String)]) extends QueryResponse
case class ConsumerList(list: IndexedSeq[String], clusterContext: ClusterContext) extends QueryResponse
case class TopicDescription(topic: String,
description: (Int,String),
partitionState: Option[Map[String, String]],
partitionOffsets: Future[PartitionOffsetsCapture],
config:Option[(Int,String)]) extends QueryResponse
case class TopicDescriptions(descriptions: IndexedSeq[TopicDescription], lastUpdateMillis: Long) extends QueryResponse
case class BrokerList(list: IndexedSeq[BrokerIdentity], clusterContext: ClusterContext) extends QueryResponse
case class PreferredReplicaElection(startTime: DateTime,
topicAndPartition: Set[TopicAndPartition],
endTime: Option[DateTime],
clusterContext: ClusterContext) extends QueryResponse
case class ReassignPartitions(startTime: DateTime,
partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]],
endTime: Option[DateTime],
clusterContext: ClusterContext) extends QueryResponse
case class ConsumedTopicDescription(consumer: String,
topic: String,
numPartitions: Int,
topicDescription: Option[TopicDescription],
partitionOwners: Option[Map[Int, String]],
partitionOffsets: Option[Map[Int, Long]])
case class ConsumerDescription(consumer: String,
topics: Map[String, ConsumedTopicDescription]) extends QueryResponse
case class ConsumerDescriptions(descriptions: IndexedSeq[ConsumerDescription], lastUpdateMillis: Long) extends QueryResponse
case object DCUpdateState extends CommandRequest
case class BrokerIdentity(id: Int, host: String, port: Int, jmxPort: Int)
object BrokerIdentity {
import scalaz.syntax.applicative._
import org.json4s.jackson.JsonMethods._
import org.json4s.scalaz.JsonScalaz
import org.json4s.scalaz.JsonScalaz._
import scala.language.reflectiveCalls
implicit def from(id: Int, config: String): Validation[NonEmptyList[JsonScalaz.Error],BrokerIdentity]= {
val json = parse(config)
(field[String]("host")(json) |@| field[Int]("port")(json) |@| field[Int]("jmx_port")(json))
{
(host: String, port: Int, jmxPort: Int) => BrokerIdentity(id,host, port, jmxPort)
}
}
}
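// Illustrative example (not part of the original source): `from` parses the broker registration
// JSON with the "host", "port" and "jmx_port" fields shown above, so a call such as
//   BrokerIdentity.from(1, """{"host":"kafka-1.example.com","port":9092,"jmx_port":9999}""")
// validates successfully to BrokerIdentity(1, "kafka-1.example.com", 9092, 9999); the hostname
// here is a made-up placeholder.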
case class TopicPartitionIdentity(partNum: Int,
leader: Int,
latestOffset: Option[Long],
rateOfChange: Option[Double],
isr: Seq[Int],
replicas: Seq[Int],
isPreferredLeader: Boolean = false,
isUnderReplicated: Boolean = false)
object TopicPartitionIdentity {
lazy val logger = LoggerFactory.getLogger(this.getClass)
import scalaz.syntax.applicative._
import org.json4s.jackson.JsonMethods._
import org.json4s.scalaz.JsonScalaz._
import scala.language.reflectiveCalls
implicit def from(partition: Int,
state:Option[String],
offset: Option[Long],
rateOfChange: Option[Double],
replicas: Seq[Int]) : TopicPartitionIdentity = {
val leaderAndIsr = for {
json <- state
parsedJson = parse(json)
} yield {
(field[Int]("leader")(parsedJson) |@| field[List[Int]]("isr")(parsedJson)) {
(leader: Int, isr: Seq[Int]) => leader -> isr
}
}
val default = TopicPartitionIdentity(partition,
-2,
offset,
rateOfChange,
Seq.empty,
replicas)
leaderAndIsr.fold(default) { parsedLeaderAndIsrOrError =>
parsedLeaderAndIsrOrError.fold({ e =>
logger.error(s"Failed to parse topic state $e")
default
}, {
case (leader, isr) =>
TopicPartitionIdentity(partition, leader, offset, rateOfChange, isr, replicas, leader == replicas.head, isr.size != replicas.size)
})
}
}
}
case class BrokerTopicPartitions(id: Int, partitions: IndexedSeq[Int], isSkewed: Boolean)
case class PartitionOffsetsCapture(updateTimeMillis: Long, offsetsMap: Map[Int, Long])
object PartitionOffsetsCapture {
val ZERO : Option[Double] = Option(0D)
val EMPTY : PartitionOffsetsCapture = PartitionOffsetsCapture(0, Map.empty)
def getRate(part: Int, currentOffsets: PartitionOffsetsCapture, previousOffsets: PartitionOffsetsCapture): Option[Double] = {
val timeDiffMillis = currentOffsets.updateTimeMillis - previousOffsets.updateTimeMillis
val offsetDif = for {
currentOffset <- currentOffsets.offsetsMap.get(part)
previousOffset <- previousOffsets.offsetsMap.get(part)
} yield {
currentOffset - previousOffset
}
if(timeDiffMillis > 0) {
//multiply by 1000 since we have millis
offsetDif.map( od => od * 1000 * 1D / timeDiffMillis)
} else {
PartitionOffsetsCapture.ZERO
}
}
}
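// Worked example (illustrative, not part of the original source): with
//   previous = PartitionOffsetsCapture(1000L, Map(0 -> 100L))
//   current  = PartitionOffsetsCapture(6000L, Map(0 -> 600L))
// getRate(0, current, previous) yields Some((600 - 100) * 1000.0 / (6000 - 1000)) = Some(100.0),
// i.e. partition 0 is advancing by roughly 100 messages per second.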
case class TopicIdentity(topic:String,
readVersion: Int,
partitions:Int,
partitionsIdentity: Map[Int,TopicPartitionIdentity],
numBrokers: Int,
configReadVersion: Int,
config: List[(String,String)],
clusterContext: ClusterContext,
metrics: Option[BrokerMetrics] = None) {
val replicationFactor : Int = partitionsIdentity.head._2.replicas.size
val partitionsByBroker : IndexedSeq[BrokerTopicPartitions] = {
val brokerPartitionsMap : Map[Int, Iterable[Int]] =
partitionsIdentity.toList.flatMap(t => t._2.isr.map(i => (i,t._2.partNum))).groupBy(_._1).mapValues(_.map(_._2))
val brokersForTopic = brokerPartitionsMap.keySet.size
val avgPartitionsPerBroker : Double = Math.ceil((1.0 * partitions) / brokersForTopic * replicationFactor)
brokerPartitionsMap.map {
case (brokerId, brokerPartitions)=>
BrokerTopicPartitions(brokerId, brokerPartitions.toIndexedSeq.sorted,
brokerPartitions.size > avgPartitionsPerBroker)
}.toIndexedSeq.sortBy(_.id)
}
// a topic's log-size is the sum of its partitions' log-sizes, we take the sum of the ones we know the offset for.
val summedTopicOffsets : Long = partitionsIdentity.map(_._2.latestOffset).collect{case Some(offset) => offset}.sum
val preferredReplicasPercentage : Int = (100 * partitionsIdentity.count(_._2.isPreferredLeader)) / partitions
val underReplicatedPercentage : Int = (100 * partitionsIdentity.count(_._2.isUnderReplicated)) / partitions
val topicBrokers : Int = partitionsByBroker.size
val brokersSkewPercentage : Int = {
if(topicBrokers > 0)
(100 * partitionsByBroker.count(_.isSkewed)) / topicBrokers
else 0
}
val brokersSpreadPercentage : Int = if(numBrokers > 0) {
(100 * topicBrokers) / numBrokers
} else {
100 // everything is spread if nothing has to be spread
}
val producerRate: String = BigDecimal(partitionsIdentity.map(_._2.rateOfChange.getOrElse(0D)).sum).setScale(2, BigDecimal.RoundingMode.HALF_UP).toString()
}
object TopicIdentity {
lazy val logger = LoggerFactory.getLogger(this.getClass)
import org.json4s.jackson.JsonMethods._
import org.json4s.scalaz.JsonScalaz._
import scala.language.reflectiveCalls
private[this] def getPartitionReplicaMap(td: TopicDescription) : Map[String, List[Int]] = {
// Get the topic description information
val descJson = parse(td.description._2)
field[Map[String,List[Int]]]("partitions")(descJson).fold({ e =>
logger.error(s"[topic=${td.topic}] Failed to get partitions from topic json ${td.description._2}")
Map.empty
}, identity)
}
private[this] def getTopicPartitionIdentity(td: TopicDescription,
partMap: Map[String, List[Int]],
tdPrevious: Option[TopicDescription]) : Map[Int, TopicPartitionIdentity] = {
val stateMap = td.partitionState.getOrElse(Map.empty)
// Assign the partition data to the TPI format
partMap.map { case (partition, replicas) =>
val partitionNum = partition.toInt
// block on the futures that hold the latest produced offset in each partition
val partitionOffsets: Option[PartitionOffsetsCapture] = Await.ready(td.partitionOffsets, Duration.Inf).value.get match {
case Success(offsetMap) =>
Option(offsetMap)
case Failure(e) =>
None
}
val previousPartitionOffsets: Option[PartitionOffsetsCapture] = tdPrevious.flatMap {
ptd => Await.ready(ptd.partitionOffsets, Duration.Inf).value.get match {
case Success(offsetMap) =>
Option(offsetMap)
case Failure(e) =>
None
}
}
val currentOffsetOption = partitionOffsets.flatMap(_.offsetsMap.get(partitionNum))
val rateOfChange = for {
currentOffsets <- partitionOffsets
previousOffsets <- previousPartitionOffsets
result <- PartitionOffsetsCapture.getRate(partitionNum, currentOffsets, previousOffsets)
} yield result
(partitionNum,TopicPartitionIdentity.from(partitionNum,
stateMap.get(partition),
currentOffsetOption,
rateOfChange,
replicas))
}
}
def getTopicPartitionIdentity(td: TopicDescription, tdPrevious: Option[TopicDescription]) : Map[Int, TopicPartitionIdentity] = {
// Get the topic description information
val partMap = getPartitionReplicaMap(td)
getTopicPartitionIdentity(td, partMap, tdPrevious)
}
implicit def from(brokers: Int,
td: TopicDescription,
tm: Option[BrokerMetrics],
clusterContext: ClusterContext, tdPrevious: Option[TopicDescription]) : TopicIdentity = {
// Get the topic description information
val partMap = getPartitionReplicaMap(td)
val tpi : Map[Int,TopicPartitionIdentity] = getTopicPartitionIdentity(td, partMap, tdPrevious)
val config : (Int,Map[String, String]) = {
try {
val resultOption: Option[(Int,Map[String, String])] = td.config.map { configString =>
val configJson = parse(configString._2)
val configMap : Map[String, String] = field[Map[String,String]]("config")(configJson).fold({ e =>
logger.error(s"Failed to parse topic config ${configString._2}")
Map.empty
}, identity)
(configString._1,configMap)
}
resultOption.getOrElse((-1,Map.empty[String, String]))
} catch {
case e: Exception =>
logger.error(s"[topic=${td.topic}] Failed to parse topic config : ${td.config.getOrElse("")}",e)
(-1,Map.empty[String, String])
}
}
TopicIdentity(td.topic,td.description._1,partMap.size,tpi,brokers,config._1,config._2.toList, clusterContext, tm)
}
implicit def from(bl: BrokerList,td: TopicDescription, tm: Option[BrokerMetrics], clusterContext: ClusterContext, tdPrevious: Option[TopicDescription]) : TopicIdentity = {
from(bl.list.size, td, tm, clusterContext, tdPrevious)
}
implicit def reassignReplicas(currentTopicIdentity: TopicIdentity,
assignedReplicas: Map[Int, Seq[Int]]) : Try[TopicIdentity] = {
Try {
val newTpi : Map[Int, TopicPartitionIdentity] = currentTopicIdentity.partitionsIdentity.map { case (part, tpi) =>
val newReplicaSeq = assignedReplicas.get(part)
require(newReplicaSeq.isDefined, s"Missing replica assignment for partition $part for topic ${currentTopicIdentity.topic}")
val newReplicaSet = newReplicaSeq.get.toSet
require(newReplicaSeq.get.size == newReplicaSet.size,
s"Duplicates found in replica set ${newReplicaSeq.get} for partition $part for topic ${currentTopicIdentity.topic}")
(part,tpi.copy(replicas = newReplicaSeq.get))
}
TopicIdentity(
currentTopicIdentity.topic,
currentTopicIdentity.readVersion,
currentTopicIdentity.partitions,
newTpi,
currentTopicIdentity.numBrokers,
currentTopicIdentity.configReadVersion,
currentTopicIdentity.config,
currentTopicIdentity.clusterContext,
currentTopicIdentity.metrics)
}
}
}
case class ConsumedTopicState(consumerGroup: String,
topic: String,
numPartitions: Int,
partitionLatestOffsets: Map[Int, Long],
partitionOwners: Map[Int, String],
partitionOffsets: Map[Int, Long],
clusterContext: ClusterContext) {
lazy val totalLag : Option[Long] = {
// only defined if every partition has a latest offset
if (partitionLatestOffsets.values.size == numPartitions && partitionLatestOffsets.size == numPartitions) {
Some(partitionLatestOffsets.values.sum - partitionOffsets.values.sum)
} else None
}
def topicOffsets(partitionNum: Int) : Option[Long] = partitionLatestOffsets.get(partitionNum)
def partitionLag(partitionNum: Int) : Option[Long] = {
topicOffsets(partitionNum).flatMap{topicOffset =>
partitionOffsets.get(partitionNum).map(topicOffset - _)}
}
// Percentage of the partitions that have an owner
def percentageCovered : Int =
if (numPartitions != 0) {
val numCovered = partitionOwners.size
100 * numCovered / numPartitions
} else {
100 // if there are no partitions to cover, they are all covered!
}
}
object ConsumedTopicState {
def from(ctd: ConsumedTopicDescription, clusterContext: ClusterContext): ConsumedTopicState = {
val partitionOffsetsMap = ctd.partitionOffsets.getOrElse(Map.empty)
val partitionOwnersMap = ctd.partitionOwners.getOrElse(Map.empty)
// block on the futures that hold the latest produced offset in each partition
val topicOffsetsOptMap: Map[Int, Long]= ctd.topicDescription.map{td: TopicDescription =>
Await.ready(td.partitionOffsets, Duration.Inf).value.get match {
case Success(offsetMap) =>
offsetMap.offsetsMap
case Failure(e) =>
Map.empty[Int, Long]
}}.getOrElse(Map.empty)
ConsumedTopicState(
ctd.consumer,
ctd.topic,
ctd.numPartitions,
topicOffsetsOptMap,
partitionOwnersMap,
partitionOffsetsMap,
clusterContext)
}
}
case class ConsumerIdentity(consumerGroup:String,
topicMap: Map[String, ConsumedTopicState],
clusterContext: ClusterContext)
object ConsumerIdentity {
lazy val logger = LoggerFactory.getLogger(this.getClass)
import scala.language.reflectiveCalls
implicit def from(cd: ConsumerDescription,
clusterContext: ClusterContext) : ConsumerIdentity = {
val topicMap: Seq[(String, ConsumedTopicState)] = for {
(topic, ctd) <- cd.topics.toSeq
cts = ConsumedTopicState.from(ctd, clusterContext)
} yield (topic, cts)
ConsumerIdentity(cd.consumer,
topicMap.toMap,
clusterContext)
}
}
case class BrokerMessagesPerSecCount(date: DateTime,
count: Long)
case class BrokerMetrics(bytesInPerSec: MeterMetric,
bytesOutPerSec: MeterMetric,
bytesRejectedPerSec: MeterMetric,
failedFetchRequestsPerSec: MeterMetric,
failedProduceRequestsPerSec: MeterMetric,
messagesInPerSec: MeterMetric,
oSystemMetrics: OSMetric) {
def +(o: BrokerMetrics) : BrokerMetrics = {
BrokerMetrics(
o.bytesInPerSec + bytesInPerSec,
o.bytesOutPerSec + bytesOutPerSec,
o.bytesRejectedPerSec + bytesRejectedPerSec,
o.failedFetchRequestsPerSec + failedFetchRequestsPerSec,
o.failedProduceRequestsPerSec + failedProduceRequestsPerSec,
o.messagesInPerSec + messagesInPerSec,
oSystemMetrics)
}
}
object BrokerMetrics {
val DEFAULT = BrokerMetrics(
MeterMetric(0, 0, 0, 0, 0),
MeterMetric(0, 0, 0, 0, 0),
MeterMetric(0, 0, 0, 0, 0),
MeterMetric(0, 0, 0, 0, 0),
MeterMetric(0, 0, 0, 0, 0),
MeterMetric(0, 0, 0, 0, 0),
OSMetric(0D, 0D))
}
case class BrokerClusterStats(perMessages: BigDecimal, perIncoming: BigDecimal, perOutgoing: BigDecimal)
sealed trait LKVRequest extends QueryRequest
case object LKVForceUpdate extends CommandRequest
case object LKVGetLogkafkaIdentities extends LKVRequest
case class LKCCreateLogkafka(hostname: String,
log_path: String,
config: Properties,
logkafkaConfig: Option[LogkafkaConfig]) extends CommandRequest
case class LKCDeleteLogkafka(hostname: String,
log_path: String,
logkafkaConfig: Option[LogkafkaConfig]) extends CommandRequest
case class LKCUpdateLogkafkaConfig(hostname: String,
log_path: String,
config: Properties,
logkafkaConfig: Option[LogkafkaConfig],
checkConfig: Boolean = true
) extends CommandRequest
case class LKCCommandResult(result: Try[Unit]) extends CommandResponse
sealed trait LKSRequest extends QueryRequest
case object LKSGetLogkafkaHostnames extends LKSRequest
case class LKSGetLogkafkaConfig(hostname: String) extends LKSRequest
case class LKSGetLogkafkaClient(hostname: String) extends LKSRequest
case class LKSGetLogkafkaConfigs(hostnames: Set[String]) extends LKSRequest
case class LKSGetLogkafkaClients(hostnames: Set[String]) extends LKSRequest
case class LKSGetAllLogkafkaConfigs(lastUpdateMillis: Option[Long] = None) extends LKSRequest
case class LKSGetAllLogkafkaClients(lastUpdateMillis: Option[Long] = None) extends LKSRequest
case class LogkafkaHostnameList(list: IndexedSeq[String], deleteSet: Set[String]) extends QueryResponse
case class LogkafkaConfig(hostname: String, config: Option[String]) extends QueryResponse
case class LogkafkaClient(hostname: String, client: Option[String]) extends QueryResponse
case class LogkafkaConfigs(configs: IndexedSeq[LogkafkaConfig], lastUpdateMillis: Long) extends QueryResponse
case class LogkafkaClients(clients: IndexedSeq[LogkafkaClient], lastUpdateMillis: Long) extends QueryResponse
case class LogkafkaIdentity(hostname: String,
active: Boolean,
identityMap: Map[String, (Option[Map[String, String]], Option[Map[String, String]])]) {
}
object LogkafkaIdentity {
lazy val logger = LoggerFactory.getLogger(this.getClass)
implicit def from(hostname: String, lcg: Option[LogkafkaConfig], lct: Option[LogkafkaClient]) : LogkafkaIdentity = {
val configJsonStr = lcg match {
case Some(l) => l.config.getOrElse[String]("{}")
case None => "{}"
}
val configMap: Map[String, Map[String, String]] = utils.Logkafka.parseJsonStr(hostname, configJsonStr)
val clientJsonStr = lct match {
case Some(l) => l.client.getOrElse[String]("{}")
case None => "{}"
}
val clientMap: Map[String, Map[String, String]] = utils.Logkafka.parseJsonStr(hostname, clientJsonStr)
val hostnameSet = configMap.keySet ++ clientMap.keySet
val identitySet = if (!hostnameSet.isEmpty) {
hostnameSet map { l => l -> ((if(!configMap.isEmpty) configMap.get(l) else None, if(!clientMap.isEmpty) clientMap.get(l) else None)) }
} else { Set() }
LogkafkaIdentity(hostname, lct.isDefined, identitySet.toMap)
}
}
}
| Flipkart/kafka-manager | app/kafka/manager/ActorModel.scala | Scala | apache-2.0 | 30,622 |
object SCL6807 {
object literal extends scala.Dynamic {
def applyDynamic(s: String)(r: String) = 1
def applyDynamicNamed(s : String)(r: (String, Any)) = "2"
}
val x = literal
/*start*/(literal.foo(x = 2),
literal(""),
this literal (x = 2),
literal(x = 2),
x(""),
this x (x = 2),
x(x = 2))/*end*/
}
//(String, Int, String, String, Int, String, String)
| ilinum/intellij-scala | testdata/typeInference/bugs5/SCL6807.scala | Scala | apache-2.0 | 392 |
package com.proinnovate
import javafx.application.Application
import javafx.fxml.FXMLLoader
import javafx.scene.Parent
import javafx.scene.Scene
import javafx.stage.Stage
import javafx.scene.control.{ProgressIndicator, TextField, Button}
import com.typesafe.scalalogging.slf4j.Logging
import javafx.event.{ActionEvent, EventHandler}
import javafx.scene.input.KeyEvent
import javafx.scene.layout.Pane
import concurrent.Future
import concurrent.ExecutionContext.Implicits.global
import java.io.File
import org.joda.time.{LocalDateTime, LocalDate}
import util.Try
class BurnApp extends Application with Logging {
def start(stage: Stage) {
// Text.fontSmoothingType(FontSmoothingType.LCD) - I think this requires JavaFX2.2
val resource = getClass.getResource("mp3andburn.fxml")
val root = FXMLLoader.load[Parent](resource)
setupControllers(root)
stage.setTitle("mp3 and burn")
val scene = new Scene(root)
stage.setScene(scene)
stage.show()
}
private def setupControllers(root: Parent) {
val burnButton:Button = root.lookup("#burnButton").asInstanceOf[Button]
burnButton.setDisable(true)
val titleField = root.lookup("#titleField").asInstanceOf[TextField]
val authorField = root.lookup("#authorField").asInstanceOf[TextField]
val seriesField = root.lookup("#seriesField").asInstanceOf[TextField]
val commentField = root.lookup("#commentField").asInstanceOf[TextField]
val seriesPrefix = "St Mungo's"
setMaxFieldLength(titleField, 30)
setMaxFieldLength(authorField, 30)
setMaxFieldLength(seriesField, 30 - (seriesPrefix.length + 1))
setMaxFieldLength(commentField, 30)
// Require a title and author field before enabling the Burn button.
object burnButtonEnablingEventHandler extends EventHandler[KeyEvent] {
def handle(keyEvent: KeyEvent) {
if (titleField.getText.length > 0 && authorField.getText.length > 2) burnButton.setDisable(false)
else burnButton.setDisable(true)
}
}
Seq(titleField, authorField).map(_.addEventHandler(KeyEvent.KEY_TYPED, burnButtonEnablingEventHandler))
setupBurnButtonHandler(root, burnButton)
// Connect progress displays with workers
val normaliseProgress = root.lookup("#normaliseProgress").asInstanceOf[ProgressIndicator]
val splitProgress = root.lookup("#splitProgress").asInstanceOf[ProgressIndicator]
val burnProgress = root.lookup("#burnProgress").asInstanceOf[ProgressIndicator]
val mp3Progress = root.lookup("#mp3Progress").asInstanceOf[ProgressIndicator]
normaliseProgress.progressProperty.bind(Normalize.progress)
splitProgress.progressProperty.bind(SplitRecordingIntoTracks.progress)
burnProgress.progressProperty.bind(BurnCD.progress)
mp3Progress.progressProperty.bind(CreateMp3.progress)
}
private def setupBurnButtonHandler(root: Parent, burnButton: Button) {
burnButton.setOnAction(new EventHandler[ActionEvent]() {
def handle(actionEvent: ActionEvent) {
val pane1 = root.lookup("#pane1").asInstanceOf[Pane]
val pane2 = root.lookup("#pane2").asInstanceOf[Pane]
pane1.setVisible(false)
pane2.setVisible(true)
// If it exists, remove the "Untitled CD.fpbf" file on the desktop.
val untitledCd = new File(Config.userHome, "Desktop/Untitled CD.fpbf")
if (untitledCd.isDirectory) {
Try(untitledCd.delete())
}
val title = root.lookup("#titleField").asInstanceOf[TextField].getText
val author = root.lookup("#authorField").asInstanceOf[TextField].getText
val series = "St Mungo's:" + root.lookup("#seriesField").asInstanceOf[TextField].getText
val year = new LocalDate().getYear.toString
val comment = root.lookup("#commentField").asInstanceOf[TextField].getText
val successFuture = prepareMp3(title, author, series, year, comment)
}
})
}
private def setMaxFieldLength(field: TextField, length: Int) {
field.addEventFilter(KeyEvent.KEY_TYPED, new EventHandler[KeyEvent] {
def handle(keyEvent: KeyEvent) {
val text = field.getText
if (text.length >= length) keyEvent.consume()
}
})
}
private def prepareMp3(title: String, author: String, album: String, year: String, comment: String): Future[Boolean] = {
// FIXME: Find the SONGS directory by checking all potential volumes from an appropriately named file.
val inputDir = new File("/Volumes/NO NAME/YPE/SONGS")
val outputFile = new File(Config.userHome, "Desktop/file.mp3")
val splitTrackDir = new File(Config.userHome, "Desktop/audiocd")
val files = inputDir.listFiles
logger.info(s"files = ${files.mkString(",")}")
val latestFileOpt = files.sortWith{ case (x: File,y: File) => x.lastModified > y.lastModified }.headOption
logger.info(s"latestFileOpt = $latestFileOpt")
val finalMp3File = latestFileOpt.map {
latestFile: File =>
val modifiedDate = new LocalDateTime(latestFileOpt.get.lastModified)
modifiedDate.getYear
val year = f"${modifiedDate.getYear}%04d"
val month = f"${modifiedDate.getMonthOfYear}%02d"
val date = f"${modifiedDate.getDayOfMonth}%02d"
val ampm = if (modifiedDate.getHourOfDay < 15) "am" else "pm"
val fullname = s"$year-$month-$date-$ampm.mp3"
new File(Config.userHome, "Desktop/" + fullname)
}.getOrElse(new File(Config.userHome, "Desktop/today.mp3"))
logger.info(s"finalMp3File = $finalMp3File")
val normFileFuture = Future { latestFileOpt.flatMap(Normalize.normalise(_, outputFile)) }
val splitDirFuture = normFileFuture.map(_.flatMap(SplitRecordingIntoTracks.splitRecording(_, splitTrackDir)))
val burnCdOkayFuture = splitDirFuture.map(_.map {
cdDir =>
BurnCD.burn(cdDir)
Try(cdDir.listFiles().foreach(_.delete()))
Try(cdDir.delete())
})
val createMp3OkayFuture = normFileFuture.map(_.flatMap(CreateMp3.createMp3(_,finalMp3File, title, author, album, year, comment)))
val success = for {
burnCdOkay <- burnCdOkayFuture
createMp3Okay <- createMp3OkayFuture
} yield {
Try(outputFile.delete())
burnCdOkay.isDefined && createMp3Okay.isDefined
}
success
}
}
| sroebuck/mp3andburn | src/main/scala/com/proinnovate/BurnApp.scala | Scala | mit | 6,251 |
import session.model.Session
import session.services.CryptoService
class CryptoServiceSpec extends AuthenticationTestHelper {
val cryptoService: CryptoService = inject[CryptoService]
"CryptoService" must {
"encrypt and decrypt session data correctly" in {
val session = Session(userId = Some(43), csrfToken = "token")
val encSession = cryptoService.encryptSession(session)
val decSession = cryptoService.decryptSession(encSession)
decSession.get shouldBe session
}
}
}
| sysgears/apollo-universal-starter-kit | modules/authentication/server-scala/src/test/scala/CryptoServiceSpec.scala | Scala | mit | 511 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.log
import com.waz.log.BasicLogging.LogTag
import com.waz.log.InternalLog.LogLevel
import scala.concurrent.Future
trait LogOutput {
def id: String
def showSafeOnly: Boolean
def level: LogLevel = LogLevel.Verbose
def log(str: String, level: InternalLog.LogLevel, tag: LogTag, ex: Option[Throwable] = None): Unit
def log(str: String, cause: Throwable, level: InternalLog.LogLevel, tag: LogTag): Unit =
log(str, level, tag, Some(cause))
def close(): Unit = ()
def flush(): Unit = ()
def clear(): Unit = ()
}
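// A minimal sketch of an implementation (illustrative only, not part of the original file); the
// class name and the console output are assumptions.
//   class ConsoleLogOutput extends LogOutput {
//     override val id: String = "console"
//     override val showSafeOnly: Boolean = false
//     override def log(str: String, level: InternalLog.LogLevel, tag: LogTag, ex: Option[Throwable]): Unit =
//       println(s"$level/$tag: $str" + ex.fold("")(e => s" (${e.getMessage})"))
//   }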
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/log/LogOutput.scala | Scala | gpl-3.0 | 1,246 |
package es.uvigo.ei.sing.sds
package database
import scala.concurrent.Future
import play.api.Play
import play.api.db.slick.{ DatabaseConfigProvider, HasDatabaseConfig }
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import slick.driver.JdbcProfile
import entity.{ Category, Keyword }
import util.Page
trait KeywordsComponent { self: HasDatabaseConfig[JdbcProfile] =>
import driver.api._
implicit def CategoryColumnType: BaseColumnType[Category] =
MappedColumnType.base[Category, Long](_.id, Category.fromId)
class Keywords(tag: Tag) extends Table[Keyword](tag, "keywords") {
def id = column[Keyword.ID]("keyword_id", O.PrimaryKey, O.AutoInc)
def normalized = column[String]("keyword_normalized")
def category = column[Category]("keyword_category")
def unique_normalized_category = index("idx_keywords_unique_normalized_category", (normalized, category), unique = true)
def * = (id.?, normalized, category) <> (Keyword.tupled, Keyword.unapply)
}
lazy val keywords = TableQuery[Keywords]
}
final class KeywordsDAO extends KeywordsComponent with HasDatabaseConfig[JdbcProfile] {
import driver.api._
import KeywordsDAO._
protected val dbConfig = DatabaseConfigProvider.get[JdbcProfile](Play.current)
def count: Future[Int] =
db.run(keywords.length.result)
def count(normalizedFilter: String): Future[Int] =
db.run(keywords.filter(_.normalized.toLowerCase like normalizedFilter.toLowerCase).length.result)
def get(id: Keyword.ID): Future[Option[Keyword]] =
db.run(keywords.filter(_.id === id).result.headOption)
def getByNormalized(normalized: String): Future[Option[Keyword]] =
db.run(keywords.filter(_.normalized.toLowerCase === normalized.toLowerCase).result.headOption.transactionally)
def list(page: Int = 0, pageSize: Int = 10, orderBy: OrderBy = OrderByID, normalizedFilter: String = "%"): Future[Page[Keyword]] = {
val offset = pageSize * page
val query = keywords.filter(
_.normalized.toLowerCase like normalizedFilter.toLowerCase
).sortBy(orderBy.order).drop(offset).take(pageSize)
for {
total <- count(normalizedFilter)
result <- db.run(query.result)
} yield Page(result, page, offset, total)
}
def getOrInsert(norm: String, cat: Category, keyword: => Keyword): Future[Keyword] = {
def insert: DBIOAction[Keyword, NoStream, Effect.Write] =
(keywords returning keywords.map(_.id) into ((keyword, id) => keyword.copy(id = Some(id)))) += keyword
val filter = keywords filter { k =>
k.normalized.toLowerCase === norm.toLowerCase &&
k.category === cat
}
val query = filter.result.headOption flatMap {
_.fold(insert)(DBIO.successful)
}
db.run(query.transactionally)
}
def insert(keyword: Keyword): Future[Keyword] =
db.run {
((keywords returning keywords.map(_.id) into ((keyword, id) => keyword.copy(id = Some(id)))) += keyword).transactionally
}
def insert(keywords: Keyword*): Future[Seq[Keyword]] =
db.run {
((this.keywords returning this.keywords.map(_.id) into ((keyword, id) => keyword.copy(id = Some(id)))) ++= keywords).transactionally
}
def delete(id: Keyword.ID): Future[Unit] =
db.run(keywords.filter(_.id === id).delete.transactionally).map(_ => ())
def delete(keyword: Keyword): Future[Unit] =
keyword.id.fold(Future.failed[Unit] {
new IllegalArgumentException("It is impossible to delete a keyword with empty ID")
})(delete)
}
object KeywordsDAO {
import slick.ast.Ordering
import slick.ast.Ordering.{ Asc, NullsDefault }
import slick.lifted.ColumnOrdered
private type Keywords = KeywordsComponent#Keywords
sealed trait OrderBy {
type ColumnType
val order: Keywords => ColumnOrdered[ColumnType]
}
case object OrderByID extends OrderBy {
type ColumnType = Long
val order: Keywords => ColumnOrdered[ColumnType] =
keyword => ColumnOrdered(keyword.id, Ordering(Asc, NullsDefault))
}
case object OrderByNormalized extends OrderBy {
type ColumnType = String
val order: Keywords => ColumnOrdered[ColumnType] =
keyword => ColumnOrdered(keyword.normalized, Ordering(Asc, NullsDefault))
}
case object OrderByCategory extends OrderBy {
type ColumnType = Category
val order: Keywords => ColumnOrdered[ColumnType] =
keyword => ColumnOrdered(keyword.category, Ordering(Asc, NullsDefault))
}
}
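// Usage sketch (illustrative only, not part of the original file): fetching the second page of
// keywords whose normalized form starts with "aspirin", ordered by normalized text; the filter
// value is a made-up example.
//   val dao = new KeywordsDAO
//   val page: Future[Page[Keyword]] =
//     dao.list(page = 1, pageSize = 25, orderBy = KeywordsDAO.OrderByNormalized, normalizedFilter = "aspirin%")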
| agjacome/smart-drug-search | src/main/scala/database/KeywordsDAO.scala | Scala | mit | 4,469 |
package org.jetbrains.plugins.dotty.lang.parser.parsing.top.template
import org.jetbrains.plugins.dotty.lang.parser.parsing.types.SelfType
/**
* @author adkozlov
*/
object TemplateBody extends org.jetbrains.plugins.scala.lang.parser.parsing.top.template.TemplateBody {
override protected def templateStat = TemplateStat
override protected def selfType = SelfType
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/dotty/lang/parser/parsing/top/template/TemplateBody.scala | Scala | apache-2.0 | 375 |
trait A { type T; type M >: T }
trait B extends A {
val x : String;
val u : A { type T = B.this.T } ;
type T = x.type;
type M = u.M
}
| densh/dotty | tests/pos/t758.scala | Scala | bsd-3-clause | 142 |
package lables.pimpmylib
object LabelMaker {
def createLabel(from : CanBeLabeled): String = from.label
}
trait CanBeLabeled {
def label: String
}
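// A minimal usage sketch (added for illustration; `Parcel` and the example object are not part of
// the original file). A client type opts into labelling simply by extending CanBeLabeled.
object LabelMakerExample {
  final case class Parcel(id: Int) extends CanBeLabeled {
    def label: String = s"parcel-$id"
  }
  def main(args: Array[String]): Unit =
    println(LabelMaker.createLabel(Parcel(42))) // prints "parcel-42"
}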
| tupol/scala-patterns-tc-pml | src/main/scala/lables/pimpmylib/LabelMaker.scala | Scala | apache-2.0 | 155 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package grails_scala_test
/** Sample base class */
// abstract class Event
trait Event
trait Component {
def name: Option[String]
}
trait ComponentEvent extends Event {
val source: Component
}
trait Container {
def elements: Seq[Component]
}
trait ContainerEvent extends Event {
val source: Container
}
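// A minimal sketch (added for illustration; these concrete types are not in the original file)
// showing how the traits are meant to be combined: a component event carries its Component
// source, and a container aggregates components.
case class Button(name: Option[String]) extends Component
case class ButtonClicked(source: Button) extends ComponentEvent
case class Panel(elements: Seq[Component]) extends Container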
| smartiniOnGitHub/grails-scala | src/scala/grails_scala_test/Events.scala | Scala | apache-2.0 | 1,113 |
package no.vestein.webapp.game
import no.vestein.webapp.App.Ctx2D
trait Renderable {
def render(ctx: Ctx2D): Unit
}
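// A minimal sketch of an implementation (illustrative only, not part of the original file); it
// assumes Ctx2D exposes the standard 2D canvas API, so fillRect is available.
//   case class Box(x: Double, y: Double, width: Double, height: Double) extends Renderable {
//     override def render(ctx: Ctx2D): Unit = ctx.fillRect(x, y, width, height)
//   }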
| WoodStone/PurpleRain-ScalaJS | src/main/scala/no/vestein/webapp/game/Renderable.scala | Scala | gpl-3.0 | 122 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.example
import au.com.cba.omnia.thermometer.core.Thermometer._
import au.com.cba.omnia.thermometer.hive.ThermometerHiveSpec
import au.com.cba.omnia.thermometer.fact.PathFactoids._
import au.com.cba.omnia.ebenezer.ParquetLogging
import au.com.cba.omnia.ebenezer.test.ParquetThermometerRecordReader
import au.com.cba.omnia.maestro.api._, Maestro._
import au.com.cba.omnia.maestro.test.Records
import au.com.cba.omnia.maestro.example.thrift.Customer
object CustomerHiveJobSpec
extends ThermometerHiveSpec
with Records
with ParquetLogging { def is = s2"""
Customer Hive Job
=================
end to end pipeline $pipeline
"""
def pipeline = {
val actualReader = ParquetThermometerRecordReader[Customer]
val expectedReader = delimitedThermometerRecordReader[Customer]('|', "null", implicitly[Decode[Customer]])
val dbRawprefix = "dr"
val customerWarehouse = hiveWarehouse </> s"${dbRawprefix}_customer_customer.db"
val expectedDir = "expected" </> "customer"
withEnvironment(path(getClass.getResource("/customer").toString)) {
val args = Map(
"hdfs-root" -> List(s"$dir/user"),
"local-root" -> List(s"$dir/user"),
"archive-root" -> List(s"$dir/user/archive"),
"db-raw-prefix" -> List(dbRawprefix)
)
executesSuccessfully(CustomerHiveJob.job, args) must_== JobFinished
facts(
customerWarehouse </> "by_date" ==> recordsByDirectory(actualReader, expectedReader, expectedDir </> "by-date"),
customerWarehouse </> "by_cat" ==> recordsByDirectory(actualReader, expectedReader, expectedDir </> "by-cat")
)
}
}
}
| CommBank/maestro | maestro-example/src/test/scala/au/com/cba/omnia/maestro/example/CustomerHiveJobSpec.scala | Scala | apache-2.0 | 2,324 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
package cmwell.bg
import akka.actor.{Actor, Props}
import cmwell.fts.FTSService
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import org.elasticsearch.action.admin.indices.alias.Alias
import collection.JavaConverters._
import concurrent.duration._
/**
* Created by israel on 25/07/2016.
*/
object IndicesManager {
def props(ftsService: FTSService, config: Config) = Props(new IndicesManager(ftsService, config))
}
class IndicesManager(ftsService: FTSService, config: Config) extends Actor with LazyLogging {
import context._
val allIndicesAliasName = config.getString("cmwell.bg.allIndicesAliasName")
val latestIndexAliasName = config.getString("cmwell.bg.latestIndexAliasName")
val indexNamePrefix = config.getString("cmwell.bg.indexNamePrefix")
val maxDocsPerShard = config.getLong("cmwell.bg.maxDocsPerShard")
//TODO restore the line below after solving the JVM-arg-with-space problem
// val maintainIndicesInterval = Duration.fromNanos(config.getDuration("cmwell.bg.maintainIndicesInterval").toNanos)
val maintainIndicesInterval = Duration.apply(config.getLong("cmwell.bg.maintainIndicesInterval"), "minutes")
logger.info(s"""config params:
|allIndicesAliasName:$allIndicesAliasName
|latestIndexAliasName:$latestIndexAliasName
|indexNamePrefix:$indexNamePrefix
|maxDocsPerShard:$maxDocsPerShard
|maintainIndicesInterval:$maintainIndicesInterval""".stripMargin)
override def preStart(): Unit = self ! CheckIndices
override def receive: Receive = {
case CheckIndices =>
maintainIndices()
case UpdateCurrentAlias(previousIndex, newIndex) =>
val replaceCurrentIndexAliasRes = ftsService.client
.admin()
.indices()
.prepareAliases()
.removeAlias(previousIndex, latestIndexAliasName)
.addAlias(newIndex, latestIndexAliasName)
.execute()
.actionGet()
logger.info(
s"Replace current index alias from: $previousIndex to: $newIndex acknowledged: ${replaceCurrentIndexAliasRes.isAcknowledged}"
)
context.system.scheduler.scheduleOnce(maintainIndicesInterval, self, CheckIndices)
}
private def maintainIndices() = {
logger.debug("maintaining indices")
val indicesStats =
ftsService.client.admin().indices().prepareStats(allIndicesAliasName).clear().setDocs(true).execute().actionGet()
// find latest index name
val currentAliasRes =
ftsService.client.admin.indices().prepareGetAliases(latestIndexAliasName).execute().actionGet()
val lastCurrentIndexName = currentAliasRes.getAliases.keysIt().next()
val numOfDocumentsCurrent = indicesStats.getIndex(lastCurrentIndexName).getTotal.getDocs.getCount
val lastCurrentIndexRecovery = ftsService.client.admin().indices().prepareRecoveries(latestIndexAliasName).get()
val numOfShardsCurrent = lastCurrentIndexRecovery
.shardResponses()
.get(lastCurrentIndexName)
.asScala
.filter(_.recoveryState().getPrimary)
.size
logger.debug(s"number of docs per shard:${numOfDocumentsCurrent / numOfShardsCurrent}")
// If number of document per shard in latest index is greater than threshold
// create new index while adding it to the appropriate indices
if ((numOfDocumentsCurrent / numOfShardsCurrent) > maxDocsPerShard) {
logger.info(
s"number of docs per shard:${numOfDocumentsCurrent / numOfShardsCurrent} has passed the threshold of: $maxDocsPerShard , shifting gear"
)
// create new index
val lastCurrentIndexCounter =
lastCurrentIndexName.substring(lastCurrentIndexName.lastIndexOf('_') + 1, lastCurrentIndexName.length).toInt
val nextCurrentIndexName = indexNamePrefix + (lastCurrentIndexCounter + 1)
// create new index while adding it to 'latest' alias
val createNextCurrentIndexRes = ftsService.client
.admin()
.indices()
.prepareCreate(nextCurrentIndexName)
.addAlias(new Alias(allIndicesAliasName))
.execute()
.actionGet()
logger.info(
s"Create new current index named:$nextCurrentIndexName acknowledged: ${createNextCurrentIndexRes.isAcknowledged}"
)
system.scheduler.scheduleOnce(10.seconds, self, UpdateCurrentAlias(lastCurrentIndexName, nextCurrentIndexName))
} else {
context.system.scheduler.scheduleOnce(maintainIndicesInterval, self, CheckIndices)
logger.debug("nothing to do this time")
}
}
case object CheckIndices
case class UpdateCurrentAlias(previousIndex: String, newIndex: String)
}
*/
| dudi3001/CM-Well | server/cmwell-bg/src/main/scala/cmwell/bg/IndicesManager.scala | Scala | apache-2.0 | 5,280 |
package com.twitter.finagle.http2.transport.common
import com.twitter.finagle.http.filter.HttpNackFilter
import com.twitter.finagle.http2.RstException
import io.netty.buffer.Unpooled
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.handler.codec.http.FullHttpMessage
import io.netty.handler.codec.http2._
import io.netty.util.ReferenceCounted
import org.mockito.Mockito.when
import org.scalacheck.Gen
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.scalatest.funsuite.AnyFunSuite
class Http2StreamMessageHandlerTest
extends AnyFunSuite
with MockitoSugar
with ScalaCheckDrivenPropertyChecks {
test("doesn't leak message written post-RST") {
forAll { isServer: Boolean =>
val em = new EmbeddedChannel(Http2StreamMessageHandler(isServer))
val rstFrame = new DefaultHttp2ResetFrame(404)
val stream = mock[Http2FrameStream]
when(stream.id()).thenReturn(1)
rstFrame.stream(stream)
if (isServer) em.pipeline.fireUserEventTriggered(rstFrame)
else
intercept[RstException] {
// The client propagates an exception forward to close the pipeline.
em.pipeline.fireUserEventTriggered(rstFrame)
em.checkException()
}
val msg = io.netty.buffer.Unpooled.buffer(10)
assert(msg.refCnt() == 1)
em.writeOneOutbound(msg)
assert(msg.refCnt() == 0)
}
}
test("Strips Http2WindowUpdate frames") {
forAll { isServer: Boolean =>
val em = new EmbeddedChannel(Http2StreamMessageHandler(isServer))
em.writeInbound(new DefaultHttp2WindowUpdateFrame(1))
assert(em.readInbound[Object]() == null)
}
}
test("Propagates an IllegalArgumentException for unknown frame types") {
def badFrames = Seq(
new DefaultHttp2HeadersFrame(new DefaultHttp2Headers),
new DefaultHttp2DataFrame(Unpooled.directBuffer(10)),
new Object
)
val gen = for {
isServer <- Gen.oneOf(true, false)
badFrame <- Gen.oneOf(badFrames)
} yield isServer -> badFrame
forAll(gen) {
case (isServer, badFrame) =>
val em = new EmbeddedChannel(Http2StreamMessageHandler(isServer))
intercept[IllegalArgumentException] {
em.writeInbound(badFrame)
}
badFrame match {
case r: ReferenceCounted => assert(r.refCnt == 0)
case _ => ()
}
}
}
test(
"RST frames of type REFUSED_STREAM get propagated as a 503 " +
"with the finagle retryable nack header") {
val em = new EmbeddedChannel(Http2StreamMessageHandler(isServer = false))
em.pipeline.fireUserEventTriggered(new DefaultHttp2ResetFrame(Http2Error.REFUSED_STREAM))
val response = em.readInbound[FullHttpMessage]()
assert(response.headers.get(HttpNackFilter.RetryableNackHeader) == "true")
assert(!response.headers.contains(HttpNackFilter.NonRetryableNackHeader))
}
test(
"RST frames of type ENHANCE_YOUR_CALM get propagated as a 503 " +
"with the finagle non-retryable nack header") {
val em = new EmbeddedChannel(Http2StreamMessageHandler(isServer = false))
em.pipeline.fireUserEventTriggered(new DefaultHttp2ResetFrame(Http2Error.ENHANCE_YOUR_CALM))
val response = em.readInbound[FullHttpMessage]()
assert(response.headers.get(HttpNackFilter.NonRetryableNackHeader) == "true")
assert(!response.headers.contains(HttpNackFilter.RetryableNackHeader))
}
test(
"RST frames of type other than REFUSED_STREAM and ENHANCE_YOUR_CALM " +
"gets propagated as a RstException") {
val em = new EmbeddedChannel(Http2StreamMessageHandler(isServer = false))
em.pipeline.fireUserEventTriggered(new DefaultHttp2ResetFrame(Http2Error.CANCEL))
val ex = intercept[RstException] { em.checkException() }
assert(ex.errorCode == Http2Error.CANCEL.code)
}
}
| twitter/finagle | finagle-http2/src/test/scala/com/twitter/finagle/http2/transport/common/Http2StreamMessageHandlerTest.scala | Scala | apache-2.0 | 3,906 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.calculations.LowEmissionCarsCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CPAux3(value: Int) extends CtBoxIdentifier("SpecialRatePoolSum") with CtInteger
object CPAux3 extends Calculated[CPAux3, ComputationsBoxRetriever] with LowEmissionCarsCalculator {
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CPAux3 =
CPAux3(getSpecialRatePoolSum(fieldValueRetriever.lec01()))
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CPAux3.scala | Scala | apache-2.0 | 1,195 |
import org.qirx.littlespec.Specification
object Test extends Specification {
"Test should be able to" - {
"run a test" - {
1 + 1 is 2
}
"show an example" - {
example {
1 + 1 is 2
}
}
}
}
| EECOLOR/little-spec | scalajs/src/sbt-test/test-dependencies/scalajs/testClasses/Test.scala | Scala | mit | 234 |
package is.hail.utils
import scala.collection.mutable
import scala.reflect.ClassTag
object FastSeq {
def empty[T](implicit tct: ClassTag[T]): IndexedSeq[T] = FastSeq()
def apply[T](args: T*)(implicit tct: ClassTag[T]): IndexedSeq[T] = {
args match {
case args: mutable.WrappedArray[T] => args
case args: mutable.ArrayBuffer[T] => args
case _ => args.toArray[T]
}
}
}
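// Usage sketch (added for illustration; not part of the original file). FastSeq reuses the
// varargs buffer when it is already a WrappedArray or ArrayBuffer and only copies otherwise.
object FastSeqExample {
  def main(args: Array[String]): Unit = {
    val xs: IndexedSeq[Int] = FastSeq(1, 2, 3)       // backed directly by the varargs array
    val none: IndexedSeq[String] = FastSeq.empty[String]
    println(s"${xs.sum} ${none.isEmpty}")            // 6 true
  }
}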
| hail-is/hail | hail/src/main/scala/is/hail/utils/FastSeq.scala | Scala | mit | 402 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.recommendation
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._
import org.jblas._
/**
* Model representing the result of matrix factorization.
*
* @param rank Rank for the features in this model.
* @param userFeatures RDD of tuples where each tuple represents the userId and
* the features computed for this user.
* @param productFeatures RDD of tuples where each tuple represents the productId
* and the features computed for this product.
*/
class MatrixFactorizationModel(
val rank: Int,
val userFeatures: RDD[(Int, Array[Double])],
val productFeatures: RDD[(Int, Array[Double])])
extends Serializable
{
/** Predict the rating of one user for one product. */
def predict(user: Int, product: Int): Double = {
val userVector = new DoubleMatrix(userFeatures.lookup(user).head)
val productVector = new DoubleMatrix(productFeatures.lookup(product).head)
userVector.dot(productVector)
}
// TODO: Figure out what good bulk prediction methods would look like.
// Probably want a way to get the top users for a product or vice-versa.
}
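// Usage sketch (illustrative only, not part of the original file): given a model trained with
// ALS from the same package, a single rating prediction is just the dot product of the two
// latent feature vectors; the ids below are made up.
//   val model: MatrixFactorizationModel = ALS.train(ratings, rank = 10, iterations = 5)
//   val score: Double = model.predict(user = 42, product = 7)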
| mkolod/incubator-spark | mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala | Scala | apache-2.0 | 1,977 |
/*
* Twitter Korean Text - Scala library to process Korean text
*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.penguin.korean.tokenizer
import com.twitter.penguin.korean.TestBase
import com.twitter.penguin.korean.tokenizer.KoreanChunker._
class KoreanChunkerTest extends TestBase {
test("getChunks should correctly split a string into Korean-sensitive chunks") {
assert(
getChunks("안녕? iphone6안녕? 세상아?").mkString("/")
=== "안녕/?/ /iphone/6/안녕/?/ /세상아/?"
)
assert(
getChunks("This is an 한국어가 섞인 English tweet.").mkString("/")
=== "This/ /is/ /an/ /한국어가/ /섞인/ /English/ /tweet/."
)
assert(
getChunks("이 日本것은 日本語Eng").mkString("/")
=== "이/ /日本/것은/ /日本語/Eng"
)
assert(
getChunks("무효이며").mkString("/")
=== "무효이며"
)
assert(
getChunks("#해쉬태그 이라는 것 #hash @hello 123 이런이런 #여자최애캐_5명으로_취향을_드러내자").mkString("/")
=== "#해쉬태그/ /이라는/ /것/ /#hash/ /@hello/ /123/ /이런이런/ /#여자최애캐_5명으로_취향을_드러내자"
)
}
test("getChunks should correctly extract numbers") {
assert(
getChunks("300위안짜리 밥").mkString("/")
=== "300위안/짜리/ /밥"
)
assert(
getChunks("200달러와 300유로").mkString("/")
=== "200달러/와/ /300유로"
)
assert(
getChunks("$200이나 한다").mkString("/")
=== "$200/이나/ /한다"
)
assert(
getChunks("300옌이었다.").mkString("/")
=== "300옌/이었다/."
)
assert(
getChunks("3,453,123,123원 3억3천만원").mkString("/")
=== "3,453,123,123원/ /3억/3천만원"
)
assert(
getChunks("6/4 지방 선거").mkString("/")
=== "6/4/ /지방/ /선거"
)
assert(
getChunks("6.4 지방 선거").mkString("/")
=== "6.4/ /지방/ /선거"
)
assert(
getChunks("6-4 지방 선거").mkString("/")
=== "6-4/ /지방/ /선거"
)
assert(
getChunks("6.25 전쟁").mkString("/")
=== "6.25/ /전쟁"
)
assert(
getChunks("1998년 5월 28일").mkString("/")
=== "1998년/ /5월/ /28일"
)
assert(
getChunks("62:45의 결과").mkString("/")
=== "62:45/의/ /결과"
)
}
test("getChunkTokens should correctly find chunks with correct POS tags") {
assert(
chunk("한국어와 English와 1234와 pic.twitter.com " +
"http://news.kukinews.com/article/view.asp?" +
"page=1&gCode=soc&arcid=0008599913&code=41121111 " +
"[email protected] 갤럭시 S5").mkString("/")
===
"한국어와(Korean: 0, 4)/ (Space: 4, 1)/English(Alpha: 5, 7)/와(Korean: 12, 1)/" +
" (Space: 13, 1)/1234(Number: 14, 4)/와(Korean: 18, 1)/ (Space: 19, 1)/" +
"pic.twitter.com(URL: 20, 15)/ (Space: 35, 1)/http://news.kukinews.com/" +
"article/view.asp?page=1&gCode=soc&arcid=0008599913&code=41121111(URL: 36, 89)/" +
" (Space: 125, 1)/[email protected](Email: 126, 21)/ (Space: 147, 1)/" +
"갤럭시(Korean: 148, 3)/ (Space: 151, 1)/S(Alpha: 152, 1)/5(Number: 153, 1)"
)
assert(
chunk("우와!!! 완전ㅋㅋㅋㅋ").mkString("/")
=== "우와(Korean: 0, 2)/!!!(Punctuation: 2, 3)/ (Space: 5, 1)/완전(Korean: 6, 2)/" +
"ㅋㅋㅋㅋ(KoreanParticle: 8, 4)"
)
assert(
chunk("@nlpenguin @edeng #korean_tokenizer_rocks 우하하").mkString("/")
=== "@nlpenguin(ScreenName: 0, 10)/ (Space: 10, 1)/@edeng(ScreenName: 11, 6)/" +
" (Space: 17, 1)/#korean_tokenizer_rocks(Hashtag: 18, 23)/ (Space: 41, 1)/" +
"우하하(Korean: 42, 3)"
)
}
test("getChunkTokens should correctly detect Korean-specific punctuations.") {
assert(
chunk("중·고등학교에서…").mkString("/")
=== "중(Korean: 0, 1)/·(Punctuation: 1, 1)/고등학교에서(Korean: 2, 6)/…(Punctuation: 8, 1)"
)
}
}
| tglstory/twitter-korean-text | src/test/scala/com/twitter/penguin/korean/tokenizer/KoreanChunkerTest.scala | Scala | apache-2.0 | 4,710 |
package me.apidoc.avro
import lib.Text
object Util {
def formatName(name: String): String = {
//Text.camelCaseToUnderscore(name).toLowerCase
name.trim
}
def toOption(value: String): Option[String] = {
if (value == null || value.trim.isEmpty) {
None
} else {
Some(value.trim)
}
}
}
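// Usage sketch (added for illustration; not part of the original file).
object UtilExample {
  def main(args: Array[String]): Unit = {
    println(Util.formatName("  UserCreated ")) // "UserCreated"
    println(Util.toOption("   "))              // None
    println(Util.toOption(" id "))             // Some(id)
  }
}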
| Seanstoppable/apidoc | avro/src/main/scala/me/apidoc/avro/Util.scala | Scala | mit | 326 |
package models
import java.util.UUID
import database.Cassandra
import com.datastax.driver.core.querybuilder.QueryBuilder
import constants.Table
object Target {
def getRow(target: String, input: String) = {
try {
val id = UUID.fromString(input)
val row = Cassandra.session.execute(
QueryBuilder.select().all().from(target)
.where(QueryBuilder.eq("id", id))
).one()
if(row != null) Some(row) else None
}
catch {
case e: Exception => None
}
}
}
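// Usage sketch (illustrative only, not part of the original file); the table name and UUID are
// made up. getRow returns None both for unknown ids and for input that is not a valid UUID,
// since the parse failure is caught.
//   Target.getRow("question", "3f2504e0-4f89-11d3-9a0c-0305e82c3301") // Some(row) if present
//   Target.getRow("question", "not-a-uuid")                           // None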
| lequangdzung/quora-clone | api-app/app/models/Target.scala | Scala | gpl-2.0 | 514 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
// GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp.
package scala
/** A tuple of 17 elements; the canonical representation of a [[scala.Product17]].
*
* @constructor Create a new tuple with 17 elements. Note that it is more idiomatic to create a Tuple17 via `(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17)`
* @param _1 Element 1 of this Tuple17
* @param _2 Element 2 of this Tuple17
* @param _3 Element 3 of this Tuple17
* @param _4 Element 4 of this Tuple17
* @param _5 Element 5 of this Tuple17
* @param _6 Element 6 of this Tuple17
* @param _7 Element 7 of this Tuple17
* @param _8 Element 8 of this Tuple17
* @param _9 Element 9 of this Tuple17
* @param _10 Element 10 of this Tuple17
* @param _11 Element 11 of this Tuple17
* @param _12 Element 12 of this Tuple17
* @param _13 Element 13 of this Tuple17
* @param _14 Element 14 of this Tuple17
* @param _15 Element 15 of this Tuple17
* @param _16 Element 16 of this Tuple17
* @param _17 Element 17 of this Tuple17
*/
final case class Tuple17[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +T13, +T14, +T15, +T16, +T17](_1: T1, _2: T2, _3: T3, _4: T4, _5: T5, _6: T6, _7: T7, _8: T8, _9: T9, _10: T10, _11: T11, _12: T12, _13: T13, _14: T14, _15: T15, _16: T16, _17: T17)
extends Product17[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17]
{
override def toString(): String = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 +
"," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + ")"
}
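// Example (illustrative, not part of the generated source): the idiomatic way to build a Tuple17
// is the literal syntax rather than the constructor.
//   val t = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
//   t._17        // 17
//   t.toString   // "(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17)"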
| lrytz/scala | src/library/scala/Tuple17.scala | Scala | apache-2.0 | 2,012 |
package nars.language
import java.util.ArrayList
import nars.io.Symbols
import nars.storage.Memory
import Inheritance._
//remove if not needed
import scala.collection.JavaConversions._
import CompoundTerm._
import Statement._
object Inheritance {
/**
* Try to make a new compound from two components. Called by the inference rules.
* @param subject The first component
* @param predicate The second component
* @param memory Reference to the memory
* @return A compound generated or null
*/
def make(subject: Term, predicate: Term, memory: Memory): Inheritance = {
if (invalidStatement(subject, predicate)) {
return null
}
val name = makeStatementName(subject, Symbols.INHERITANCE_RELATION, predicate)
val t = memory.nameToListedTerm(name)
if (t != null) {
return t.asInstanceOf[Inheritance]
}
val argument = argumentsToList(subject, predicate)
new Inheritance(argument)
}
}
/**
* A Statement about an Inheritance relation.
*/
class Inheritance private (arg: ArrayList[Term]) extends Statement(arg) {
/**
* Constructor with full values, called by clone
* @param n The name of the term
* @param cs Component list
* @param open Open variable list
* @param i Syntactic complexity of the compound
*/
private def this(n: String,
cs: ArrayList[Term],
con: Boolean,
i: Short) {
// super(n, cs, con, i)
this(cs)
setName(n)
this.isConstant_ = con
this.complexity = i
}
/**
* Clone an object
* @return A new object, to be casted into a SetExt
*/
override def clone(): AnyRef = {
new Inheritance(name, cloneList(components).asInstanceOf[ArrayList[Term]], isConstant_, complexity)
}
/**
* Get the operator of the term.
* @return the operator of the term
*/
def operator(): String = Symbols.INHERITANCE_RELATION
}
| automenta/opennars | nars_scala/src/main/scala/nars/language/Inheritance.scala | Scala | gpl-2.0 | 1,880 |
/*
* Copyright (c) 2016. <[email protected]>
*
* LeaderTest.scala is part of marc4scala.
*
* marc4scala is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* marc4scala is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with marc4scala; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.marc4scala
import org.scalatest.FlatSpec
class LeaderTest extends FlatSpec{
val leader = new Leader
behavior of "Leader"
"Leader" must "unmarshal correctly" in {
leader.unmarshal("00714cam a2200205 a 4500")
assert("00714cam a2200205 a 4500" == leader.toString)
info("unmarshal properly")
assert(714 == leader.recordLength)
info("record length is correct")
assert('c' == leader.recordStatus)
info("record status is correct")
}
it must "marshal correctly" is (pending)
}
| jasonzou/marc4scala | src/test/scala/org/marc4scala/LeaderTest.scala | Scala | gpl-3.0 | 1,368 |