code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M)
---|---|---|---|---|---|
package com.sksamuel.elastic4s.analyzers
import org.elasticsearch.common.xcontent.{XContentFactory, XContentBuilder}
import scala.collection.JavaConverters._
trait AnalyzerFilter {
def name: String
}
case class PredefinedTokenFilter(name: String) extends TokenFilter
case class PredefinedCharFilter(name: String) extends CharFilter
trait AnalyzerFilterDefinition {
def filterType: String
protected[elastic4s] def build(source: XContentBuilder): Unit
def json: XContentBuilder = {
val builder = XContentFactory.jsonBuilder
builder.startObject()
builder.field("type", filterType)
build(builder)
builder.endObject()
builder
}
}
trait CharFilter extends AnalyzerFilter
trait CharFilterDefinition extends CharFilter with AnalyzerFilterDefinition
case object HtmlStripCharFilter extends CharFilter {
val name = "html_strip"
}
case class MappingCharFilter(name: String, mappings: (String, String)*)
extends CharFilterDefinition {
val filterType = "mapping"
def build(source: XContentBuilder): Unit = {
source.field("mappings", mappings.map({ case (k, v) => s"$k=>$v" }).asJava)
}
}
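// Illustrative note (added for clarity, not part of the original source): for a filter such as
// MappingCharFilter("my_mapping", "ph" -> "f"), the `json` helper defined above would be
// expected to produce an object along the lines of {"type":"mapping","mappings":["ph=>f"]}.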
case class PatternReplaceCharFilter(name: String, pattern: String, replacement: String)
extends CharFilterDefinition {
val filterType = "pattern_replace"
def build(source: XContentBuilder): Unit = {
source.field("pattern", pattern)
source.field("replacement", replacement)
}
}
| ulric260/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/analyzers/analyzerFilters.scala | Scala | apache-2.0 | 1,434 |
/* Title: Pure/GUI/color_value.scala
Author: Makarius
Cached color values.
*/
package isabelle
import java.awt.Color
import java.util.Locale
object Color_Value
{
private var cache = Map.empty[String, Color]
def parse(s: String): Color =
{
val i = java.lang.Long.parseLong(s, 16)
val r = ((i >> 24) & 0xFF).toInt
val g = ((i >> 16) & 0xFF).toInt
val b = ((i >> 8) & 0xFF).toInt
val a = (i & 0xFF).toInt
new Color(r, g, b, a)
}
def print(c: Color): String =
{
val r = new java.lang.Integer(c.getRed)
val g = new java.lang.Integer(c.getGreen)
val b = new java.lang.Integer(c.getBlue)
val a = new java.lang.Integer(c.getAlpha)
Word.uppercase(String.format(Locale.ROOT, "%02x%02x%02x%02x", r, g, b, a))
}
def apply(s: String): Color =
synchronized {
cache.get(s) match {
case Some(c) => c
case None =>
val c = parse(s)
cache += (s -> c)
c
}
}
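// Illustrative usage sketch (comment added for clarity; the colour values are assumed examples, format "RRGGBBAA"):
// Color_Value("FF0000FF") // opaque red; parsed once, then served from `cache`
// Color_Value.print(new Color(255, 0, 0, 255)) // "FF0000FF"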
}
| larsrh/libisabelle | modules/pide/2019-RC4/src/main/scala/GUI/color_value.scala | Scala | apache-2.0 | 989 |
/*
* Copyright 2011 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package code.snippet
import net.liftweb.common.Logger
import net.liftweb.http.{S, SHtml}
import net.liftweb.http.js.JsCmds.SetValById
import code.comet.LogServer
object LogFilePost extends Logger {
def render = SHtml.onSubmit(s => {
SetValById("field1", "hello")
SetValById("field2", s)
SetValById("new_log_record", s)
})
}
| haroldl/kumiho | src/main/scala/code/snippet/LogFilePost.scala | Scala | apache-2.0 | 938 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*************************************************************************************
*/
package com.normation.ldap.sdk
import com.unboundid.ldap.sdk.{DN,Attribute}
import com.unboundid.ldap.sdk.schema.Schema
import com.normation.exceptions.TechnicalException
import scala.collection.JavaConversions.asSet
import com.normation.utils.HashcodeCaching
/**
* LDAP syntax
*
*/
sealed trait TypedAttribute {
val name:String
}
case class BooleanAttribute(name:String,value:List[Boolean]) extends TypedAttribute with HashcodeCaching
case class LongAttribute(name:String,values:List[Long]) extends TypedAttribute with HashcodeCaching
case class DNAttribute(name:String,values: List[DN]) extends TypedAttribute with HashcodeCaching
case class StringAttribute(name:String,values:List[String]) extends TypedAttribute with HashcodeCaching
case class BinaryAttribute(name:String,values: List[Array[Byte]]) extends TypedAttribute with HashcodeCaching
case class GeneralizedTimeAttribute(name:String,values:List[GeneralizedTime]) extends TypedAttribute with HashcodeCaching
object TypedAttribute {
private def toBoolean(s:String) : Boolean = s.toLowerCase match {
case "true" | "t" | "yes" | "y" | "on" | "1" => true
case "false" | "f" | "no" | "n" | "off" | "0" => false
case x => throw new TechnicalException("Can not interpret %s as a boolean value".format(x))
}
def apply(attribute:Attribute)(implicit schema:Schema) : TypedAttribute = {
schema.getAttributeType(attribute.getName) match {
case null =>
StringAttribute(attribute.getName,attribute.getValues.toList)
case attrDef => attrDef.getSyntaxOID(schema) match {
case null => StringAttribute(attribute.getName,attribute.getValues.toList)
case oid => schema.getAttributeSyntax(oid) match {
case null =>
StringAttribute(attribute.getName,attribute.getValues.toList)
case syntaxDef => syntaxDef.getOID match {
case "1.3.6.1.4.1.1466.115.121.1.7" => //boolean
BooleanAttribute(attribute.getName,attribute.getValues.map(v => toBoolean(v)).toList)
case "1.3.6.1.4.1.1466.115.121.1.41" => // Postal addr.
StringAttribute(attribute.getName,attribute.getValues.toList)
case "1.3.6.1.4.1.1466.115.121.1.12" |
"1.3.6.1.4.1.1466.115.121.1.34" => // name&optional UID
DNAttribute(attribute.getName,attribute.getValues.map(v => new DN(v)).toList)
case "1.3.6.1.4.1.1466.115.121.1.24" |
"1.3.6.1.4.1.1466.115.121.1.53" => // UTC time
GeneralizedTimeAttribute(attribute.getName,attribute.getValues.map(v => GeneralizedTime(v)).toList)
case "1.3.6.1.4.1.1466.115.121.1.27" => // Integer
LongAttribute(attribute.getName,attribute.getValues.map(v => v.toLong).toList)
case "1.3.6.1.4.1.1466.115.121.1.36" => // numeric
StringAttribute(attribute.getName,attribute.getValues.toList)
case "1.3.6.1.4.1.4203.1.1.2" | // auth password
"1.3.6.1.4.1.1466.115.121.1.5" | // binary
"1.3.6.1.4.1.1466.115.121.1.8" | // certificate
"1.3.6.1.4.1.1466.115.121.1.9" | // cert list
"1.3.6.1.4.1.1466.115.121.1.10" | // cert pair
"1.3.6.1.4.1.1466.115.121.1.28" | // JPEG
"1.3.6.1.4.1.1466.115.121.1.40" => // octet string
BinaryAttribute(attribute.getName,attribute.getRawValues.map(v => v.getValue).toList)
case "1.3.6.1.4.1.1466.115.121.1.50" => //telephone number
StringAttribute(attribute.getName,attribute.getValues.toList)
case _ => //other are mapped as string
StringAttribute(attribute.getName,attribute.getValues.toList)
}
}
}
}
}
}
| fanf/scala-ldap | src/main/scala/com/normation/ldap/sdk/TypedAttribute.scala | Scala | apache-2.0 | 4,621 |
package co.blocke.scalajack
package json.collections
import TestUtil._
import munit._
import munit.internal.console
import co.blocke.scalajack.json.JSON
import scala.collection.immutable._
class Tuples() extends FunSuite:
val sj = co.blocke.scalajack.ScalaJack()
test("null tuples work") {
describe("-----------------\n: Tuple Tests :\n-----------------", Console.BLUE)
val jsNull = "null".asInstanceOf[JSON]
assert(sj.read[(Int, Boolean)](jsNull) == null)
assert(sj.render[(Int,Boolean)](null) == jsNull)
}
test("missing start bracket") {
val js = """12,5""".asInstanceOf[JSON]
val msg =
"""Expected start of tuple here
|12,5
|^""".stripMargin
interceptMessage[co.blocke.scalajack.ScalaJackError](msg){
sj.read[(Int, Int)](js)
}
}
test("missing comma") {
val js = """[12""".asInstanceOf[JSON]
val msg =
"""Expected comma here
|[12
|---^""".stripMargin
interceptMessage[co.blocke.scalajack.ScalaJackError](msg){
sj.read[(Int, Int)](js)
}
}
test("no closing bracket") {
val js = """[12,5""".asInstanceOf[JSON]
val msg =
"""Expected end of tuple here
|[12,5
|-----^""".stripMargin
interceptMessage[co.blocke.scalajack.ScalaJackError](msg){
sj.read[(Int, Int)](js)
}
}
| gzoller/ScalaJack | core/src/test/scala/co.blocke.scalajack/json/collections/Tuples.scala | Scala | mit | 1,338 |
import java.io._
import scala.io.Source
object FileDemo {
def main(args: Array[String]){
val writer = new PrintWriter(new File("test.txt"))
writer.write("Hello Scala")
writer.close()
//println("Following is the content read:")
//Source.fromFile("test.txt").foreach{
// print
//}
}
}
| fbartnitzek/notes | 7_languages/Scala/FileDemo.scala | Scala | apache-2.0 | 302 |
/**
* COPYRIGHT (C) 2015 Alpine Data Labs Inc. All Rights Reserved.
*/
package com.alpine.plugin.core.io.defaults
import com.alpine.plugin.core.io.{LocalTable, OperatorInfo, Row}
/**
* AbstractLocalTable, for boilerplate implementation of LocalTable.
*/
abstract class AbstractLocalTable(val tableName: String, val rows: Seq[Row],
val sourceOperatorInfo: Option[OperatorInfo],
val addendum: Map[String, AnyRef])
extends LocalTable {
/**
* Get the number of columns in the table.
* @return The number of columns.
*/
def getNumCols: Int = {
if (this.rows.nonEmpty) {
rows.head.getNumCols
} else {
0
}
}
/**
* Get the number of rows in the table.
* @return The number of rows.
*/
def getNumRows: Int = this.rows.length
def displayName: String = tableName
}
/**
* Default implementation.
* Developers wanting to change behaviour can extend AbstractLocalTable.
*/
case class LocalTableDefault(override val tableName: String,
override val rows: Seq[Row],
override val sourceOperatorInfo: Option[OperatorInfo],
override val addendum: Map[String, AnyRef] = Map[String, AnyRef]())
extends AbstractLocalTable(tableName, rows, sourceOperatorInfo, addendum)
| holdenk/PluginSDK | plugin-io-impl/src/main/scala/com/alpine/plugin/core/io/defaults/LocalTableDefault.scala | Scala | apache-2.0 | 1,373 |
package drt.client.components.styles
import scalacss.internal.mutable.StyleSheet
import ScalaCssImplicits.CssSettings._
case class ScenarioSimulationStyle(common: CommonStyle = DefaultCommonStyle) extends StyleSheet.Inline {
import dsl._
val container = style(
unsafeChild("label")(
fontSize(1.5.rem)
)
)
}
object DefaultScenarioSimulationStyle extends ScenarioSimulationStyle
| UKHomeOffice/drt-scalajs-spa-exploration | client/src/main/scala/drt/client/components/styles/ScenarioSimulationStyle.scala | Scala | apache-2.0 | 403 |
/*
* Copyright (c) 2014 Dufresne Management Consulting LLC.
*/
package com.nickelsoftware.bettercare4me.cassandra
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.JavaConversions.asScalaSet
import scala.collection.JavaConversions.seqAsJavaList
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import org.joda.time.DateTime
import com.datastax.driver.core.BoundStatement
import com.datastax.driver.core.Cluster
import com.datastax.driver.core.Metadata
import com.datastax.driver.core.ResultSetFuture
import com.datastax.driver.core.exceptions.NoHostAvailableException
import com.nickelsoftware.bettercare4me.hedis.HEDISRule
import com.nickelsoftware.bettercare4me.hedis.HEDISRuleInfo
import com.nickelsoftware.bettercare4me.hedis.HEDISRules
import com.nickelsoftware.bettercare4me.hedis.HEDISScoreSummary
import com.nickelsoftware.bettercare4me.hedis.RuleScoreSummary
import com.nickelsoftware.bettercare4me.models.Claim
import com.nickelsoftware.bettercare4me.models.ClaimGeneratorConfig
import com.nickelsoftware.bettercare4me.models.ClaimParser
import com.nickelsoftware.bettercare4me.models.CriteriaResult
import com.nickelsoftware.bettercare4me.models.Patient
import com.nickelsoftware.bettercare4me.models.PatientParser
import com.nickelsoftware.bettercare4me.models.PatientScorecardResult
import com.nickelsoftware.bettercare4me.models.Provider
import com.nickelsoftware.bettercare4me.models.ProviderParser
import com.nickelsoftware.bettercare4me.utils.NickelException
import com.nickelsoftware.bettercare4me.utils.Properties
import com.nickelsoftware.bettercare4me.utils.Utils
import com.nickelsoftware.bettercare4me.utils.cassandra.resultset.toFuture
import play.api.Logger
import java.io.FileNotFoundException
/**
* Class managing a connection to Cassandra cluster and
* session to keyspace using configuration file
*
* Default config file name: "data/cassandra.yaml"
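*
* A minimal sketch of the expected YAML content, inferred from the keys read by this class
* ("node" and "keyspace"); the exact file layout is an assumption:
* {{{
* node: 127.0.0.1
* keyspace: bettercare4me
* }}}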
*/
class Cassandra {
private lazy val fname: String = Properties.cassandraConfig.path
lazy val config: Map[String,Object] = try {
val c = Utils.loadYamlConfig(fname)
Logger.info("Cassandra config read from: " + fname)
c
} catch {
case ex: FileNotFoundException =>
Logger.error("Cassandra.config: FileNotFoundException caught when trying to load "+fname)
Map()
}
lazy val node = {
val n = config.getOrElse("node", "127.0.0.1").asInstanceOf[String]
Logger.info("Cassandra Node IP: " + n)
n
}
lazy val cluster = {
val c = Cluster.builder().addContactPoint(node).build()
log(c.getMetadata)
c
}
lazy val session = {
val s = cluster.connect(config.getOrElse("keyspace", "bettercare4me").asInstanceOf[String])
Logger.info(s"Session connected to keyspace: ${s.getLoggedKeyspace()}")
s
}
private def log(metadata: Metadata): Unit = {
Logger.info(s"Connected to cluster: ${metadata.getClusterName} using $fname")
for (host <- metadata.getAllHosts()) {
Logger.info(s"Datacenter: ${host.getDatacenter()}; Host: ${host.getAddress()}; Rack: ${host.getRack()}")
}
}
/**
* Close the underlying cassandra connection
*/
def close = {
session.close()
cluster.close()
}
}
/**
* Class to handle Bettercare4me data access,
* wrapper class around Cassandra connection class
*
* Local class that manages the data access.
*/
protected[cassandra] class Bc4me {
/**
* Should that be private?
*/
val cassandra = new Cassandra
/**
* Close the underlying cassandra connection
*/
def close = {
cassandra.close
}
// prepared statements
private val queryPatientsStmt = cassandra.session.prepare("SELECT data FROM patients WHERE batch_id = ?")
private val queryProvidersStmt = cassandra.session.prepare("SELECT data FROM providers WHERE batch_id = ?")
private val queryClaimsStmt = cassandra.session.prepare("SELECT data FROM claims_patients WHERE batch_id = ?")
private val insertPatientsStmt = cassandra.session.prepare("INSERT INTO patients (batch_id, id, data) VALUES (?, ?, ?)")
private val insertProvidersStmt = cassandra.session.prepare("INSERT INTO providers (batch_id, id, data) VALUES (?, ?, ?)")
private val insertClaims1Stmt = cassandra.session.prepare("INSERT INTO claims_patients (batch_id, id, patient_id, dos, data) VALUES (?, ?, ?, ?, ?)")
private val insertClaims2Stmt = cassandra.session.prepare("INSERT INTO claims_providers (batch_id, id, provider_id, dos, data) VALUES (?, ?, ?, ?, ?)")
// Summary tables
private val queryHEDISSummaryStmt = cassandra.session.prepare("SELECT name, hedis_date, patient_count, score_summaries, claim_generator_config FROM hedis_summary LIMIT 1000")
private val queryHEDISReportStmt = cassandra.session.prepare("SELECT patient_count, score_summaries, claim_generator_config FROM hedis_summary WHERE hedis_date = ?")
private val queryClaimGeneratorConfigStmt = cassandra.session.prepare("SELECT claim_generator_config FROM hedis_summary WHERE hedis_date = ?")
private val insertHEDISSummaryStmt = cassandra.session.prepare("INSERT INTO hedis_summary (name, hedis_date, patient_count, score_summaries, claim_generator_config) VALUES (?, ?, ?, ?, ?)")
private val insertRuleInformationStmt1 = cassandra.session.prepare("INSERT INTO rules_information (rule_name, hedis_date, full_name, description, patient_count, page_count, rule_score_summary) VALUES (?, ?, ?, ?, ?, 1, ?)")
private val insertRuleInformationStmt2 = cassandra.session.prepare("INSERT INTO rules_information (rule_name, hedis_date, page_count) VALUES (?, ?, ?)")
private val queryRuleInformationStmt = cassandra.session.prepare("SELECT rule_name, hedis_date, full_name, description, patient_count, page_count, rule_score_summary FROM rules_information WHERE rule_name = ? AND hedis_date = ?")
private val queryRuleScorecardStmt = cassandra.session.prepare("SELECT batch_id, patient_data, is_excluded, is_meet_criteria FROM rule_scorecards WHERE rule_name = ? AND hedis_date = ?")
private val insertRuleScorecardStmt = cassandra.session.prepare("INSERT INTO rule_scorecards (rule_name, hedis_date, batch_id, patient_name, patient_id, patient_data, is_excluded, is_meet_criteria) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
private val queryRuleScorecardPaginatedStmt = cassandra.session.prepare("SELECT batch_id, patient_data, is_excluded, is_meet_criteria FROM rule_scorecards_paginated WHERE rule_name = ? AND hedis_date = ? AND page_id = ?")
private val insertRuleScorecardPaginatedStmt = cassandra.session.prepare("INSERT INTO rule_scorecards_paginated (rule_name, hedis_date, batch_id, page_id, patient_name, patient_id, patient_data, is_excluded, is_meet_criteria) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)")
private val queryPatientScorecardResultStmt = cassandra.session.prepare("SELECT patient_data, rule_name, rule_full_name, is_eligible, eligible_score, is_excluded, excluded_score, is_meet_criteria, meet_criteria_score FROM patient_scorecards WHERE batch_id = ? AND patient_id = ? AND hedis_date = ?")
private val insertPatientScorecardResultStmt = cassandra.session.prepare("INSERT INTO patient_scorecards (batch_id, hedis_date, patient_id, patient_data, rule_name, rule_full_name, is_eligible, eligible_score, is_excluded, excluded_score, is_meet_criteria, meet_criteria_score) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
/**
* Execute the provided query; can be used in tests to initialize the database
*/
def execute(s: String) = {
cassandra.session.execute(s)
}
/**
* Get all patients by batch_id
*/
def queryPatients(batchId: Int): Future[Iterable[Patient]] = {
val future: ResultSetFuture = cassandra.session.executeAsync(new BoundStatement(queryPatientsStmt).bind(batchId: java.lang.Integer))
// use the implicit conversion of ResultSetFuture into Future[ResultSet] using the import:
// import com.nickelsoftware.bettercare4me.utils.cassandra.resultset.toFuture above
// the convert the ResultSet into List[Row] using ResultSet.all()
future.map(_.all().map(row => PatientParser.fromList(row.getList("data", classOf[String]).toList)))
}
/**
* Get all providers by batch_id
*/
def queryProviders(batchId: Int): Future[Iterable[Provider]] = {
val future: ResultSetFuture = cassandra.session.executeAsync(new BoundStatement(queryProvidersStmt).bind(batchId: java.lang.Integer))
future.map(_.all().map(row => ProviderParser.fromList(row.getList("data", classOf[String]).toList)))
}
/**
* Get all claims by batch_id
*/
def queryClaims(batchId: Int): Future[Iterable[Claim]] = {
val future: ResultSetFuture = cassandra.session.executeAsync(new BoundStatement(queryClaimsStmt).bind(batchId: java.lang.Integer))
future.map(_.all().map(row => ClaimParser.fromList(row.getList("data", classOf[String]).toList)))
}
/**
* Batch insert into patients table
* Turns out it's better to loop over each item than to batch them
*
* INSERT INTO patients (batch_id, id, data) VALUES (?, ?, ?)
*/
def batchPatients(batchId: Int, patients: List[Patient]): Future[List[Unit.type]] = {
val f = patients map { p => cassandra.session.executeAsync(insertPatientsStmt.bind(batchId: java.lang.Integer, p.patientID, p.toList: java.util.List[String])).map(rs => Unit) }
Future.sequence(f)
}
/**
* Batch insert into providers table
*
* INSERT INTO providers (batch_id, id, data) VALUES (?, ?, ?)
*/
def batchProviders(batchId: Int, providers: List[Provider]): Future[List[Unit.type]] = {
val f = providers map { p => cassandra.session.executeAsync(insertProvidersStmt.bind(batchId: java.lang.Integer, p.providerID, p.toList: java.util.List[String])).map(rs => Unit) }
Future.sequence(f)
}
/**
* Batch insert into claims by patient table
*
* INSERT INTO claims_patients (batch_id, id, patient_id, dos, data) VALUES (?, ?, ?, ?, ?)
*/
def batchClaimsByPatients(batchId: Int, claims: List[Claim]): Future[List[Unit.type]] = {
val f = claims map { c => cassandra.session.executeAsync(insertClaims1Stmt.bind(batchId: java.lang.Integer, c.claimID, c.patientID, c.date.toDate(), c.toList: java.util.List[String])).map(rs => Unit) }
Future.sequence(f)
}
/**
* Batch insert into claims by provider table
*
* INSERT INTO claims_providers (batch_id, id, provider_id, dos, data) VALUES (?, ?, ?, ?, ?)
*/
def batchClaimsByProviders(batchId: Int, claims: List[Claim]): Future[List[Unit.type]] = {
val f = claims map { c => cassandra.session.executeAsync(insertClaims2Stmt.bind(batchId: java.lang.Integer, c.claimID, c.providerID, c.date.toDate(), c.toList: java.util.List[String])).map(rs => Unit) }
Future.sequence(f)
}
/**
* Query all HEDIS report summary
*/
def queryHEDISSummary: Future[Iterable[(HEDISScoreSummary, String)]] = {
val future: ResultSetFuture = cassandra.session.executeAsync(new BoundStatement(queryHEDISSummaryStmt).bind())
future.map { rs =>
rs.all() map { row =>
val configTxt = row.getString("claim_generator_config")
val config = ClaimGeneratorConfig.loadConfig(configTxt)
val rules: List[HEDISRule] = config.rulesConfig.map { c => HEDISRules.createRuleByName(c.name, c, config.hedisDate) }.toList
(HEDISScoreSummary(rules, row.getLong("patient_count"): Long, row.getList("score_summaries", classOf[String]).toList), configTxt)
}
}
}
/**
* Query a specific HEDIS report
*/
def queryHEDISReport(hedisDate: DateTime): Future[(HEDISScoreSummary, String)] = {
val bs = new BoundStatement(queryHEDISReportStmt).bind(hedisDate.toDate)
val future: ResultSetFuture = cassandra.session.executeAsync(bs)
future.map { rs =>
if (rs.isExhausted()) throw NickelException("Bettercare4me.queryHEDISReport: No report found with date " + hedisDate.toString())
val row = rs.one()
val configTxt = row.getString("claim_generator_config")
val config = ClaimGeneratorConfig.loadConfig(configTxt)
val rules: List[HEDISRule] = config.rulesConfig.map { c => HEDISRules.createRuleByName(c.name, c, config.hedisDate) }.toList
(HEDISScoreSummary(rules, row.getLong("patient_count"): Long, row.getList("score_summaries", classOf[String]).toList), configTxt)
}
}
/**
* Query the Claim Generator Configuration for a specific HEDIS date (run)
*/
def queryClaimGeneratorConfig(hedisDate: DateTime): Future[(ClaimGeneratorConfig, String)] = {
val future: ResultSetFuture = cassandra.session.executeAsync(new BoundStatement(queryClaimGeneratorConfigStmt).bind(hedisDate.toDate))
future.map { rs =>
val row = rs.one()
val configTxt = row.getString("claim_generator_config")
val config = ClaimGeneratorConfig.loadConfig(configTxt)
(config, configTxt)
}
}
/**
* HEDIS report summary
*/
def insertHEDISSummary(name: String, hedisDate: DateTime, patientCount: Long, scoreSummaries: List[String], claimGeneratorConfig: String): Future[Unit.type] = {
cassandra.session.executeAsync(insertHEDISSummaryStmt.bind(name, hedisDate.toDate(), patientCount: java.lang.Long, scoreSummaries: java.util.List[String], claimGeneratorConfig)).map(rs => Unit)
}
/**
* Insert the rule information based on RuleScoreSummary into rules_information table
*/
def insertRuleInformation(hedisDate: DateTime, patientCount: Long, ruleScoreSummary: RuleScoreSummary): Future[Unit.type] = {
val ri = ruleScoreSummary.ruleInfo
cassandra.session.executeAsync(insertRuleInformationStmt1.bind(ri.name, hedisDate.toDate(), ri.fullName, ri.description, patientCount: java.lang.Long, ruleScoreSummary.toParseString)).map(rs => Unit)
}
/**
* Upsert the number of page count in rule information table
*/
def insertRuleInformation(ruleName: String, hedisDate: DateTime, pageCount: Long): Future[Unit.type] = {
cassandra.session.executeAsync(insertRuleInformationStmt2.bind(ruleName, hedisDate.toDate(), pageCount: java.lang.Long)).map(rs => Unit)
}
/**
* Return the rule information and stats for a hedis measure (RuleScoreSummary)
*/
def queryRuleInformation(ruleName: String, hedisDate: DateTime): Future[(Long, Long, RuleScoreSummary)] = {
val future: ResultSetFuture = cassandra.session.executeAsync(new BoundStatement(queryRuleInformationStmt).bind(ruleName, hedisDate.toDate))
future.map { rs =>
val row = rs.one()
val patientCount = row.getLong("patient_count")
val pageCount = row.getLong("page_count")
val ruleScoreSummary = RuleScoreSummary(HEDISRuleInfo(ruleName, row.getString("full_name"), row.getString("description")), row.getString("rule_score_summary"))
(patientCount, pageCount, ruleScoreSummary)
}
}
/**
* Return the list of patients for a hedis measure (rule_scorecard table)
*/
def queryRuleScorecard(ruleName: String, hedisDate: DateTime): Future[Iterable[(Int, Patient, Boolean, Boolean)]] = {
val future = cassandra.session.executeAsync(new BoundStatement(queryRuleScorecardStmt).bind(ruleName, hedisDate.toDate))
future.map { rs =>
rs.all() map { row =>
val patient = PatientParser.fromList(row.getList("patient_data", classOf[String]).toList)
(row.getInt("batch_id"): Int, patient, row.getBool("is_excluded"): Boolean, row.getBool("is_meet_criteria"): Boolean)
}
}
}
/**
* Insert a rule summary for patient (rule_scorecard table)
*/
def insertRuleScorecards(ruleName: String, hedisDate: DateTime, batchID: Int, patient: Patient, isExcluded: Boolean, isMeetCriteria: Boolean): Future[Unit.type] = {
cassandra.session.executeAsync(insertRuleScorecardStmt.bind(ruleName, hedisDate.toDate(), batchID: java.lang.Integer, patient.lastName + ", " + patient.firstName, patient.patientID, patient.toList: java.util.List[String], isExcluded: java.lang.Boolean, isMeetCriteria: java.lang.Boolean)).map(rs => Unit)
}
/**
* Return the paginated list of patients for a hedis measure (rule_scorecard_paginated table)
*/
def queryRuleScorecardPaginated(ruleName: String, hedisDate: DateTime, pageID: Long): Future[Iterable[(Int, Patient, Boolean, Boolean)]] = {
val future = cassandra.session.executeAsync(new BoundStatement(queryRuleScorecardPaginatedStmt).bind(ruleName, hedisDate.toDate, pageID: java.lang.Long))
future.map { rs =>
rs.all() map { row =>
val patient = PatientParser.fromList(row.getList("patient_data", classOf[String]).toList)
(row.getInt("batch_id"): Int, patient, row.getBool("is_excluded"): Boolean, row.getBool("is_meet_criteria"): Boolean)
}
}
}
/**
* Return the paginated list of patients for a hedis measure (rule_scorecard_paginated table)
*
* Read pageCnt pages starting at pageID
* @param ruleName name of rule to query
* @param hedisDate end date of reporting period
* @param pageID starting page_id to read from
* @param pageCnt the number of pages to read
*/
def queryRuleScorecardPaginated(ruleName: String, hedisDate: DateTime, pageID: Long, pageCnt: Int): Future[Iterable[(Int, Patient, Boolean, Boolean)]] = {
if (pageCnt < 1) throw NickelException("queryRuleScorecardPaginated: pageCnt must be >= 1, we got: " + pageCnt)
val listOfFuture = (pageID to pageID + pageCnt - 1).toList map { p => queryRuleScorecardPaginated(ruleName, hedisDate, p) }
val futureList = Future.sequence(listOfFuture)
// concatenate all results into a single list
futureList map (_.flatten)
}
/**
* Insert a rule summary for patient (rule_scorecard_paginated table)
*/
def insertRuleScorecardsPaginated(ruleName: String, hedisDate: DateTime, batchID: Int, pageID: Long, patient: Patient, isExcluded: Boolean, isMeetCriteria: Boolean): Future[Unit.type] = {
cassandra.session.executeAsync(insertRuleScorecardPaginatedStmt.bind(ruleName, hedisDate.toDate(), batchID: java.lang.Integer, pageID: java.lang.Long, patient.lastName + ", " + patient.firstName, patient.patientID, patient.toList: java.util.List[String], isExcluded: java.lang.Boolean, isMeetCriteria: java.lang.Boolean)).map(rs => Unit)
}
/**
* Saving PatientScorecardResult, populating patient_scorecard table
*/
def insertPatientScorecardResult(batchID: Int, hedisDate: DateTime, patientScorecardResult: PatientScorecardResult): Future[Iterable[Unit.type]] = {
def toList(cr: CriteriaResult): List[String] = cr.criteriaResultReasons map { _.toCSVString }
val p = patientScorecardResult.patient
val f = patientScorecardResult.scorecardResult map {
case (ruleName, rr) =>
val el = rr.eligibleResult
val ex = rr.excludedResult
val mm = rr.meetMeasureResult
cassandra.session.executeAsync(insertPatientScorecardResultStmt.bind(
batchID: java.lang.Integer, hedisDate.toDate(), p.patientID, p.toList: java.util.List[String], ruleName, rr.ruleFullName,
el.isCriteriaMet: java.lang.Boolean, toList(el): java.util.List[String],
ex.isCriteriaMet: java.lang.Boolean, toList(ex): java.util.List[String],
mm.isCriteriaMet: java.lang.Boolean, toList(mm): java.util.List[String])).map(rs => Unit)
}
Future.sequence(f)
}
/**
* Read the patient scorecards, aka patient profile
*/
def queryPatientScorecardResult(batchID: Int, patientID: String, hedisDate: DateTime): Future[PatientScorecardResult] = {
val future = cassandra.session.executeAsync(new BoundStatement(queryPatientScorecardResultStmt).bind(batchID: java.lang.Integer, patientID, hedisDate.toDate))
future.map { rs =>
val rows = rs.all().toList
val patient = PatientParser.fromList(rows.head.getList("patient_data", classOf[String]).toList)
rows.foldLeft(PatientScorecardResult(patient)) { (ps, row) =>
val ruleName = row.getString("rule_name")
val ruleFullName = row.getString("rule_full_name")
ps.addRuleResult(ruleName, ruleFullName, HEDISRule.eligible, row.getBool("is_eligible"), row.getList("eligible_score", classOf[String]).toList).
addRuleResult(ruleName, ruleFullName, HEDISRule.excluded, row.getBool("is_excluded"), row.getList("excluded_score", classOf[String]).toList).
addRuleResult(ruleName, ruleFullName, HEDISRule.meetMeasure, row.getBool("is_meet_criteria"), row.getList("meet_criteria_score", classOf[String]).toList)
}
}
}
}
/**
* Object to maintain single connection to Cassandra for the current application
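*
* Illustrative usage sketch (not from the original source; assumes a valid
* "data/cassandra.yaml" and an existing batch id):
* {{{
* Bettercare4me.connect
* val patientsF = Bettercare4me.queryPatients(1) // Future[Iterable[Patient]]
* // ... use the futures, then at shutdown:
* Bettercare4me.close
* }}}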
*/
object Bettercare4me {
private var bc4me: Option[Bc4me] = None
/**
* Connect to Cassandra cluster and open session to keyspace
* based on config file
*
* This is called *only* by Global.onStart at application start or
* in a Spark worker thread at the top of the job.
*
* Therefore the fact that it is not thread safe should not be an issue.
*
* Default config file name: "data/cassandra.yaml"
*/
def connect: Unit = {
// Open a connection only if does not have one already
bc4me match {
case None =>
bc4me = try {
Some(new Bc4me)
} catch {
case ex: NoHostAvailableException => {
Logger.error("Bettercare4me.connect: NoHostAvailableException caught! -- No Cassandra database available.")
None
}
}
case _ => Unit
}
}
/**
* Closing the connection with Cassandra cluster
*
* This is called *only* by Global.onStop at application shutdown.
* Therefore the fact that it is not thread safe should not be an issue.
*/
def close = {
bc4me match {
case Some(c) => c.close
case _ => Logger.warn("Bettercare4me: NOTHING TO CLOSE HERE!!!")
}
bc4me = None
}
/**
* Get all patients by batch_id
*/
def queryPatients(batchId: Int): Future[Iterable[Patient]] = {
bc4me match {
case Some(c) => c.queryPatients(batchId)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Get all providers by batch_id
*/
def queryProviders(batchId: Int): Future[Iterable[Provider]] = {
bc4me match {
case Some(c) => c.queryProviders(batchId)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Get all claims by batch_id
*/
def queryClaims(batchId: Int): Future[Iterable[Claim]] = {
bc4me match {
case Some(c) => c.queryClaims(batchId)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Batch insert into patients table
*/
def batchPatients(batchId: Int, patients: List[Patient]): Future[List[Unit.type]] = {
bc4me match {
case Some(c) => c.batchPatients(batchId, patients)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Batch insert into providers table
*/
def batchProviders(batchId: Int, providers: List[Provider]): Future[List[Unit.type]] = {
bc4me match {
case Some(c) => c.batchProviders(batchId, providers)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Batch insert into claims_patients table
*/
def batchClaimsByPatients(batchId: Int, claims: List[Claim]): Future[List[Unit.type]] = {
bc4me match {
case Some(c) => c.batchClaimsByPatients(batchId, claims)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Batch insert into claims_providers table
*/
def batchClaimsByProviders(batchId: Int, claims: List[Claim]): Future[List[Unit.type]] = {
bc4me match {
case Some(c) => c.batchClaimsByProviders(batchId, claims)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Query all HEDIS report summary
*/
def queryHEDISSummary: Future[Iterable[(HEDISScoreSummary, String)]] = {
bc4me match {
case Some(c) => c.queryHEDISSummary
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Query a specific HEDIS report
*/
def queryHEDISReport(hedisDate: DateTime): Future[(HEDISScoreSummary, String)] = {
bc4me match {
case Some(c) => c.queryHEDISReport(hedisDate)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Query the Claim Generator Configuration for a specific HEDIS date (run)
*/
def queryClaimGeneratorConfig(hedisDate: DateTime): Future[(ClaimGeneratorConfig, String)] = {
bc4me match {
case Some(c) => c.queryClaimGeneratorConfig(hedisDate)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* HEDIS report summary
*/
def insertHEDISSummary(name: String, hedisDate: DateTime, patientCount: Long, scoreSummaries: List[String], claimGeneratorConfig: String): Future[Unit.type] = {
bc4me match {
case Some(c) => c.insertHEDISSummary(name, hedisDate, patientCount, scoreSummaries, claimGeneratorConfig)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Insert the rule information based on RuleScoreSummary into rules_information table
*/
def insertRuleInformation(hedisDate: DateTime, patientCount: Long, ruleScoreSummary: RuleScoreSummary): Future[Unit.type] = {
bc4me match {
case Some(c) => c.insertRuleInformation(hedisDate, patientCount, ruleScoreSummary)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Upsert the number of page count in rule information table
*/
def insertRuleInformation(ruleName: String, hedisDate: DateTime, pageCount: Long): Future[Unit.type] = {
bc4me match {
case Some(c) => c.insertRuleInformation(ruleName, hedisDate, pageCount)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Return the rule information and stats for a hedis measure (RuleScoreSummary)
*/
def queryRuleInformation(ruleName: String, hedisDate: DateTime): Future[(Long, Long, RuleScoreSummary)] = {
bc4me match {
case Some(c) => c.queryRuleInformation(ruleName, hedisDate)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Return the list of patients for a hedis measure
*/
def queryRuleScorecard(ruleName: String, hedisDate: DateTime): Future[Iterable[(Int, Patient, Boolean, Boolean)]] = {
bc4me match {
case Some(c) => c.queryRuleScorecard(ruleName, hedisDate)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Insert a rule summary for a patient
*/
def insertRuleScorecards(ruleName: String, hedisDate: DateTime, batchID: Int, patient: Patient, isExcluded: Boolean, isMeetCriteria: Boolean): Future[Unit.type] = {
bc4me match {
case Some(c) => c.insertRuleScorecards(ruleName, hedisDate, batchID, patient, isExcluded, isMeetCriteria)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Return the paginated list of patients for a hedis measure (rule_scorecards_paginated table)
*/
def queryRuleScorecardPaginated(ruleName: String, hedisDate: DateTime, pageID: Long): Future[Iterable[(Int, Patient, Boolean, Boolean)]] = {
bc4me match {
case Some(c) => c.queryRuleScorecardPaginated(ruleName, hedisDate, pageID)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Return the paginated list of patients for a hedis measure (rule_scorecard_paginated table)
*
* Read pageCnt pages starting at pageID
* @param ruleName name of rule to query
* @param hedisDate end date of reporting period
* @param pageID starting page_id to read from
* @param pageCnt the number of pages to read
*/
def queryRuleScorecardPaginated(ruleName: String, hedisDate: DateTime, pageID: Long, pageCnt: Int): Future[Iterable[(Int, Patient, Boolean, Boolean)]] = {
bc4me match {
case Some(c) => c.queryRuleScorecardPaginated(ruleName, hedisDate, pageID, pageCnt)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Insert a rule summary for a patient in paginated table (rule_scorecards_paginated table)
*/
def insertRuleScorecardsPaginated(ruleName: String, hedisDate: DateTime, batchID: Int, pageID: Long, patient: Patient, isExcluded: Boolean, isMeetCriteria: Boolean): Future[Unit.type] = {
bc4me match {
case Some(c) => c.insertRuleScorecardsPaginated(ruleName, hedisDate, batchID, pageID, patient, isExcluded, isMeetCriteria)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Saving PatientScorecardResult, populating patient_scorecard table
*/
def insertPatientScorecardResult(batchID: Int, hedisDate: DateTime, patientScorecardResult: PatientScorecardResult): Future[Iterable[Unit.type]] = {
bc4me match {
case Some(c) => c.insertPatientScorecardResult(batchID, hedisDate, patientScorecardResult)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
/**
* Read the patient scorecards, aka patient profile
*/
def queryPatientScorecardResult(batchID: Int, patientID: String, hedisDate: DateTime): Future[PatientScorecardResult] = {
bc4me match {
case Some(c) => c.queryPatientScorecardResult(batchID, patientID, hedisDate)
case _ => throw NickelException("Bettercare4me: Connection to Cassandra not opened, must call Bettercare4me.connect once before use")
}
}
}
| reactivecore01/bettercare4.me | play/app/com/nickelsoftware/bettercare4me/cassandra/Bettercare4me.scala | Scala | apache-2.0 | 30,853 |
package junctions
import Chisel._
import cde.{Parameters, Field}
class PociIO(implicit p: Parameters) extends HastiBundle()(p)
{
val paddr = UInt(OUTPUT, hastiAddrBits)
val pwrite = Bool(OUTPUT)
val psel = Bool(OUTPUT)
val penable = Bool(OUTPUT)
val pwdata = UInt(OUTPUT, hastiDataBits)
val prdata = UInt(INPUT, hastiDataBits)
val pready = Bool(INPUT)
val pslverr = Bool(INPUT)
}
class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) {
val io = new Bundle {
val in = new HastiSlaveIO
val out = new PociIO
}
val s_idle :: s_setup :: s_access :: Nil = Enum(UInt(), 3)
val state = Reg(init = s_idle)
val transfer = io.in.hsel & io.in.htrans(1)
switch (state) {
is (s_idle) {
when (transfer) { state := s_setup }
}
is (s_setup) {
state := s_access
}
is (s_access) {
when (io.out.pready & ~transfer) { state := s_idle }
when (io.out.pready & transfer) { state := s_setup }
when (~io.out.pready) { state := s_access }
}
}
val haddr_reg = Reg(UInt(width = hastiAddrBits))
val hwrite_reg = Reg(UInt(width = 1))
when (transfer) {
haddr_reg := io.in.haddr
hwrite_reg := io.in.hwrite
}
io.out.paddr := haddr_reg
io.out.pwrite := hwrite_reg(0)
io.out.psel := (state =/= s_idle)
io.out.penable := (state === s_access)
io.out.pwdata := io.in.hwdata
io.in.hrdata := io.out.prdata
io.in.hready := ((state === s_access) & io.out.pready) | (state === s_idle)
io.in.hresp := io.out.pslverr
}
class PociBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p)
{
val io = new Bundle {
val master = new PociIO().flip
val slaves = Vec(amap.size, new PociIO)
}
val psels = PriorityEncoderOH(
(io.slaves zip amap) map { case (s, afn) => {
s.paddr := io.master.paddr
s.pwrite := io.master.pwrite
s.pwdata := io.master.pwdata
afn(io.master.paddr) && io.master.psel
}})
(io.slaves zip psels) foreach { case (s, psel) => {
s.psel := psel
s.penable := io.master.penable && psel
} }
io.master.prdata := Mux1H(psels, io.slaves.map(_.prdata))
io.master.pready := Mux1H(psels, io.slaves.map(_.pready))
io.master.pslverr := Mux1H(psels, io.slaves.map(_.pslverr))
}
| masc-ucsc/cmpe220fall16 | riscv_cores/zscale_modified/junctions/src/main/scala/poci.scala | Scala | apache-2.0 | 2,281 |
/*
* Copyright (c) 2012-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
import lens._, nat._, test._
package lensTestDataTypes {
sealed trait Sum1
case class Prod1a(s2: Sum2, i: Int) extends Sum1
case class Prod1b(s2: Sum2, s: String) extends Sum1
sealed trait Sum2
case class Prod2a(i: Int) extends Sum2
case class Prod2b(s: String) extends Sum2
sealed trait Tree[T]
case class Node[T](left: Tree[T], right: Tree[T]) extends Tree[T]
case class Leaf[T](value: T) extends Tree[T]
case class Foo(i: Int, s: String)
case class Bar(i: Int, b: Boolean)
case class Address(street : String, city : String, postcode : String)
case class Person(name : String, age : Int, address : Address)
}
import lensTestDataTypes._
trait LensTests {
val address = Address("Southover Street", "Brighton", "BN2 9UA")
val person = Person("Joe Grey", 37, address)
val nameLens: Lens[Person, String]
val ageLens: Lens[Person, Int]
val addressLens: Lens[Person, Address]
val streetLens: Lens[Person, String]
val cityLens: Lens[Person, String]
val postcodeLens: Lens[Person, String]
@Test
def testBasics {
val age1 = ageLens.get(person)
typed[Int](age1)
assertEquals(37, age1)
val person2 = ageLens.set(person)(38)
assertEquals(38, person2.age)
val street1 = streetLens.get(person)
typed[String](street1)
assertEquals("Southover Street", street1)
val person3 = streetLens.set(person)("Montpelier Road")
assertEquals("Montpelier Road", person3.address.street)
}
@Test
def testCompose {
val addressLens = lens[Person] >> 2
val streetLens = lens[Address] >> 0
val personStreetLens1 = streetLens compose addressLens
val personStreetLens2 = compose(streetLens, addressLens)
val personStreetLens3 = (streetLens :: addressLens :: HNil).reduceLeft(compose)
val street1 = personStreetLens1.get(person)
typed[String](street1)
assertEquals("Southover Street", street1)
val street2 = personStreetLens2.get(person)
typed[String](street2)
assertEquals("Southover Street", street2)
val street3 = personStreetLens3.get(person)
typed[String](street3)
assertEquals("Southover Street", street3)
}
@Test
def testTuples {
type ISDB = (Int, (String, (Double, Boolean)))
val tp = (23, ("foo", (2.0, false)))
val lens0 = lens[ISDB] >> 0
val lens1 = lens[ISDB] >> 1
val lens10 = lens[ISDB] >> 1 >> 0
val lens11 = lens[ISDB] >> 1 >> 1
val lens110 = lens[ISDB] >> 1 >> 1 >> 0
val lens111 = lens[ISDB] >> 1 >> 1 >> 1
val i = lens0.get(tp)
typed[Int](i)
assertEquals(23, i)
val tpi = lens0.set(tp)(13)
typed[ISDB](tpi)
assertEquals((13, ("foo", (2.0, false))), tpi)
val sdb = lens1.get(tp)
typed[(String, (Double, Boolean))](sdb)
assertEquals(("foo", (2.0, false)), sdb)
val tpsdb = lens1.set(tp)("bar", (3.0, true))
typed[ISDB](tpsdb)
assertEquals((23, ("bar", (3.0, true))), tpsdb)
val s = lens10.get(tp)
typed[String](s)
assertEquals("foo", s)
val tps = lens10.set(tp)("bar")
typed[ISDB](tps)
assertEquals((23, ("bar", (2.0, false))), tps)
val db = lens11.get(tp)
typed[(Double, Boolean)](db)
assertEquals((2.0, false), db)
val tpdb = lens11.set(tp)(3.0, true)
typed[ISDB](tpdb)
assertEquals((23, ("foo", (3.0, true))), tpdb)
val d = lens110.get(tp)
typed[Double](d)
assertEquals(2.0, d, Double.MinPositiveValue)
val tpd = lens110.set(tp)(3.0)
typed[ISDB](tpd)
assertEquals((23, ("foo", (3.0, false))), tpd)
val b = lens111.get(tp)
typed[Boolean](b)
assertEquals(false, b)
val tpb = lens111.set(tp)(true)
typed[ISDB](tpb)
assertEquals((23, ("foo", (2.0, true))), tpb)
}
@Test
def testHLists {
type ISB = Int :: String :: Boolean :: HNil
val l = 23 :: "foo" :: true :: HNil
val lens0 = hlistNthLens[ISB, _0]
val lensI = hlistSelectLens[ISB, Int]
val lens1 = hlistNthLens[ISB, _1]
val lensS = hlistSelectLens[ISB, String]
val lens2 = hlistNthLens[ISB, _2]
val lensB = hlistSelectLens[ISB, Boolean]
val i = lens0.get(l)
typed[Int](i)
assertEquals(23, i)
assertEquals(23, lensI.get(l))
val li = lens0.set(l)(13)
typed[ISB](li)
assertEquals(13 :: "foo" :: true :: HNil, li)
assertEquals(13 :: "foo" :: true :: HNil, lensI.set(l)(13))
val s = lens1.get(l)
typed[String](s)
assertEquals("foo", s)
assertEquals("foo", lensS.get(l))
val ls = lens1.set(l)("bar")
typed[ISB](ls)
assertEquals(23 :: "bar" :: true :: HNil, ls)
assertEquals(23 :: "bar" :: true :: HNil, lensS.set(l)("bar"))
val b = lens2.get(l)
typed[Boolean](b)
assertEquals(true, b)
assertEquals(true, lensB.get(l))
val lb = lens2.set(l)(false)
typed[ISB](lb)
assertEquals(23 :: "foo" :: false :: HNil, lb)
assertEquals(23 :: "foo" :: false :: HNil, lensB.set(l)(false))
}
@Test
def testRecords {
import record.FieldType, syntax.singleton._
val (fooT, barT) = (Witness("foo"), Witness("bar"))
type LT = (fooT.T FieldType Int) :: (barT.T FieldType String) :: HNil
val l = ("foo" ->> 42) :: ("bar" ->> "hi") :: HNil
typed[LT](l)
val li = recordLens[LT]("foo")
assertEquals(42, li.get(l))
assertEquals(("foo" ->> 84) :: ("bar" ->> "hi") :: HNil, li.set(l)(84))
val ls = recordLens[LT]("bar")
assertEquals("hi", ls.get(l))
assertEquals(("foo" ->> 42) :: ("bar" ->> "bye") :: HNil, ls.set(l)("bye"))
}
@Test
def testSets {
val s = Set("foo", "bar", "baz")
val lens = setLens[String]("bar")
val b1 = lens.get(s)
assert(b1)
val s2 = lens.set(s)(false)
assertEquals(Set("foo", "baz"), s2)
val b2 = lens.get(s2)
assert(!b2)
val s3 = lens.set(s2)(true)
assertEquals(s, s3)
}
@Test
def testMaps {
val m = Map(23 -> "foo", 13 -> "bar", 11 -> "baz")
val lens = mapLens[Int, String](13)
val s1 = lens.get(m)
assertEquals(Option("bar"), s1)
val m2 = lens.set(m)(Option("wibble"))
assertEquals(Map(23 -> "foo", 13 -> "wibble", 11 -> "baz"), m2)
val s2 = lens.get(m2)
assertEquals(Option("wibble"), s2)
val m3 = lens.set(m)(None)
assertEquals(Map(23 -> "foo", 11 -> "baz"), m3)
val s3 = lens.get(m3)
assertEquals(None, s3)
val m4 = lens.set(m3)(Option("bar"))
assertEquals(m, m4)
val s4 = lens.get(m4)
assertEquals(Option("bar"), s4)
}
@Test
def testProducts {
val nameAgeCityLens = nameLens ~ ageLens ~ cityLens
val nac1 = nameAgeCityLens.get(person)
typed[(String, Int, String)](nac1)
assertEquals(("Joe Grey", 37, "Brighton"), nac1)
val person2 = nameAgeCityLens.set(person)("Joe Soap", 27, "London")
assertEquals(Person("Joe Soap", 27, Address("Southover Street", "London", "BN2 9UA")), person2)
}
}
class LensTestsNat extends LensTests {
val nameLens = lens[Person] >> 0
val ageLens = lens[Person] >> 1
val addressLens = lens[Person] >> 2
val streetLens = lens[Person] >> 2 >> 0
val cityLens = lens[Person] >> 2 >> 1
val postcodeLens = lens[Person] >> 2 >> 2
}
class LensTestsKey extends LensTests {
val nameLens = lens[Person] >> 'name
val ageLens = lens[Person] >> 'age
val addressLens = lens[Person] >> 'address
val streetLens = lens[Person] >> 'address >> 'street
val cityLens = lens[Person] >> 'address >> 'city
val postcodeLens = lens[Person] >> 'address >> 'postcode
}
class OpticTestsDynamic extends LensTests {
val nameLens = lens[Person].name
val ageLens = lens[Person].age
val addressLens = lens[Person].address
val streetLens = lens[Person].address.street
val cityLens = lens[Person].address.city
val postcodeLens = lens[Person].address.postcode
}
class OpticTests {
@Test
def testBasics {
val s1: Sum1 = Prod1a(Prod2a(13), 23)
val s2: Sum1 = Prod1b(Prod2b("foo"), "bar")
val p1 = Prod1a(Prod2b("bar"), 11)
val l1 = optic[Sum1][Prod1a]
val l2 = optic[Sum1][Prod1a].i
val l3 = optic[Sum1][Prod1a].s2
val l4 = optic[Sum1][Prod1a].s2[Prod2a]
val l5 = optic[Sum1][Prod1a].s2[Prod2a].i
val g1 = l1.get(s1)
typed[Option[Prod1a]](g1)
assertEquals(Some(s1), g1)
val g1b = l1.get(s2)
typed[Option[Prod1a]](g1b)
assertEquals(None, g1b)
val g2 = l2.get(s1)
typed[Option[Int]](g2)
assertEquals(Some(23), g2)
val g2b = l2.get(s2)
typed[Option[Int]](g2b)
assertEquals(None, g2b)
val g3 = l3.get(s1)
typed[Option[Sum2]](g3)
assertEquals(Some(Prod2a(13)), g3)
val g3b = l3.get(s2)
typed[Option[Sum2]](g3b)
assertEquals(None, g3b)
val g4 = l4.get(s1)
typed[Option[Prod2a]](g4)
assertEquals(Some(Prod2a(13)), g4)
val g4b = l4.get(s2)
typed[Option[Prod2a]](g4b)
assertEquals(None, g4b)
val g5 = l5.get(s1)
typed[Option[Int]](g5)
assertEquals(Some(13), g5)
val g5b = l5.get(s2)
typed[Option[Int]](g5b)
assertEquals(None, g5b)
val t1 = l1.set(s1)(p1)
typed[Sum1](t1)
assertEquals(p1, t1)
val t1b = l1.set(s2)(p1)
typed[Sum1](t1b)
assertEquals(p1, t1b)
val t2 = l2.set(s1)(17)
typed[Sum1](t2)
assertEquals(Prod1a(Prod2a(13), 17), t2)
val t2b = l2.set(s2)(17)
typed[Sum1](t2b)
assertEquals(s2, t2b)
val t3 = l3.set(s1)(Prod2b("bar"))
typed[Sum1](t3)
assertEquals(Prod1a(Prod2b("bar"), 23), t3)
val t3b = l3.set(s2)(Prod2b("bar"))
typed[Sum1](t3b)
assertEquals(s2, t3b)
val t4 = l4.set(s1)(Prod2a(19))
typed[Sum1](t4)
assertEquals(Prod1a(Prod2a(19), 23), t4)
val t4b = l4.set(s2)(Prod2a(19))
typed[Sum1](t4b)
assertEquals(s2, t4b)
val t5 = l5.set(s1)(19)
typed[Sum1](t5)
assertEquals(Prod1a(Prod2a(19), 23), t5)
val t5b = l5.set(s2)(19)
typed[Sum1](t5b)
assertEquals(s2, t5b)
}
@Test
def testInferredProducts {
val s1: Sum1 = Prod1a(Prod2a(13), 23)
val s2: Sum1 = Prod1b(Prod2b("foo"), "bar")
val p1 = Prod1a(Prod2b("bar"), 11)
val li1 = optic[Sum1].i
val li2 = optic[Sum1].s2
val li3 = optic[Sum1].s2.i
val g1 = li1.get(s1)
typed[Option[Int]](g1)
assertEquals(Some(23), g1)
val g1b = li1.get(s2)
typed[Option[Int]](g1b)
assertEquals(None, g1b)
val g2 = li2.get(s1)
typed[Option[Sum2]](g2)
assertEquals(Some(Prod2a(13)), g2)
val g2b = li2.get(s2)
typed[Option[Sum2]](g2b)
assertEquals(None, g2b)
val g3 = li3.get(s1)
typed[Option[Int]](g3)
assertEquals(Some(13), g3)
val g3b = li3.get(s2)
typed[Option[Int]](g3b)
assertEquals(None, g3b)
val t1 = li1.set(s1)(17)
typed[Sum1](t1)
assertEquals(Prod1a(Prod2a(13), 17), t1)
val t1b = li1.set(s2)(17)
typed[Sum1](t1b)
assertEquals(s2, t1b)
val t2 = li2.set(s1)(Prod2b("bar"))
typed[Sum1](t2)
assertEquals(Prod1a(Prod2b("bar"), 23), t2)
val t2b = li2.set(s2)(Prod2b("bar"))
typed[Sum1](t2b)
assertEquals(s2, t2b)
val t3 = li3.set(s1)(19)
typed[Sum1](t3)
assertEquals(Prod1a(Prod2a(19), 23), t3)
val t3b = li3.set(s2)(19)
typed[Sum1](t3b)
assertEquals(s2, t3b)
}
@Test
def testRecursive {
val t1: Tree[Int] = Node(Node(Leaf(1), Leaf(2)), Leaf(3))
val t2: Tree[Int] = Node(Leaf(4), Node(Leaf(5), Leaf(6)))
val t3: Node[Int] = Node(Leaf(7), Leaf(8))
val l1 = optic[Tree[Int]]
val l2 = optic[Tree[Int]][Node[Int]]
val l3 = optic[Tree[Int]][Node[Int]].left
val l4 = optic[Tree[Int]][Node[Int]].left[Node[Int]].right
val l5 = optic[Tree[Int]][Node[Int]].left[Node[Int]].right[Leaf[Int]].value
val g1 = l1.get(t1)
typed[Tree[Int]](g1)
assertEquals(t1, g1)
val g1b = l1.get(t2)
typed[Tree[Int]](g1b)
assertEquals(t2, g1b)
val g2 = l2.get(t1)
typed[Option[Node[Int]]](g2)
assertEquals(Some(t1), g2)
val g2b = l2.get(t2)
typed[Option[Node[Int]]](g2b)
assertEquals(Some(t2), g2b)
val g3 = l3.get(t1)
typed[Option[Tree[Int]]](g3)
assertEquals(Some(Node(Leaf(1), Leaf(2))), g3)
val g4 = l4.get(t1)
typed[Option[Tree[Int]]](g4)
assertEquals(Some(Leaf(2)), g4)
val g5 = l5.get(t1)
typed[Option[Int]](g5)
assertEquals(Some(2), g5)
val s1 = l1.set(t1)(t3)
typed[Tree[Int]](s1)
assertEquals(t3, s1)
val s1b = l1.set(t2)(t3)
typed[Tree[Int]](s1b)
assertEquals(t3, s1b)
val s2 = l2.set(t1)(t3)
typed[Tree[Int]](s2)
assertEquals(t3, s2)
val s2b = l2.set(t2)(t3)
typed[Tree[Int]](s2b)
assertEquals(t3, s2b)
val s3 = l3.set(t1)(t3)
typed[Tree[Int]](s3)
assertEquals(Node(t3, Leaf(3)), s3)
val s3b = l3.set(t2)(t3)
typed[Tree[Int]](s3b)
assertEquals(Node(t3, Node(Leaf(5), Leaf(6))), s3b)
val s4 = l4.set(t1)(t3)
typed[Tree[Int]](s4)
assertEquals(Node(Node(Leaf(1), t3), Leaf(3)), s4)
val s4b = l4.set(t2)(t3)
typed[Tree[Int]](s4b)
assertEquals(t2, s4b)
val s5 = l5.set(t1)(23)
typed[Tree[Int]](s5)
assertEquals(Node(Node(Leaf(1), Leaf(23)), Leaf(3)), s5)
val s5b = l5.set(t2)(23)
typed[Tree[Int]](s5b)
assertEquals(t2, s5b)
}
@Test
def testRecursiveInferredProducts {
val t1: Tree[Int] = Node(Node(Leaf(1), Leaf(2)), Leaf(3))
val t2: Tree[Int] = Node(Leaf(4), Node(Leaf(5), Leaf(6)))
val t3: Node[Int] = Node(Leaf(7), Leaf(8))
val l1 = optic[Tree[Int]]
val l2 = optic[Tree[Int]].left
val l3 = optic[Tree[Int]].left.right
val l4 = optic[Tree[Int]].left.right.value
val g1 = l1.get(t1)
typed[Tree[Int]](g1)
assertEquals(t1, g1)
val g1b = l1.get(t2)
typed[Tree[Int]](g1b)
assertEquals(t2, g1b)
val g2 = l2.get(t1)
typed[Option[Tree[Int]]](g2)
assertEquals(Some(Node(Leaf(1), Leaf(2))), g2)
val g3 = l3.get(t1)
typed[Option[Tree[Int]]](g3)
assertEquals(Some(Leaf(2)), g3)
val g4 = l4.get(t1)
typed[Option[Int]](g4)
assertEquals(Some(2), g4)
val s1 = l1.set(t1)(t3)
typed[Tree[Int]](s1)
assertEquals(t3, s1)
val s1b = l1.set(t2)(t3)
typed[Tree[Int]](s1b)
assertEquals(t3, s1b)
val s2 = l2.set(t1)(t3)
typed[Tree[Int]](s2)
assertEquals(Node(t3, Leaf(3)), s2)
val s2b = l2.set(t2)(t3)
typed[Tree[Int]](s2b)
assertEquals(Node(t3, Node(Leaf(5), Leaf(6))), s2b)
val s3 = l3.set(t1)(t3)
typed[Tree[Int]](s3)
assertEquals(Node(Node(Leaf(1), t3), Leaf(3)), s3)
val s3b = l3.set(t2)(t3)
typed[Tree[Int]](s3b)
assertEquals(t2, s3b)
val s4 = l4.set(t1)(23)
typed[Tree[Int]](s4)
assertEquals(Node(Node(Leaf(1), Leaf(23)), Leaf(3)), s4)
val s4b = l4.set(t2)(23)
typed[Tree[Int]](s4b)
assertEquals(t2, s4b)
}
@Test
def testPaths {
val t1: Tree[Int] = Node(Node(Leaf(1), Leaf(2)), Leaf(3))
val t2: Tree[Int] = Node(Leaf(4), Node(Leaf(5), Leaf(6)))
val t3: Node[Int] = Node(Leaf(7), Leaf(8))
val pi1 = ^
val pi2 = ^.left
val pi3 = ^.left.right
val pi4 = ^.left.right.value
val l1 = optic[Tree[Int]](pi1)
val l2 = optic[Tree[Int]](pi2)
val l3 = optic[Tree[Int]](pi3)
val l4 = optic[Tree[Int]](pi4)
val g1 = l1.get(t1)
typed[Tree[Int]](g1)
assertEquals(t1, g1)
val g1b = l1.get(t2)
typed[Tree[Int]](g1b)
assertEquals(t2, g1b)
val g2 = l2.get(t1)
typed[Option[Tree[Int]]](g2)
assertEquals(Some(Node(Leaf(1), Leaf(2))), g2)
val g3 = l3.get(t1)
typed[Option[Tree[Int]]](g3)
assertEquals(Some(Leaf(2)), g3)
val g4 = l4.get(t1)
typed[Option[Int]](g4)
assertEquals(Some(2), g4)
val s1 = l1.set(t1)(t3)
typed[Tree[Int]](s1)
assertEquals(t3, s1)
val s1b = l1.set(t2)(t3)
typed[Tree[Int]](s1b)
assertEquals(t3, s1b)
val s2 = l2.set(t1)(t3)
typed[Tree[Int]](s2)
assertEquals(Node(t3, Leaf(3)), s2)
val s2b = l2.set(t2)(t3)
typed[Tree[Int]](s2b)
assertEquals(Node(t3, Node(Leaf(5), Leaf(6))), s2b)
val s3 = l3.set(t1)(t3)
typed[Tree[Int]](s3)
assertEquals(Node(Node(Leaf(1), t3), Leaf(3)), s3)
val s3b = l3.set(t2)(t3)
typed[Tree[Int]](s3b)
assertEquals(t2, s3b)
val s4 = l4.set(t1)(23)
typed[Tree[Int]](s4)
assertEquals(Node(Node(Leaf(1), Leaf(23)), Leaf(3)), s4)
val s4b = l4.set(t2)(23)
typed[Tree[Int]](s4b)
assertEquals(t2, s4b)
}
@Test
def testInferredLenses {
val p = ^.i
def update[T, E](t: T)(e: E)(implicit mkLens: p.Lens[T, E]): T = mkLens().set(t)(e)
val foo = Foo(23, "foo")
val bar = Bar(13, true)
val foo2 = update(foo)(11)
typed[Foo](foo2)
assertEquals(Foo(11, "foo"), foo2)
val bar2 = update(bar)(7)
typed[Bar](bar2)
assertEquals(Bar(7, true), bar2)
}
}
| mandubian/shapeless | core/src/test/scala/shapeless/lenses.scala | Scala | apache-2.0 | 17,462 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import java.util.Random
import scala.annotation.tailrec
import scala.collection.mutable
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.ml.util.Instrumentation
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.linalg.BLAS.axpy
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
/**
* A bisecting k-means algorithm based on the paper "A comparison of document clustering techniques"
* by Steinbach, Karypis, and Kumar, with modification to fit Spark.
* The algorithm starts from a single cluster that contains all points.
* Iteratively it finds divisible clusters on the bottom level and bisects each of them using
* k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
* The bisecting steps of clusters on the same level are grouped together to increase parallelism.
* If bisecting all divisible clusters on the bottom level would result in more than `k` leaf clusters,
* larger clusters get higher priority.
*
* @param k the desired number of leaf clusters (default: 4). The actual number could be smaller if
* there are no divisible leaf clusters.
* @param maxIterations the max number of k-means iterations to split clusters (default: 20)
* @param minDivisibleClusterSize the minimum number of points (if greater than or equal to 1.0) or
* the minimum proportion of points (if less than 1.0) of a divisible
* cluster (default: 1)
* @param seed a random seed (default: hash value of the class name)
*
* @see <a href="http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf">
* Steinbach, Karypis, and Kumar, A comparison of document clustering techniques,
* KDD Workshop on Text Mining, 2000.</a>
*/
@Since("1.6.0")
class BisectingKMeans private (
private var k: Int,
private var maxIterations: Int,
private var minDivisibleClusterSize: Double,
private var seed: Long,
private var distanceMeasure: String) extends Logging {
import BisectingKMeans._
/**
* Constructs with the default configuration
*/
@Since("1.6.0")
def this() = this(4, 20, 1.0, classOf[BisectingKMeans].getName.##, DistanceMeasure.EUCLIDEAN)
/**
* Sets the desired number of leaf clusters (default: 4).
* The actual number could be smaller if there are no divisible leaf clusters.
*/
@Since("1.6.0")
def setK(k: Int): this.type = {
require(k > 0, s"k must be positive but got $k.")
this.k = k
this
}
/**
* Gets the desired number of leaf clusters.
*/
@Since("1.6.0")
def getK: Int = this.k
/**
* Sets the max number of k-means iterations to split clusters (default: 20).
*/
@Since("1.6.0")
def setMaxIterations(maxIterations: Int): this.type = {
require(maxIterations > 0, s"maxIterations must be positive but got $maxIterations.")
this.maxIterations = maxIterations
this
}
/**
* Gets the max number of k-means iterations to split clusters.
*/
@Since("1.6.0")
def getMaxIterations: Int = this.maxIterations
/**
* Sets the minimum number of points (if greater than or equal to `1.0`) or the minimum proportion
* of points (if less than `1.0`) of a divisible cluster (default: 1).
*/
@Since("1.6.0")
def setMinDivisibleClusterSize(minDivisibleClusterSize: Double): this.type = {
require(minDivisibleClusterSize > 0.0,
s"minDivisibleClusterSize must be positive but got $minDivisibleClusterSize.")
this.minDivisibleClusterSize = minDivisibleClusterSize
this
}
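// Worked example of the threshold semantics above (editorial addition): with 1000 input
// points, setMinDivisibleClusterSize(10) only splits clusters holding at least 10 points,
// while setMinDivisibleClusterSize(0.1) only splits clusters holding at least 100 points.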
/**
* Gets the minimum number of points (if greater than or equal to `1.0`) or the minimum proportion
* of points (if less than `1.0`) of a divisible cluster.
*/
@Since("1.6.0")
def getMinDivisibleClusterSize: Double = minDivisibleClusterSize
/**
* Sets the random seed (default: hash value of the class name).
*/
@Since("1.6.0")
def setSeed(seed: Long): this.type = {
this.seed = seed
this
}
/**
* Gets the random seed.
*/
@Since("1.6.0")
def getSeed: Long = this.seed
/**
* The distance suite used by the algorithm.
*/
@Since("2.4.0")
def getDistanceMeasure: String = distanceMeasure
/**
* Set the distance suite used by the algorithm.
*/
@Since("2.4.0")
def setDistanceMeasure(distanceMeasure: String): this.type = {
DistanceMeasure.validateDistanceMeasure(distanceMeasure)
this.distanceMeasure = distanceMeasure
this
}
private[spark] def runWithWeight(
instances: RDD[(Vector, Double)],
handlePersistence: Boolean,
instr: Option[Instrumentation]): BisectingKMeansModel = {
val d = instances.map(_._1.size).first
logInfo(s"Feature dimension: $d.")
val dMeasure = DistanceMeasure.decodeFromString(this.distanceMeasure)
val norms = instances.map(d => Vectors.norm(d._1, 2.0))
val vectors = instances.zip(norms)
.map { case ((x, weight), norm) => new VectorWithNorm(x, norm, weight) }
if (handlePersistence) {
vectors.persist(StorageLevel.MEMORY_AND_DISK)
} else {
// Compute and cache vector norms for fast distance computation.
norms.persist(StorageLevel.MEMORY_AND_DISK)
}
var assignments = vectors.map(v => (ROOT_INDEX, v))
var activeClusters = summarize(d, assignments, dMeasure)
instr.foreach(_.logNumExamples(activeClusters.values.map(_.size).sum))
instr.foreach(_.logSumOfWeights(activeClusters.values.map(_.weightSum).sum))
val rootSummary = activeClusters(ROOT_INDEX)
val n = rootSummary.size
logInfo(s"Number of points: $n.")
logInfo(s"Initial cost: ${rootSummary.cost}.")
val minSize = if (minDivisibleClusterSize >= 1.0) {
math.ceil(minDivisibleClusterSize).toLong
} else {
math.ceil(minDivisibleClusterSize * n).toLong
}
logInfo(s"The minimum number of points of a divisible cluster is $minSize.")
var inactiveClusters = mutable.Seq.empty[(Long, ClusterSummary)]
val random = new Random(seed)
var numLeafClustersNeeded = k - 1
var level = 1
var preIndices: RDD[Long] = null
var indices: RDD[Long] = null
while (activeClusters.nonEmpty && numLeafClustersNeeded > 0 && level < LEVEL_LIMIT) {
// Divisible clusters are sufficiently large and have non-trivial cost.
var divisibleClusters = activeClusters.filter { case (_, summary) =>
(summary.size >= minSize) && (summary.cost > MLUtils.EPSILON * summary.size)
}
// If we don't need all divisible clusters, take the larger ones.
if (divisibleClusters.size > numLeafClustersNeeded) {
divisibleClusters = divisibleClusters.toSeq.sortBy { case (_, summary) =>
-summary.size
}.take(numLeafClustersNeeded)
.toMap
}
if (divisibleClusters.nonEmpty) {
val divisibleIndices = divisibleClusters.keys.toSet
logInfo(s"Dividing ${divisibleIndices.size} clusters on level $level.")
var newClusterCenters = divisibleClusters.flatMap { case (index, summary) =>
val (left, right) = splitCenter(summary.center, random, dMeasure)
Iterator((leftChildIndex(index), left), (rightChildIndex(index), right))
}.map(identity) // workaround for a Scala bug (SI-7005) that produces a not serializable map
var newClusters: Map[Long, ClusterSummary] = null
var newAssignments: RDD[(Long, VectorWithNorm)] = null
for (iter <- 0 until maxIterations) {
newAssignments = updateAssignments(assignments, divisibleIndices, newClusterCenters,
dMeasure)
.filter { case (index, _) =>
divisibleIndices.contains(parentIndex(index))
}
newClusters = summarize(d, newAssignments, dMeasure)
newClusterCenters = newClusters.mapValues(_.center).map(identity).toMap
}
if (preIndices != null) {
preIndices.unpersist()
}
preIndices = indices
indices = updateAssignments(assignments, divisibleIndices, newClusterCenters, dMeasure).keys
.persist(StorageLevel.MEMORY_AND_DISK)
assignments = indices.zip(vectors)
inactiveClusters ++= activeClusters
activeClusters = newClusters
numLeafClustersNeeded -= divisibleClusters.size
} else {
logInfo(s"None active and divisible clusters left on level $level. Stop iterations.")
inactiveClusters ++= activeClusters
activeClusters = Map.empty
}
level += 1
}
if (preIndices != null) { preIndices.unpersist() }
if (indices != null) { indices.unpersist() }
if (handlePersistence) { vectors.unpersist() } else { norms.unpersist() }
val clusters = activeClusters ++ inactiveClusters
val root = buildTree(clusters, dMeasure)
val totalCost = root.leafNodes.map(_.cost).sum
new BisectingKMeansModel(root, this.distanceMeasure, totalCost)
}
/**
* Runs the bisecting k-means algorithm.
* @param input RDD of vectors
* @return model for the bisecting kmeans
*/
@Since("1.6.0")
def run(input: RDD[Vector]): BisectingKMeansModel = {
val instances = input.map(point => (point, 1.0))
val handlePersistence = input.getStorageLevel == StorageLevel.NONE
runWithWeight(instances, handlePersistence, None)
}
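// Hedged usage sketch (editorial addition, not part of the upstream source): a minimal way
// to drive this class through the setters and `run` defined above. It assumes a SparkContext
// named `sc` is in scope and that `clusterCenters` is available on the resulting
// BisectingKMeansModel; the data values are made up for illustration.
//
//   val data: RDD[Vector] = sc.parallelize(Seq(
//     Vectors.dense(0.1, 0.2), Vectors.dense(0.3, 0.1),
//     Vectors.dense(9.0, 8.5), Vectors.dense(9.2, 8.7)))
//   val model = new BisectingKMeans().setK(2).setMaxIterations(10).run(data)
//   model.clusterCenters.foreach(println)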
/**
* Java-friendly version of `run()`.
*/
def run(data: JavaRDD[Vector]): BisectingKMeansModel = run(data.rdd)
}
private object BisectingKMeans extends Serializable {
/** The index of the root node of a tree. */
private val ROOT_INDEX: Long = 1
private val MAX_DIVISIBLE_CLUSTER_INDEX: Long = Long.MaxValue / 2
private val LEVEL_LIMIT = math.log10(Long.MaxValue) / math.log10(2)
/** Returns the left child index of the given node index. */
private def leftChildIndex(index: Long): Long = {
require(index <= MAX_DIVISIBLE_CLUSTER_INDEX, s"Child index out of bound: 2 * $index.")
2 * index
}
/** Returns the right child index of the given node index. */
private def rightChildIndex(index: Long): Long = {
require(index <= MAX_DIVISIBLE_CLUSTER_INDEX, s"Child index out of bound: 2 * $index + 1.")
2 * index + 1
}
/** Returns the parent index of the given node index, or 0 if the input is 1 (root). */
private def parentIndex(index: Long): Long = {
index / 2
}
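// Worked example of the index arithmetic above (editorial addition): node 3 has children
// 2 * 3 = 6 and 2 * 3 + 1 = 7, and its parent is 3 / 2 = 1, the root index.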
/**
* Summarizes data by each cluster as Map.
* @param d feature dimension
* @param assignments pairs of point and its cluster index
* @return a map from cluster indices to corresponding cluster summaries
*/
private def summarize(
d: Int,
assignments: RDD[(Long, VectorWithNorm)],
distanceMeasure: DistanceMeasure): Map[Long, ClusterSummary] = {
assignments.aggregateByKey(new ClusterSummaryAggregator(d, distanceMeasure))(
seqOp = (agg, v) => agg.add(v),
combOp = (agg1, agg2) => agg1.merge(agg2)
).mapValues(_.summary)
.collect().toMap
}
/**
* Cluster summary aggregator.
* @param d feature dimension
*/
private class ClusterSummaryAggregator(val d: Int, val distanceMeasure: DistanceMeasure)
extends Serializable {
private var n: Long = 0L
private var weightSum: Double = 0.0
private val sum: Vector = Vectors.zeros(d)
private var sumSq: Double = 0.0
/** Adds a point. */
def add(v: VectorWithNorm): this.type = {
n += 1L
weightSum += v.weight
// TODO: use a numerically stable approach to estimate cost
sumSq += v.norm * v.norm * v.weight
distanceMeasure.updateClusterSum(v, sum)
this
}
/** Merges another aggregator. */
def merge(other: ClusterSummaryAggregator): this.type = {
n += other.n
weightSum += other.weightSum
sumSq += other.sumSq
axpy(1.0, other.sum, sum)
this
}
/** Returns the summary. */
def summary: ClusterSummary = {
val center = distanceMeasure.centroid(sum.copy, weightSum)
val cost = distanceMeasure.clusterCost(center, new VectorWithNorm(sum), weightSum,
sumSq)
ClusterSummary(n, weightSum, center, cost)
}
}
/**
* Bisects a cluster center.
*
* @param center current cluster center
* @param random a random number generator
* @return initial centers
*/
private def splitCenter(
center: VectorWithNorm,
random: Random,
distanceMeasure: DistanceMeasure): (VectorWithNorm, VectorWithNorm) = {
val d = center.vector.size
val norm = center.norm
val level = 1e-4 * norm
val noise = Vectors.dense(Array.fill(d)(random.nextDouble()))
distanceMeasure.symmetricCentroids(level, noise, center.vector)
}
/**
* Updates assignments.
* @param assignments current assignments
* @param divisibleIndices divisible cluster indices
* @param newClusterCenters new cluster centers
* @return new assignments
*/
private def updateAssignments(
assignments: RDD[(Long, VectorWithNorm)],
divisibleIndices: Set[Long],
newClusterCenters: Map[Long, VectorWithNorm],
distanceMeasure: DistanceMeasure): RDD[(Long, VectorWithNorm)] = {
assignments.map { case (index, v) =>
if (divisibleIndices.contains(index)) {
val children = Seq(leftChildIndex(index), rightChildIndex(index))
val newClusterChildren = children.filter(newClusterCenters.contains)
val newClusterChildrenCenterToId =
newClusterChildren.map(id => newClusterCenters(id) -> id).toMap
val newClusterChildrenCenters = newClusterChildrenCenterToId.keys.toArray
if (newClusterChildren.nonEmpty) {
val selected = distanceMeasure.findClosest(newClusterChildrenCenters, v)._1
val center = newClusterChildrenCenters(selected)
val id = newClusterChildrenCenterToId(center)
(id, v)
} else {
(index, v)
}
} else {
(index, v)
}
}
}
/**
* Builds a clustering tree by re-indexing internal and leaf clusters.
* @param clusters a map from cluster indices to corresponding cluster summaries
* @return the root node of the clustering tree
*/
private def buildTree(
clusters: Map[Long, ClusterSummary],
distanceMeasure: DistanceMeasure): ClusteringTreeNode = {
var leafIndex = 0
var internalIndex = -1
/**
* Builds a subtree from this given node index.
*/
def buildSubTree(rawIndex: Long): ClusteringTreeNode = {
val cluster = clusters(rawIndex)
val size = cluster.size
val center = cluster.center
val cost = cluster.cost
val isInternal = clusters.contains(leftChildIndex(rawIndex))
if (isInternal) {
val index = internalIndex
internalIndex -= 1
val leftIndex = leftChildIndex(rawIndex)
val rightIndex = rightChildIndex(rawIndex)
val indexes = Seq(leftIndex, rightIndex).filter(clusters.contains)
val height = indexes.map { childIndex =>
distanceMeasure.distance(center, clusters(childIndex).center)
}.max
val children = indexes.map(buildSubTree).toArray
new ClusteringTreeNode(index, size, center, cost, height, children)
} else {
val index = leafIndex
leafIndex += 1
val height = 0.0
new ClusteringTreeNode(index, size, center, cost, height, Array.empty)
}
}
buildSubTree(ROOT_INDEX)
}
/**
* Summary of a cluster.
*
* @param size the number of points within this cluster
* @param weightSum the weightSum within this cluster
* @param center the center of the points within this cluster
* @param cost the sum of squared distances to the center
*/
private case class ClusterSummary(
size: Long,
weightSum: Double,
center: VectorWithNorm,
cost: Double)
}
/**
* Represents a node in a clustering tree.
*
* @param index node index, negative for internal nodes and non-negative for leaf nodes
* @param size size of the cluster
* @param centerWithNorm cluster center with norm
* @param cost cost of the cluster, i.e., the sum of squared distances to the center
* @param height height of the node in the dendrogram. Currently this is defined as the max distance
* from the center to the centers of the children's, but subject to change.
* @param children children nodes
*/
@Since("1.6.0")
private[clustering] class ClusteringTreeNode private[clustering] (
val index: Int,
val size: Long,
private[clustering] val centerWithNorm: VectorWithNorm,
val cost: Double,
val height: Double,
val children: Array[ClusteringTreeNode]) extends Serializable {
/** Whether this is a leaf node. */
val isLeaf: Boolean = children.isEmpty
require((isLeaf && index >= 0) || (!isLeaf && index < 0))
/** Cluster center. */
def center: Vector = centerWithNorm.vector
/** Predicts the leaf cluster node index that the input point belongs to. */
def predict(point: Vector, distanceMeasure: DistanceMeasure): Int = {
val (index, _) = predict(new VectorWithNorm(point), distanceMeasure)
index
}
/** Returns the full prediction path from root to leaf. */
def predictPath(point: Vector, distanceMeasure: DistanceMeasure): Array[ClusteringTreeNode] = {
predictPath(new VectorWithNorm(point), distanceMeasure).toArray
}
/** Returns the full prediction path from root to leaf. */
private def predictPath(
pointWithNorm: VectorWithNorm,
distanceMeasure: DistanceMeasure): List[ClusteringTreeNode] = {
if (isLeaf) {
this :: Nil
} else {
val selected = children.minBy { child =>
distanceMeasure.distance(child.centerWithNorm, pointWithNorm)
}
selected :: selected.predictPath(pointWithNorm, distanceMeasure)
}
}
/**
* Computes the cost of the input point.
*/
def computeCost(point: Vector, distanceMeasure: DistanceMeasure): Double = {
val (_, cost) = predict(new VectorWithNorm(point), distanceMeasure)
cost
}
/**
* Predicts the cluster index and the cost of the input point.
*/
private def predict(
pointWithNorm: VectorWithNorm,
distanceMeasure: DistanceMeasure): (Int, Double) = {
predict(pointWithNorm, distanceMeasure.cost(centerWithNorm, pointWithNorm), distanceMeasure)
}
/**
* Predicts the cluster index and the cost of the input point.
* @param pointWithNorm input point
* @param cost the cost to the current center
* @return (predicted leaf cluster index, cost)
*/
@tailrec
private def predict(
pointWithNorm: VectorWithNorm,
cost: Double,
distanceMeasure: DistanceMeasure): (Int, Double) = {
if (isLeaf) {
(index, cost)
} else {
val (selectedChild, minCost) = children.map { child =>
(child, distanceMeasure.cost(child.centerWithNorm, pointWithNorm))
}.minBy(_._2)
selectedChild.predict(pointWithNorm, minCost, distanceMeasure)
}
}
/**
* Returns all leaf nodes from this node.
*/
def leafNodes: Array[ClusteringTreeNode] = {
if (isLeaf) {
Array(this)
} else {
children.flatMap(_.leafNodes)
}
}
}
| maropu/spark | mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala | Scala | apache-2.0 | 20,142 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.read
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import com.esotericsoftware.kryo.io.{ Output, Input }
import org.bdgenomics.utils.misc.Logging
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.serialization.AvroSerializer
import org.bdgenomics.formats.avro.{
AlignmentRecord,
Fragment
}
import scala.collection.JavaConversions._
private class FragmentIterator(
reads: Iterator[AlignmentRecord]) extends Iterator[Iterable[AlignmentRecord]] with Serializable {
private var readIter: BufferedIterator[AlignmentRecord] = reads.buffered
def hasNext: Boolean = {
readIter.hasNext
}
def next: Iterable[AlignmentRecord] = {
// get the read name
val readName = readIter.head.getReadName
// take the reads that have this read name
readIter.takeWhile(_.getReadName == readName).toIterable
}
}
/**
* Companion object for building SingleReadBuckets.
*/
private[read] object SingleReadBucket extends Logging {
private def fromGroupedReads(reads: Iterable[AlignmentRecord]): SingleReadBucket = {
// split by mapping
val (mapped, unmapped) = reads.partition(_.getReadMapped)
val (primaryMapped, secondaryMapped) = mapped.partition(_.getPrimaryAlignment)
// TODO: consider doing validation here
// (e.g. read says mate mapped but it doesn't exist)
new SingleReadBucket(primaryMapped, secondaryMapped, unmapped)
}
/**
* Builds an RDD of SingleReadBuckets from a queryname sorted RDD of AlignmentRecords.
*
* @param rdd The RDD of AlignmentRecords to build the RDD of single read
* buckets from.
* @return Returns an RDD of SingleReadBuckets.
*
* @note We do not validate that the input RDD is sorted by read name.
*/
def fromQuerynameSorted(rdd: RDD[AlignmentRecord]): RDD[SingleReadBucket] = {
rdd.mapPartitions(iter => new FragmentIterator(iter).map(fromGroupedReads))
}
/**
* Builds an RDD of SingleReadBuckets from an RDD of AlignmentRecords.
*
* @param rdd The RDD of AlignmentRecords to build the RDD of single read
* buckets from.
* @return Returns an RDD of SingleReadBuckets.
*/
def apply(rdd: RDD[AlignmentRecord]): RDD[SingleReadBucket] = {
rdd.groupBy(p => (p.getRecordGroupName, p.getReadName))
.map(kv => {
val (_, reads) = kv
fromGroupedReads(reads)
})
}
def apply(fragment: Fragment): SingleReadBucket = {
fromGroupedReads(fragment.getAlignments.toIterable)
}
}
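// Hedged usage sketch (editorial addition, not part of the upstream source): bucketing an
// RDD of reads and converting each bucket to an Avro Fragment, using only the `apply` and
// `toFragment` members defined in this file. The `reads` input is assumed to exist.
//
//   def toFragments(reads: RDD[AlignmentRecord]): RDD[Fragment] =
//     SingleReadBucket(reads).map(_.toFragment)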
/**
* A representation of all of the read alignments that came from a single sequenced
* fragment.
*
* @param primaryMapped All read alignments that are primary alignments.
* @param secondaryMapped All read alignments that are non-primary (i.e.,
* secondary or supplementary alignments).
* @param unmapped All reads from the fragment that are unmapped.
*/
private[adam] case class SingleReadBucket(
primaryMapped: Iterable[AlignmentRecord] = Iterable.empty,
secondaryMapped: Iterable[AlignmentRecord] = Iterable.empty,
unmapped: Iterable[AlignmentRecord] = Iterable.empty) {
/**
* @return The union of the primary, secondary, and unmapped buckets.
*/
def allReads = {
primaryMapped ++ secondaryMapped ++ unmapped
}
/**
* Converts to an Avro Fragment record.
*
* @return Converts this bucket to a Fragment type, which does not have the
* various alignment buckets, but is otherwise equivalent.
*/
def toFragment: Fragment = {
// take union of all reads, as we will need this for building and
// want to pay the cost exactly once
val unionReads = allReads
// start building fragment
val builder = Fragment.newBuilder()
.setReadName(unionReads.head.getReadName)
.setAlignments(seqAsJavaList(allReads.toSeq))
// is an insert size defined for this fragment?
primaryMapped.headOption
.foreach(r => {
Option(r.getInferredInsertSize).foreach(is => {
builder.setFragmentSize(is.toInt)
})
})
// set record group name, if known
Option(unionReads.head.getRecordGroupName)
.foreach(n => builder.setRunId(n))
builder.build()
}
}
class SingleReadBucketSerializer extends Serializer[SingleReadBucket] {
val recordSerializer = new AvroSerializer[AlignmentRecord]()
def writeArray(kryo: Kryo, output: Output, reads: Seq[AlignmentRecord]): Unit = {
output.writeInt(reads.size, true)
for (read <- reads) {
recordSerializer.write(kryo, output, read)
}
}
def readArray(kryo: Kryo, input: Input): Seq[AlignmentRecord] = {
val numReads = input.readInt(true)
(0 until numReads).foldLeft(List[AlignmentRecord]()) {
(a, b) => recordSerializer.read(kryo, input, classOf[AlignmentRecord]) :: a
}
}
def write(kryo: Kryo, output: Output, groupedReads: SingleReadBucket) = {
writeArray(kryo, output, groupedReads.primaryMapped.toSeq)
writeArray(kryo, output, groupedReads.secondaryMapped.toSeq)
writeArray(kryo, output, groupedReads.unmapped.toSeq)
}
def read(kryo: Kryo, input: Input, klazz: Class[SingleReadBucket]): SingleReadBucket = {
val primaryReads = readArray(kryo, input)
val secondaryReads = readArray(kryo, input)
val unmappedReads = readArray(kryo, input)
new SingleReadBucket(primaryReads, secondaryReads, unmappedReads)
}
}
| massie/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/SingleReadBucket.scala | Scala | apache-2.0 | 6,149 |
/**
* Copyright (c) 2012 Alexey Aksenov [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.digi.ctrl.lib.message
import scala.annotation.implicitNotFound
@implicitNotFound(msg = "don't know what to do with message, please define implicit Dispatcher")
trait Dispatcher {
def process(message: DMessage)
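// Hedged example (editorial addition, not part of the upstream source): a minimal implicit
// that satisfies the @implicitNotFound requirement above; the println body is illustrative.
//
//   implicit val consoleDispatcher: Dispatcher = new Dispatcher {
//     def process(message: DMessage) { println(message) }
//   }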
} | ezh/android-DigiLib | src/main/scala/org/digimead/digi/ctrl/lib/message/Dispatcher.scala | Scala | apache-2.0 | 855 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
import Path._
import IO.{pathSplit, wrapNull}
import java.io.File
import java.net.URL
import scala.collection.{generic, immutable, mutable}
final class RichFile(val asFile: File)
{
def / (component: String): File = if(component == ".") asFile else new File(asFile, component)
/** True if and only if the wrapped file exists.*/
def exists = asFile.exists
/** True if and only if the wrapped file is a directory.*/
def isDirectory = asFile.isDirectory
/** The last modified time of the wrapped file.*/
def lastModified = asFile.lastModified
/* True if and only if the wrapped file `asFile` exists and the file `other`
* does not exist or was modified before `asFile`.*/
def newerThan(other: File): Boolean = Path.newerThan(asFile, other)
/* True if and only if the wrapped file `asFile` does not exist or the file `other`
* exists and was modified after `asFile`.*/
def olderThan(other: File): Boolean = Path.newerThan(other, asFile)
/** The wrapped file converted to a <code>URL</code>.*/
def asURL = asFile.toURI.toURL
def absolutePath: String = asFile.getAbsolutePath
/** The last component of this path.*/
def name = asFile.getName
/** The extension part of the name of this path. This is the part of the name after the last period, or the empty string if there is no period.*/
def ext = baseAndExt._2
/** The base of the name of this path. This is the part of the name before the last period, or the full name if there is no period.*/
def base = baseAndExt._1
def baseAndExt: (String, String) =
{
val nme = name
val dot = nme.lastIndexOf('.')
if(dot < 0) (nme, "") else (nme.substring(0, dot), nme.substring(dot+1))
}
def relativize(sub: File): Option[File] = Path.relativizeFile(asFile, sub)
def relativeTo(base: File): Option[File] = Path.relativizeFile(base, asFile)
def hash: Array[Byte] = Hash(asFile)
def hashString: String = Hash.toHex(hash)
def hashStringHalf: String = Hash.halve(hashString)
}
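// Hedged usage sketch (editorial addition, not part of the upstream source): RichFile is
// normally reached through the `richFile` implicit defined on the Path object below.
//
//   import sbt.Path._
//   val scalaDir = new File("src") / "main" / "scala"   // File("src/main/scala")
//   new File("project/Build.scala").ext                 // "scala"
//   scalaDir.relativeTo(new File("src"))                // Some(File("main/scala")), if "src" is an existing directory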
import java.io.File
import File.pathSeparator
trait PathLow
{
implicit def singleFileFinder(file: File): PathFinder = PathFinder(file)
}
trait PathExtra extends Alternatives with Mapper with PathLow
{
implicit def richFile(file: File): RichFile = new RichFile(file)
implicit def filesToFinder(cc: Traversable[File]): PathFinder = PathFinder.strict(cc)
}
object Path extends PathExtra
{
def apply(f: File): RichFile = new RichFile(f)
def apply(f: String): RichFile = new RichFile(new File(f))
def fileProperty(name: String): File = new File(System.getProperty(name))
def userHome: File = fileProperty("user.home")
def absolute(file: File): File = new File(file.toURI.normalize).getAbsoluteFile
def makeString(paths: Seq[File]): String = makeString(paths, pathSeparator)
def makeString(paths: Seq[File], sep: String): String = paths.map(_.getAbsolutePath).mkString(sep)
def newerThan(a: File, b: File): Boolean = a.exists && (!b.exists || a.lastModified > b.lastModified)
/** The separator character of the platform.*/
val sep = java.io.File.separatorChar
def relativizeFile(baseFile: File, file: File): Option[File] = relativize(baseFile, file).map { path => new File(path) }
private[sbt] def relativize(baseFile: File, file: File): Option[String] =
{
val pathString = file.getAbsolutePath
baseFileString(baseFile) flatMap
{
baseString =>
{
if(pathString.startsWith(baseString))
Some(pathString.substring(baseString.length))
else
None
}
}
}
private def baseFileString(baseFile: File): Option[String] =
if(baseFile.isDirectory)
{
val cp = baseFile.getAbsolutePath
assert(cp.length > 0)
if(cp.charAt(cp.length - 1) == File.separatorChar)
Some(cp)
else
Some(cp + File.separatorChar)
}
else
None
def toURLs(files: Seq[File]): Array[URL] = files.map(_.toURI.toURL).toArray
}
object PathFinder
{
/** A <code>PathFinder</code> that always produces the empty set of <code>Path</code>s.*/
val empty = new PathFinder { private[sbt] def addTo(fileSet: mutable.Set[File]) {} }
def strict(files: Traversable[File]): PathFinder = apply(files)
def apply(files: => Traversable[File]): PathFinder = new PathFinder {
private[sbt] def addTo(fileSet: mutable.Set[File]) = fileSet ++= files
}
def apply(file: File): PathFinder = new SingleFile(file)
}
/** A path finder constructs a set of paths. The set is evaluated by a call to the <code>get</code>
* method. The set will be different for different calls to <code>get</code> if the underlying filesystem
* has changed.*/
sealed abstract class PathFinder
{
/** The union of the paths found by this <code>PathFinder</code> with the paths found by 'paths'.*/
def +++(paths: PathFinder): PathFinder = new Paths(this, paths)
/** Excludes all paths from <code>excludePaths</code> from the paths selected by this <code>PathFinder</code>.*/
def ---(excludePaths: PathFinder): PathFinder = new ExcludeFiles(this, excludePaths)
/** Constructs a new finder that selects all paths with a name that matches <code>filter</code> and are
* descendants of paths selected by this finder.*/
def **(filter: FileFilter): PathFinder = new DescendantOrSelfPathFinder(this, filter)
def *** : PathFinder = **(AllPassFilter)
/** Constructs a new finder that selects all paths with a name that matches <code>filter</code> and are
* immediate children of paths selected by this finder.*/
def *(filter: FileFilter): PathFinder = new ChildPathFinder(this, filter)
/** Constructs a new finder that selects all paths with name <code>literal</code> that are immediate children
* of paths selected by this finder.*/
def / (literal: String): PathFinder = new ChildPathFinder(this, new ExactFilter(literal))
/** Constructs a new finder that selects all paths with name <code>literal</code> that are immediate children
* of paths selected by this finder.*/
final def \ (literal: String): PathFinder = this / literal
def x_![T](mapper: File => Option[T]): Traversable[(File,T)] = x(mapper, false)
/** Applies `mapper` to each path selected by this PathFinder and returns the path paired with the non-empty result.
* If the result is empty (None) and `errorIfNone` is true, an exception is thrown.
* If `errorIfNone` is false, the path is dropped from the returned Traversable.*/
def pair[T](mapper: File => Option[T], errorIfNone: Boolean = true): Seq[(File,T)] =
x(mapper, errorIfNone)
/** Applies `mapper` to each path selected by this PathFinder and returns the path paired with the non-empty result.
* If the result is empty (None) and `errorIfNone` is true, an exception is thrown.
* If `errorIfNone` is false, the path is dropped from the returned Traversable.*/
def x[T](mapper: File => Option[T], errorIfNone: Boolean = true): Seq[(File,T)] =
{
val apply = if(errorIfNone) mapper | fail else mapper
for(file <- get; mapped <- apply(file)) yield (file, mapped)
}
/** Selects all descendant paths with a name that matches <code>include</code> and do not have an intermediate
* path with a name that matches <code>intermediateExclude</code>. Typical usage is:
*
* <code>descendantsExcept("*.jar", ".svn")</code>*/
def descendantsExcept(include: FileFilter, intermediateExclude: FileFilter): PathFinder =
(this ** include) --- (this ** intermediateExclude ** include)
@deprecated("Use `descendantsExcept` instead.", "0.12.0")
def descendentsExcept(include: FileFilter, intermediateExclude: FileFilter): PathFinder =
descendantsExcept(include, intermediateExclude)
/** Evaluates this finder and converts the results to a `Seq` of distinct `File`s. The files returned by this method will reflect the underlying filesystem at the
* time of calling. If the filesystem changes, two calls to this method might be different.*/
final def get: Seq[File] =
{
import collection.JavaConversions._
val pathSet: mutable.Set[File] = new java.util.LinkedHashSet[File]
addTo(pathSet)
pathSet.toSeq
}
@deprecated("Use `get`"/*, "0.9.7"*/) def getFiles: Seq[File] = get
/** Only keeps paths for which `f` returns true. It is non-strict, so it is not evaluated until the returned finder is evaluated.*/
final def filter(f: File => Boolean): PathFinder = PathFinder(get filter f)
/* Non-strict flatMap: no evaluation occurs until the returned finder is evaluated.*/
final def flatMap(f: File => PathFinder): PathFinder = PathFinder(get.flatMap(p => f(p).get))
/** Evaluates this finder and converts the results to an `Array` of `URL`s.*/
final def getURLs: Array[URL] = get.toArray.map(_.toURI.toURL)
/** Evaluates this finder and converts the results to a distinct sequence of absolute path strings.*/
final def getPaths: Seq[String] = get.map(_.absolutePath)
private[sbt] def addTo(fileSet: mutable.Set[File])
/** Create a PathFinder from this one where each path has a unique name.
* A single path is arbitrarily selected from the set of paths with the same name.*/
def distinct: PathFinder = PathFinder { get.map(p => (p.asFile.getName, p)).toMap.values }
/** Constructs a string by evaluating this finder, converting the resulting Paths to absolute path strings, and joining them with the platform path separator.*/
final def absString = Path.makeString(get)
/** Constructs a debugging string for this finder by evaluating it and separating paths by newlines.*/
override def toString = get.mkString("\n ", "\n ","")
}
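// Hedged usage sketch (editorial addition, not part of the upstream source): combining the
// finder operators defined above. It assumes the `singleFileFinder` implicit from PathLow is
// in scope (e.g. via `import sbt.Path._`) and that plain strings convert to name filters
// through sbt's glob conversion.
//
//   val base = new File("src")
//   val allScala: Seq[File] = (base ** "*.scala").get
//   val topLevelJars: Seq[File] = (base * "*.jar").get
//   val nonTarget: Seq[File] = ((base ** "*.scala") --- (base / "target" ** "*.scala")).get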
private class SingleFile(asFile: File) extends PathFinder
{
private[sbt] def addTo(fileSet: mutable.Set[File]): Unit = if(asFile.exists) fileSet += asFile
}
private abstract class FilterFiles extends PathFinder with FileFilter
{
def parent: PathFinder
def filter: FileFilter
final def accept(file: File) = filter.accept(file)
protected def handleFile(file: File, fileSet: mutable.Set[File]): Unit =
for(matchedFile <- wrapNull(file.listFiles(this)))
fileSet += new File(file, matchedFile.getName)
}
private class DescendantOrSelfPathFinder(val parent: PathFinder, val filter: FileFilter) extends FilterFiles
{
private[sbt] def addTo(fileSet: mutable.Set[File])
{
for(file <- parent.get)
{
if(accept(file))
fileSet += file
handleFileDescendant(file, fileSet)
}
}
private def handleFileDescendant(file: File, fileSet: mutable.Set[File])
{
handleFile(file, fileSet)
for(childDirectory <- wrapNull(file listFiles DirectoryFilter))
handleFileDescendant(new File(file, childDirectory.getName), fileSet)
}
}
private class ChildPathFinder(val parent: PathFinder, val filter: FileFilter) extends FilterFiles
{
private[sbt] def addTo(fileSet: mutable.Set[File]): Unit =
for(file <- parent.get)
handleFile(file, fileSet)
}
private class Paths(a: PathFinder, b: PathFinder) extends PathFinder
{
private[sbt] def addTo(fileSet: mutable.Set[File])
{
a.addTo(fileSet)
b.addTo(fileSet)
}
}
private class ExcludeFiles(include: PathFinder, exclude: PathFinder) extends PathFinder
{
private[sbt] def addTo(pathSet: mutable.Set[File])
{
val includeSet = new mutable.LinkedHashSet[File]
include.addTo(includeSet)
val excludeSet = new mutable.HashSet[File]
exclude.addTo(excludeSet)
includeSet --= excludeSet
pathSet ++= includeSet
}
}
| harrah/xsbt | util/io/src/main/scala/sbt/Path.scala | Scala | bsd-3-clause | 11,217 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.batches
/** [[Batch]] implementation specialized for `Int`.
*
* Under the hood it uses an [[monix.tail.batches.ArrayBatch ArrayBatch]]
* implementation, which is `@specialized`. Using `IntegersBatch`
* might be desirable instead for `isInstanceOf` checks.
*/
final class IntegersBatch(underlying: ArrayBatch[Int])
extends Batch[Int] {
override def cursor(): IntegersCursor =
new IntegersCursor(underlying.cursor())
override def take(n: Int): IntegersBatch =
new IntegersBatch(underlying.take(n))
override def drop(n: Int): IntegersBatch =
new IntegersBatch(underlying.drop(n))
override def slice(from: Int, until: Int): IntegersBatch =
new IntegersBatch(underlying.slice(from, until))
override def filter(p: (Int) => Boolean): IntegersBatch =
new IntegersBatch(underlying.filter(p))
override def map[B](f: (Int) => B): ArrayBatch[B] =
underlying.map(f)
override def collect[B](pf: PartialFunction[Int, B]): ArrayBatch[B] =
underlying.collect(pf)
override def foldLeft[R](initial: R)(op: (R, Int) => R): R =
underlying.foldLeft(initial)(op)
}
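// Hedged usage note (editorial addition, not part of the upstream source): the combinators
// above stay in the specialized representation, e.g.
//   def evens(batch: IntegersBatch): IntegersBatch = batch.filter(_ % 2 == 0)
// while `map` and `collect` widen back to a generic ArrayBatch[B].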
| Wogan/monix | monix-tail/shared/src/main/scala/monix/tail/batches/IntegersBatch.scala | Scala | apache-2.0 | 1,801 |
package com.sksamuel.elastic4s.http.search.aggs
import com.sksamuel.elastic4s.searches.aggs.TopHitsAggregationDefinition
import com.sksamuel.elastic4s.searches.sort.FieldSortDefinition
import org.scalatest.{FunSuite, Matchers}
class TopHitsAggregationBuilderTest extends FunSuite with Matchers {
test("top hits aggregation should generate expected json") {
val q = TopHitsAggregationDefinition("top_items")
.size(5)
.version(true)
.explain(false)
.sortBy(List(FieldSortDefinition("price")))
TopHitsAggregationBuilder(q).string() shouldBe
"""{"top_hits":{"size":5,"sort":[{"price":{"order":"asc"}}],"explain":false,"version":true}}"""
}
}
| FabienPennequin/elastic4s | elastic4s-http/src/test/scala/com/sksamuel/elastic4s/http/search/aggs/TopHitsAggregationBuilderTest.scala | Scala | apache-2.0 | 682 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.rst
import cats.data.NonEmptyChain
import laika.ast._
import laika.bundle.{BlockParser, BlockParserBuilder}
import laika.collection.Stack
import laika.collection.TransitionalCollectionOps.Zip3Iterator
import laika.parse.builders._
import laika.parse.implicits._
import laika.parse._
import laika.parse.markup.RecursiveParsers
import scala.annotation.nowarn
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/** Provides parsers for the two table types supported by reStructuredText.
*
* @author Jens Halm
*/
object TableParsers {
private abstract class TableElement
private abstract class TableDecoration extends TableElement
private case object Intersection extends TableDecoration {
override def toString = "+"
}
private case object RowSeparator extends TableDecoration
private case object TableBoundary extends TableDecoration
private case class CellSeparator (decoration: String) extends TableDecoration {
override def toString = decoration
}
private case class CellElement (text: LineSource) extends TableElement {
override def toString = text.input
}
private class CellBuilder (recParser: RecursiveParsers) {
private val seps = new ListBuffer[TableElement]
private val previousLines = new ListBuffer[LineSource]
private var currentLine: Option[LineSource] = None
private def allLines: mutable.Buffer[LineSource] = previousLines ++ currentLine.toBuffer
var rowSpan = 1
var colSpan = 1
var removed: Boolean = false
def nextLine (sep: TableElement, line: LineSource, nextRow: Boolean): Unit = {
seps += sep
currentLine.foreach(previousLines += _)
currentLine = Some(line)
if (nextRow) rowSpan += 1
}
def currentLine (sep: TableElement, line: LineSource): Unit = {
currentLine.foreach { current =>
currentLine = Some(LineSource(current.input + sep.toString + line.input, current.parent))
}
}
def merge (rightBuilder: CellBuilder): Unit = if (currentLine.isDefined) {
val newLines = Zip3Iterator(allLines, rightBuilder.seps, rightBuilder.allLines).map {
case (left, sep, right) =>
LineSource(left.input + sep.toString + right.input, left.parent)
}.toSeq
previousLines.clear()
previousLines ++= newLines.tail
currentLine = newLines.headOption
colSpan += 1
}
@nowarn("cat=deprecation")
def trimmedCellContent: Option[BlockSource] = {
NonEmptyChain.fromSeq(allLines.toSeq).map { nonEmptyLines =>
val minIndent = nonEmptyLines.map { line =>
if (line.input.trim.isEmpty) Int.MaxValue
else line.input.prefixLength(_ == ' ')
}.iterator.min
val trimmedLines = nonEmptyLines.map { line =>
if (line.input.trim.isEmpty) LineSource("", line.parent) else {
val padding = " " * (line.input.prefixLength(_ == ' ') - minIndent)
LineSource(padding + line.input.trim, line.parent.consume(minIndent))
}
}
BlockSource(trimmedLines)
}
}
def parsedCellContent: Seq[Block] = trimmedCellContent.fold[Seq[Block]](Nil)(src => recParser.recursiveBlocks.parse(src).getOrElse(Nil))
def toCell (ct: CellType): Cell = Cell(ct, parsedCellContent, colSpan, rowSpan)
}
private class CellBuilderRef (val cell: CellBuilder, val mergedLeft: Boolean = false)
private class RowBuilder {
private val cells = new ListBuffer[CellBuilder]
def addCell (cell: CellBuilder): Unit = cells += cell
def toRow (ct: CellType): Row = Row(cells.filterNot(_.removed).map(_.toCell(ct)).toList)
}
private class ColumnBuilder (left: Option[ColumnBuilder], recParser: RecursiveParsers) {
private var rowSpan = 1 // only used for sanity checks
private val cells = new Stack[CellBuilderRef]
def currentCell: CellBuilder = cells.top.cell
def previousCell: CellBuilder = cells.elements(1).cell
def nextCell: CellBuilder = {
if (cells.nonEmpty && cells.top.mergedLeft && rowspanDif != 0)
throw new MalformedTableException("Illegal merging of rows with different cellspans")
val cell = new CellBuilder(recParser)
cells push new CellBuilderRef(cell)
cell
}
private def removeCell: CellBuilder = {
val cell = cells.pop.cell
cell.removed = true
cell
}
def mergeLeft (previous: Boolean = false): Unit = {
if (rowspanDif != 0)
throw new MalformedTableException("Illegal merging of cells with different rowspans")
val leftCell = if (previous) left.get.previousCell else left.get.currentCell
leftCell.merge(removeCell)
cells push new CellBuilderRef(leftCell, true)
}
def rowspanDif: Int = left.get.rowSpan - rowSpan
def addLine (sep: TableElement, line: LineSource, nextRow: Boolean): Unit = {
val ref = cells.top
if (ref.mergedLeft) {
if (nextRow && rowspanDif != 1)
throw new MalformedTableException("Illegal merging of rows with different cellspans")
ref.cell.currentLine(sep, line)
}
else {
ref.cell.nextLine(sep, line, nextRow)
sep match {
case CellElement(_) => mergeLeft()
case _ => ()
}
}
if (nextRow) rowSpan += 1
}
}
private class TableBuilder (columnWidths: List[Int], recParser: RecursiveParsers) {
private object ColumnFactory {
var lastColumn: Option[ColumnBuilder] = None
val columnWidthIt = columnWidths.iterator
def next = { lastColumn = Some(new ColumnBuilder(lastColumn, recParser)); lastColumn.get }
}
val columns: List[ColumnBuilder] = List.fill(columnWidths.length)(ColumnFactory.next)
private val rows = new ListBuffer[RowBuilder]
private def init (): Unit = {
val row = nextRow
columns.foreach(col => row.addCell(col.nextCell))
}
init()
def nextRow: RowBuilder = {
val row = new RowBuilder
rows += row
row
}
def toRowList (ct: CellType): List[Row] = rows.map(_.toRow(ct)).toList
}
private def flattenElements (result: Any): List[TableElement] = result match {
case x:TableElement => List(x)
case x ~ y => flattenElements(x) ::: flattenElements(y)
}
/** Parses a grid table.
*
* See [[http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#grid-tables]].
*/
lazy val gridTable: BlockParserBuilder = BlockParser.recursive { recParsers =>
val intersectChar = '+'
val intersect = oneOf(intersectChar).as(Intersection)
val rowSep = someOf('-').count
val topBorder = intersect ~> (rowSep <~ intersect).rep.min(1) <~ wsEol
val colSep = oneOf('|').as(CellSeparator("|")) | intersect
val colSepOrText = colSep | oneChar.line.map(CellElement.apply)
topBorder >> { cols =>
val separators = colSep :: List.fill(cols.length - 1)(colSepOrText)
val colsWithSep = Zip3Iterator(separators, cols, separators.reverse)
def rowSep (width: Int): Parser[Any] =
intersect ~ anyOf('-').take(width).as(RowSeparator) <~ nextIn(intersectChar)
def boundaryPart (width: Int): Parser[Any] =
intersect ~ anyOf('=').take(width).as(TableBoundary) <~ nextIn(intersectChar)
def cell (sepL: Parser[Any], width: Int, sepR: Parser[Any]): Parser[Any] =
sepL ~ anyChars.take(width).line.map(CellElement.apply) <~ lookAhead(sepR)
val row = colsWithSep.map {
case (separatorL, colWidth, separatorR) =>
rowSep(colWidth) | cell(separatorL, colWidth, separatorR)
}
.reduceRight(_ ~ _)
.map(flattenElements)
val tableBoundary: Parser[TableDecoration] =
cols.map(boundaryPart).reduceRight(_ ~ _).as(TableBoundary)
def isSeparatorRow (row: List[TableElement]): Boolean = {
row.forall {
case RowSeparator => true
case Intersection => true
case _ => false
}
}
def buildRowList (rows: List[List[TableElement]], ct: CellType): List[Row] = {
val tableBuilder = new TableBuilder(cols map (_ + 1), recParsers) // column width includes separator
rows foreach { row =>
val hasSeparator = row exists { case RowSeparator => true; case _ => false }
val newRowBuilder = if (hasSeparator) Some(tableBuilder.nextRow) else None
row.sliding(2,2).zip(tableBuilder.columns.iterator).foreach {
case (_ :: RowSeparator :: Nil, column) => newRowBuilder.get.addCell(column.nextCell)
case (sep :: CellElement(text) :: Nil, column) => column.addLine(sep, text, hasSeparator)
case _ => () // cannot happen, just to avoid the warning
}
}
tableBuilder.toRowList(ct)
}
def validateLastRow (rows: List[List[TableElement]]): Unit = {
if (rows.isEmpty || !isSeparatorRow(rows.last)) throw new MalformedTableException("Table not terminated correctly")
}
val boundaryRow = tableBoundary <~ oneChar ~ wsEol
val tablePart = (not(tableBoundary) ~> row <~ oneChar ~ wsEol).rep
(tablePart ~ opt(boundaryRow ~> tablePart)).evalMap { result =>
/* Need to fail for certain illegal constructs in the interim model,
* so that the next parser can pick up the (broken) table input */
try {
val table = result match {
case head ~ Some(body) => validateLastRow(body); Table(TableHead(buildRowList(head, HeadCell)), TableBody(buildRowList(body.init, BodyCell)))
case body ~ None => validateLastRow(body); Table(TableHead(Nil), TableBody(buildRowList(body.init, BodyCell)))
}
Right(table)
}
catch {
case ex: MalformedTableException => Left(ex.getMessage)
}
}
}
}
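// Hedged illustration (editorial addition, not part of the upstream source): the shape of
// reStructuredText grid table this parser targets, per the docutils spec linked above.
//
//   +------------+------------+
//   | Header 1   | Header 2   |
//   +============+============+
//   | body cell  | body cell  |
//   +------------+------------+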
/** Parses a simple table.
*
* See [[http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#simple-tables]].
*/
lazy val simpleTable: BlockParserBuilder = BlockParser.recursive { recParsers =>
val intersect = someOf(' ').count
val tableBorder = someOf('=').count
val columnSpec = tableBorder ~ opt(intersect) ^^ {
case col ~ Some(sep) => (col, sep)
case col ~ None => (col, 0)
}
val topBorder = columnSpec.rep.min(2) <~ wsEol
topBorder >> { cols =>
val (rowColumns, boundaryColumns): (Seq[Parser[Any]],Seq[Parser[Any]]) = (cols map { case (col, sep) =>
val cellText = if (sep == 0) anyNot('\n', '\r').line.map(CellElement.apply)
else anyChars.take(col).line.map(CellElement.apply)
val separator = anyOf(' ').take(sep).map(CellSeparator.apply)
val textInSep = anyChars.take(sep).map(CellSeparator.apply)
val textColumn = cellText ~ (separator | textInSep)
val rowSep = anyOf('-').take(col).as(RowSeparator)
val merged = anyOf('-').take(sep).as(RowSeparator)
val split = anyOf(' ').take(sep).as(Intersection)
val underline = rowSep ~ (split | merged)
val bCell = anyOf('=').take(col).as(TableBoundary)
val bMerged = anyOf('=').take(sep).as(TableBoundary)
val bSplit = anyOf(' ').take(sep).as(Intersection)
val boundary = bCell ~ (bSplit | bMerged)
(underline | not(boundary) ~> textColumn, boundary)
}).unzip
val row: Parser[Any] = (rowColumns reduceRight (_ ~ _)) <~ wsEol
val boundary: Parser[Any] = (boundaryColumns reduceRight (_ ~ _)) <~ wsEol
val blank: Parser[Any] = not(eof) ~> blankLine
val tablePart: Parser[List[Any]] = ((blank | row).rep ~ boundary).map {
case rows ~ boundary => rows :+ boundary
}
def buildRowList (rows: List[Any], ct: CellType): List[Row] = {
val tableBuilder = new TableBuilder(cols map { col => col._1 + col._2 }, recParsers)
def addBlankLines (acc: ListBuffer[List[TableElement]], parentSource: SourceCursor) =
acc += cols.flatMap { case (cell, sep) =>
List(CellElement(LineSource(" " * cell, parentSource)), CellSeparator(" " * sep))
}
def addRowSeparators (acc: ListBuffer[List[TableElement]]) =
acc += (cols flatMap { _ => List(RowSeparator, Intersection) })
/* in contrast to the grid table, some rows need to be processed in context,
* as their exact behaviour depends on preceding or following lines. */
val rowBuffer = rows.foldLeft((ListBuffer[List[TableElement]](), 0, false)) { case ((acc, blanks, rowOpen), row) =>
row match {
case result: ~[_,_] =>
val row = flattenElements(result)
row.head match {
case RowSeparator => (acc += row, 0, false)
case TableBoundary => (acc += row, 0, false)
case CellElement(text) =>
if (text.input.trim.isEmpty) for (_ <- 1 to blanks) addBlankLines(acc, text.parent)
else if (rowOpen) addRowSeparators(acc)
(acc += row, 0, true)
case _ => (acc, blanks, rowOpen) // cannot happen, just to avoid the warning
}
case _ => (acc, blanks + 1, rowOpen) // blank line
}
}._1
rowBuffer foreach { row =>
def foreachColumn (row: List[TableElement])(f: ((List[TableElement], ColumnBuilder)) => Any): Unit = {
row.tail.dropRight(1).sliding(2,2).zip(tableBuilder.columns.tail.iterator).foreach(f)
}
row.head match {
case RowSeparator =>
val newRowBuilder = tableBuilder.nextRow
newRowBuilder.addCell(tableBuilder.columns.head.nextCell)
foreachColumn(row) {
case (Intersection :: RowSeparator :: Nil, column) =>
newRowBuilder.addCell(column.nextCell)
case (RowSeparator :: RowSeparator :: Nil, column) =>
column.mergeLeft(true)
newRowBuilder.addCell(column.nextCell)
case _ => ()
}
case TableBoundary =>
foreachColumn(row) {
case (Intersection :: TableBoundary :: Nil, _) => ()
case (TableBoundary :: TableBoundary :: Nil, column) => column.mergeLeft()
case _ => ()
}
case CellElement(text) =>
tableBuilder.columns.head.addLine(CellSeparator(""), text, nextRow = false)
foreachColumn(row) {
case (sep :: CellElement(text) :: Nil, column) => column.addLine(sep, text, nextRow = false)
case _ => ()
}
case _ => ()
}
}
tableBuilder.toRowList(ct)
}
tablePart ~ opt(tablePart) ^^ {
case head ~ Some(body) => Table(TableHead(buildRowList(head, HeadCell)), TableBody(buildRowList(body, BodyCell)))
case body ~ None => Table(TableHead(Nil), TableBody(buildRowList(body, BodyCell)))
}
}
}
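// Hedged illustration (editorial addition, not part of the upstream source): the shape of
// reStructuredText simple table this parser targets, per the docutils spec linked above.
//
//   =====  =====
//   col 1  col 2
//   =====  =====
//   1      2
//   =====  =====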
/** Internal control-flow exception. */
private class MalformedTableException (msg: String) extends RuntimeException(msg)
}
| planet42/Laika | core/shared/src/main/scala/laika/rst/TableParsers.scala | Scala | apache-2.0 | 16,128 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The iotaz package is used to get access to package-private code, mostly for extracting
// the type list from a CopK. Most of the code here that is not specific to SubInject
// was copied from iotaz (because it was private there).
package iotaz.internal
import quasar.fp.Injectable
import iotaz.CopK
import scala.reflect.macros.whitebox
import scalaz._, Scalaz._
import slamdata.Predef._
class SubInjectMacros(val c: whitebox.Context) {
import c.universe._
private[this] val tb = IotaMacroToolbelt(c)
def create[Inner[a] <: CopK[_, a], Outer[a] <: CopK[_, a]](
implicit
evInner: c.WeakTypeTag[Inner[_]],
evOuter: c.WeakTypeTag[Outer[_]]
): c.Expr[Injectable[Inner, Outer]] = {
val Inner = evInner.tpe
val Outer = evOuter.tpe
tb.foldAbort(for {
_ <- guardAssumptions("Inner", Inner)
_ <- guardAssumptions("Outer", Outer)
innerTypes <- extractCoproductTypes(Inner)
outerTypes <- extractCoproductTypes(Outer)
innerTypeToOuterIndex <- innerTypes.traverseU { tpe =>
findIndex(outerTypes, tpe).map(tpe -> _).toSuccessNel(s"$tpe could not be found in $Outer")
}.map(_.toMap).toEither
} yield {
makeInjectable(Inner, Outer, innerTypes, outerTypes, innerTypeToOuterIndex)
})
}
private def makeInjectable(
InnerType: Type,
OuterType: Type,
innerTypes: List[Type],
outerTypes: List[Type],
innerTypeToOuterIndex: Map[Type, Int]
): Tree = {
val Inner = toTypeTree(InnerType)
val Outer = toTypeTree(OuterType)
val NaturalTransformation = tq"_root_.scalaz.NaturalTransformation"
val A = TypeName(c.freshName("A"))
val fa = TermName(c.freshName("fa"))
val CopK = q"_root_.iotaz.CopK"
val projectReturnType = {
val Lambda = TypeName(c.freshName("Lambda"))
val a = TypeName(c.freshName("a"))
tq"({ type $Lambda[$a] = scala.Option[$Inner[$a]] })#$Lambda"
}
val injectCases = innerTypes.zipWithIndex.map {
case (tpe, index) =>
val mappedIndex = innerTypeToOuterIndex(tpe)
cq"$index => $CopK.unsafeApply($mappedIndex, $fa.value)"
}
val projectCases = {
val projectableCases = innerTypes.zipWithIndex.map {
case (tpe, index) =>
val mappedIndex = innerTypeToOuterIndex(tpe)
cq"$mappedIndex => scala.Some($CopK.unsafeApply($index, $fa.value))"
}
val nonProjectableCases = (outerTypes.indices.toSet -- innerTypeToOuterIndex.values).map { index =>
cq"$index => scala.None"
}
projectableCases ++ nonProjectableCases
}
q"""
_root_.quasar.fp.Injectable.make[$Inner, $Outer](
new $NaturalTransformation[$Inner, $Outer] {
@java.lang.SuppressWarnings(scala.Array("org.wartremover.warts.Throw"))
override def apply[$A]($fa: $Inner[$A]): $Outer[$A] = {
$fa.index match {
case ..$injectCases
case other => throw new _root_.java.lang.Exception(
"subinject internal error: index " + other.toString + " out of bounds for " + $fa.toString)
}
}
},
new $NaturalTransformation[$Outer, $projectReturnType] {
@java.lang.SuppressWarnings(scala.Array("org.wartremover.warts.Throw"))
override def apply[$A]($fa: $Outer[$A]): scala.Option[$Inner[$A]] = {
$fa.index match {
case ..$projectCases
case other => throw new _root_.java.lang.Exception(
"subinject internal error: index " + other.toString + " out of bounds for " + $fa.toString)
}
}
}
)
"""
}
private def findIndex(haystack: List[Type], needle: Type) = {
Option(haystack.indexWhere(_ =:= needle)).filter(_ =/= -1)
}
private def extractCoproductTypes(T: Type): Either[NonEmptyList[String], List[Type]] = {
for {
copK <- tb.destructCopK(T).leftMap(NonEmptyList(_))
tpes <- tb.memoizedTListKTypes(copK.L).leftMap(NonEmptyList(_))
} yield tpes
}
private def guardAssumptions(
name: String, T: Type
): Either[NonEmptyList[String], _] = T.resultType match {
case _: ExistentialType => Left(NonEmptyList(
s"type parameter $name was inferred to be existential type $T and must be specified"))
case _ if T =:= typeOf[Nothing] => Left(NonEmptyList(
s"type parameter $name was inferred to be Nothing and must be specified"))
case _ => Right(())
}
/** Converts a `Type` to a `Tree` so that it can be safely
* lifted into quasiquotes
*/
private[this] final def toTypeTree(tpe: Type): Tree = tpe match {
case poly: PolyType => projectPoly(poly)
case TypeRef(_, sym, Nil) => c.internal.gen.mkAttributedIdent(sym)
case _ => c.internal.gen.mkAttributedIdent(tpe.typeSymbol)
}
/** Converts an eta expanded `PolyType` such as `[z]Either[String, z]`
* into a type lambda `Tree` `({ type ξ$[z] = Either[String, z] })#ξ$`.
* The parameter `z` is taken from the original type and used in
* resulting tree.
*/
private[this] final def projectPoly(tpe: PolyType): Tree = {
val lambdaName = TypeName("ξ$")
SelectFromTypeTree(CompoundTypeTree(
Template(
q"_root_.scala.AnyRef" :: Nil,
ValDef(NoMods, termNames.WILDCARD, TypeTree(), EmptyTree),
TypeDef(NoMods, lambdaName, tpe.typeParams.map(internal.typeDef(_)),
q"${tpe.resultType}") :: Nil)),
lambdaName)
}
}
| djspiewak/quasar | foundation/src/main/scala/quasar/contrib/iota/SubInjectMacros.scala | Scala | apache-2.0 | 6,041 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 MineFormers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.mineformers.core.inventory.slot
import net.minecraft.inventory.IInventory
import net.minecraft.item.ItemStack
/**
* OutputSlot
*
* @author PaleoCrafter
*/
class OutputSlot(inventory: IInventory, id: Int, x: Int, y: Int) extends BaseSlot(inventory, id, x, y) {
override def isItemValid(par1ItemStack: ItemStack): Boolean = false
}
| MineFormers/MFCore | src/main/scala/de/mineformers/core/inventory/slot/OutputSlot.scala | Scala | mit | 1,493 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms
import analysis._
import collection.JavaConversions._
import collection.JavaConverters._
import org.orbeon.oxf.xml._
import dom4j.{LocationDocumentResult, Dom4jUtils, LocationData}
import org.orbeon.oxf.xml.XMLConstants._
import java.util.{List ⇒ JList, Set ⇒ JSet, Map ⇒ JMap}
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.{XFormsProperties ⇒ P}
import org.orbeon.oxf.common.{OXFException, Version}
import org.orbeon.oxf.pipeline.api.XMLReceiver
import org.xml.sax.Attributes
import org.orbeon.oxf.xforms.XFormsStaticStateImpl.StaticStateDocument
import state.AnnotatedTemplate
import xbl.Scope
import org.dom4j.{Element, Document}
import org.orbeon.oxf.util.{StringReplacer, NumberUtils}
import org.orbeon.oxf.util.ScalaUtils.{stringOptionToSet, nonEmptyOrNone}
class XFormsStaticStateImpl(
val encodedState: String,
val digest: String,
val startScope: Scope,
metadata: Metadata,
val template: Option[AnnotatedTemplate],
val staticStateDocument: StaticStateDocument)
extends XFormsStaticState {
require(encodedState ne null)
require(digest ne null)
val getIndentedLogger = Loggers.getIndentedLogger("analysis")
val locationData = staticStateDocument.locationData
// Create top-level part once vals are all initialized
val topLevelPart = new PartAnalysisImpl(this, None, startScope, metadata, staticStateDocument)
// Analyze top-level part
topLevelPart.analyze()
// Delegation to top-level part
def dumpAnalysis() = topLevelPart.dumpAnalysis()
def toXML(helper: ContentHandlerHelper) = topLevelPart.toXML(helper)
// Properties
lazy val allowedExternalEvents = stringOptionToSet(Option(getProperty[String](P.EXTERNAL_EVENTS_PROPERTY)))
lazy val isNoscript = XFormsStaticStateImpl.isNoscript(staticStateDocument.nonDefaultProperties)
lazy val isHTMLDocument = staticStateDocument.isHTMLDocument
lazy val isXPathAnalysis = Version.instance.isPEFeatureEnabled(getProperty[Boolean](P.XPATH_ANALYSIS_PROPERTY), P.XPATH_ANALYSIS_PROPERTY)
lazy val sanitizeInput = StringReplacer(getProperty[String](P.SANITIZE_PROPERTY))(getIndentedLogger)
def isCacheDocument = staticStateDocument.isCacheDocument
def isClientStateHandling = staticStateDocument.isClientStateHandling
def isServerStateHandling = staticStateDocument.isServerStateHandling
// Whether to keep the annotated template in the document itself (dynamic state)
// See: http://wiki.orbeon.com/forms/doc/contributor-guide/xforms-state-handling#TOC-Handling-of-the-HTML-template
def isDynamicNoscriptTemplate = isNoscript && ! template.isDefined
def getProperty[T](propertyName: String): T = staticStateDocument.getProperty[T](propertyName)
// Legacy methods
def getAllowedExternalEvents: JSet[String] = allowedExternalEvents
def getNonDefaultProperties: Map[String, AnyRef] = staticStateDocument.nonDefaultProperties
def getStringProperty(propertyName: String) = getProperty[String](propertyName)
def getBooleanProperty(propertyName: String) = getProperty[Boolean](propertyName)
def getIntegerProperty(propertyName: String) = getProperty[Int](propertyName)
}
object XFormsStaticStateImpl {
val BASIC_NAMESPACE_MAPPING =
new NamespaceMapping(Map(
XFORMS_PREFIX → XFORMS_NAMESPACE_URI,
XFORMS_SHORT_PREFIX → XFORMS_NAMESPACE_URI,
XXFORMS_PREFIX → XXFORMS_NAMESPACE_URI,
XXFORMS_SHORT_PREFIX → XXFORMS_NAMESPACE_URI,
XML_EVENTS_PREFIX → XML_EVENTS_NAMESPACE_URI,
XHTML_PREFIX → XMLConstants.XHTML_NAMESPACE_URI,
XHTML_SHORT_PREFIX → XMLConstants.XHTML_NAMESPACE_URI
))
// Create static state from an encoded version. This is used when restoring a static state from a serialized form.
// NOTE: `digest` can be None when using client state, if all we have are serialized static and dynamic states.
def restore(digest: Option[String], encodedState: String) = {
val staticStateDocument = new StaticStateDocument(XFormsUtils.decodeXML(encodedState))
// Restore template
val template = staticStateDocument.template map AnnotatedTemplate.apply
// Restore metadata
val metadata = Metadata(staticStateDocument, template)
new XFormsStaticStateImpl(
encodedState,
staticStateDocument.getOrComputeDigest(digest),
new Scope(null, ""),
metadata,
template,
staticStateDocument
)
}
// Create analyzed static state for the given static state document.
// Used by XFormsToXHTML.
def createFromStaticStateBits(staticStateXML: Document, digest: String, metadata: Metadata, template: AnnotatedTemplate): XFormsStaticStateImpl = {
val startScope = new Scope(null, "")
val staticStateDocument = new StaticStateDocument(staticStateXML)
new XFormsStaticStateImpl(
staticStateDocument.asBase64,
digest,
startScope,
metadata,
staticStateDocument.template map (_ ⇒ template), // only keep the template around if needed
staticStateDocument
)
}
// Create analyzed static state for the given XForms document.
// Used by unit tests.
def createFromDocument(formDocument: Document): (SAXStore, XFormsStaticState) = {
val startScope = new Scope(null, "")
def create(staticStateXML: Document, digest: String, metadata: Metadata, template: AnnotatedTemplate): XFormsStaticStateImpl = {
val staticStateDocument = new StaticStateDocument(staticStateXML)
new XFormsStaticStateImpl(
staticStateDocument.asBase64,
digest,
startScope,
metadata,
staticStateDocument.template map (_ ⇒ template), // only keep the template around if needed
staticStateDocument
)
}
createFromDocument(formDocument, startScope, create)
}
// Create template and analyzed part for the given XForms document.
// Used by xxf:dynamic.
def createPart(staticState: XFormsStaticState, parent: PartAnalysis, formDocument: Document, startScope: Scope) =
createFromDocument(formDocument, startScope, (staticStateDocument: Document, digest: String, metadata: Metadata, _) ⇒ {
val part = new PartAnalysisImpl(staticState, Some(parent), startScope, metadata, new StaticStateDocument(staticStateDocument))
part.analyze()
part
})
private def createFromDocument[T](formDocument: Document, startScope: Scope, create: (Document, String, Metadata, AnnotatedTemplate) ⇒ T): (SAXStore, T) = {
val identity = TransformerUtils.getIdentityTransformerHandler
val documentResult = new LocationDocumentResult
identity.setResult(documentResult)
val metadata = new Metadata
val digestContentHandler = new XMLUtils.DigestContentHandler
val template = new SAXStore
val prefix = startScope.fullPrefix
// Annotator with prefix
class Annotator(extractorReceiver: XMLReceiver) extends XFormsAnnotatorContentHandler(template, extractorReceiver, metadata) {
protected override def rewriteId(id: String) = prefix + id
}
// Extractor with prefix
class Extractor(xmlReceiver: XMLReceiver) extends XFormsExtractorContentHandler(xmlReceiver, metadata, AnnotatedTemplate(template), ".", XXBLScope.inner, startScope.isTopLevelScope, false) {
override def startXFormsOrExtension(uri: String, localname: String, attributes: Attributes, scope: XFormsConstants.XXBLScope) {
val staticId = attributes.getValue("id")
if (staticId ne null) {
val prefixedId = prefix + staticId
if (metadata.getNamespaceMapping(prefixedId) ne null) {
if (startScope.contains(staticId))
throw new OXFException("Duplicate id found for static id: " + staticId)
startScope += staticId → prefixedId
}
}
}
}
// Read the input through the annotator and gather namespace mappings
TransformerUtils.writeDom4j(formDocument, new Annotator(new Extractor(new TeeXMLReceiver(identity, digestContentHandler))))
// Get static state document and create static state object
val staticStateXML = documentResult.getDocument
val digest = NumberUtils.toHexString(digestContentHandler.getResult)
(template, create(staticStateXML, digest, metadata, AnnotatedTemplate(template)))
}
def getPropertyJava[T](nonDefaultProperties: JMap[String, AnyRef], propertyName: String) =
getProperty[T](nonDefaultProperties.asScala, propertyName)
private def defaultPropertyValue(propertyName: String) =
Option(P.getPropertyDefinition(propertyName)) map (_.defaultValue) orNull
def getProperty[T](nonDefaultProperties: collection.Map[String, AnyRef], propertyName: String): T =
nonDefaultProperties.getOrElse(propertyName, defaultPropertyValue(propertyName)).asInstanceOf[T]
// For Java callers
def isNoscriptJava(nonDefaultProperties: JMap[String, AnyRef]) =
isNoscript(nonDefaultProperties.asScala)
// Determine, based on configuration and properties, whether noscript is allowed and enabled
def isNoscript(nonDefaultProperties: collection.Map[String, AnyRef]) = {
val noscriptRequested =
getProperty[Boolean](nonDefaultProperties, P.NOSCRIPT_PROPERTY) &&
getProperty[Boolean](nonDefaultProperties, P.NOSCRIPT_SUPPORT_PROPERTY)
Version.instance.isPEFeatureEnabled(noscriptRequested, P.NOSCRIPT_PROPERTY)
}
// Represent the static state XML document resulting from the extractor
//
// - The underlying document produced by the extractor used to be further transformed to extract various documents.
// This is no longer the case and the underlying document should be considered immutable (it would be good if it
// was in fact immutable).
// - The template, when kept for full update marks, is stored in the static state document as Base64. In noscript
// mode, it is stored in the dynamic state.
class StaticStateDocument(val xmlDocument: Document) {
private def staticStateElement = xmlDocument.getRootElement
require(xmlDocument ne null)
// Pointers to nested elements
def rootControl = staticStateElement.element("root")
def xblElements = rootControl.elements(XBL_XBL_QNAME).asInstanceOf[JList[Element]].asScala
// TODO: if staticStateDocument contains XHTML document, get controls and models from there
// Extract location data
val locationData = staticStateElement.attributeValue("system-id") match {
case systemId: String ⇒ new LocationData(systemId, staticStateElement.attributeValue("line").toInt, staticStateElement.attributeValue("column").toInt)
case _ ⇒ null
}
// Return the last id generated
def lastId: Int = {
val idElement = staticStateElement.element(XFormsExtractorContentHandler.LAST_ID_QNAME)
require(idElement ne null)
val lastId = XFormsUtils.getElementId(idElement)
require(lastId ne null)
Integer.parseInt(lastId)
}
// Optional template as Base64
def template = Option(staticStateElement.element("template")) map (_.getText)
// Extract properties
// NOTE: XFormsExtractorContentHandler takes care of propagating only non-default properties
val nonDefaultProperties = {
for {
element ← Dom4jUtils.elements(staticStateElement, STATIC_STATE_PROPERTIES_QNAME).asScala
attribute ← Dom4jUtils.attributes(element).asScala
propertyName = attribute.getName
propertyValue = P.parseProperty(propertyName, attribute.getValue)
} yield
(propertyName, propertyValue)
} toMap
// Get a property by name
def getProperty[T](propertyName: String): T =
XFormsStaticStateImpl.getProperty[T](nonDefaultProperties, propertyName)
def isCacheDocument = getProperty[Boolean](P.CACHE_DOCUMENT_PROPERTY)
def isClientStateHandling = getProperty[String](P.STATE_HANDLING_PROPERTY) == P.STATE_HANDLING_CLIENT_VALUE
def isServerStateHandling = getProperty[String](P.STATE_HANDLING_PROPERTY) == P.STATE_HANDLING_SERVER_VALUE
val isHTMLDocument = Option(staticStateElement.attributeValue("is-html")) exists (_ == "true")
def getOrComputeDigest(digest: Option[String]) =
digest getOrElse {
val digestContentHandler = new XMLUtils.DigestContentHandler
TransformerUtils.writeDom4j(xmlDocument, digestContentHandler)
NumberUtils.toHexString(digestContentHandler.getResult)
}
// Get the encoded static state
// If an existing state is passed in, use it, otherwise encode from XML, encrypting if necessary.
// NOTE: We do compress the result as we think we can afford this for the static state (probably not so for the dynamic state).
def asBase64 =
XFormsUtils.encodeXML(xmlDocument, compress = true, encrypt = isClientStateHandling, location = true)
}
} | evlist/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/XFormsStaticStateImpl.scala | Scala | lgpl-2.1 | 14,489 |
package com.highcharts
import scala.scalajs.js
@js.native
sealed trait CleanJsObject[+T] extends scalajs.js.Object
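// Illustrative usage sketch (hypothetical values, assuming a Scala.js runtime):
//   val raw = js.Dynamic.literal(a = 1, b = js.undefined).asInstanceOf[js.Object]
//   val cleaned = CleanJsObject(raw) // copy of `raw` without the undefined "b" property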
object CleanJsObject {
def apply(v: js.Object): js.Object = v match {
case null ⇒
null
case und if js.isUndefined(und) ⇒
js.undefined.asInstanceOf[js.Object]
case array if js.Array.isArray(array) ⇒ // Pass arrays
array
case obj ⇒
val newObj: js.Object = new js.Object()
def cleanProperty(p: js.Any): js.Any = p match {
case array if js.Array.isArray(array) ⇒
array.asInstanceOf[js.Array[js.Any]]
.filterNot(js.isUndefined)
.map(cleanProperty)
// case subObj if js.typeOf(subObj) == "object" ⇒
// cleanObject(subObj.asInstanceOf[js.Object])
case newValue ⇒
newValue
}
val properties = js.Object.keys(obj)
.filterNot(key ⇒ js.isUndefined(obj.asInstanceOf[js.Dynamic].selectDynamic(key)))
.map(key ⇒ key → cleanProperty(obj.asInstanceOf[js.Dynamic].selectDynamic(key)))
properties.foreach { case (key, value) ⇒
newObj.asInstanceOf[js.Dynamic].updateDynamic(key)(value)
}
newObj
  }
}
} | Karasiq/scalajs-highcharts | src/main/scala/com/highcharts/CleanJsObject.scala | Scala | mit | 1,225 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.storage
import javax.servlet.http.HttpServletRequest
import org.mockito.Mockito._
import org.apache.spark.SparkFunSuite
import org.apache.spark.status.StreamBlockData
import org.apache.spark.status.api.v1.RDDStorageInfo
import org.apache.spark.storage._
class StoragePageSuite extends SparkFunSuite {
val storageTab = mock(classOf[StorageTab])
when(storageTab.basePath).thenReturn("http://localhost:4040")
val storagePage = new StoragePage(storageTab, null)
val request = mock(classOf[HttpServletRequest])
test("rddTable") {
val rdd1 = new RDDStorageInfo(1,
"rdd1",
10,
10,
StorageLevel.MEMORY_ONLY.description,
100L,
0L,
None,
None)
val rdd2 = new RDDStorageInfo(2,
"rdd2",
10,
5,
StorageLevel.DISK_ONLY.description,
0L,
200L,
None,
None)
val rdd3 = new RDDStorageInfo(3,
"rdd3",
10,
10,
StorageLevel.MEMORY_AND_DISK_SER.description,
400L,
500L,
None,
None)
val xmlNodes = storagePage.rddTable(request, Seq(rdd1, rdd2, rdd3))
val headers = Seq(
"ID",
"RDD Name",
"Storage Level",
"Cached Partitions",
"Fraction Cached",
"Size in Memory",
"Size on Disk")
    assert((xmlNodes \\ "th").map(_.text) === headers)
    assert((xmlNodes \\ "tr").size === 3)
    assert(((xmlNodes \\ "tr")(0) \\ "td").map(_.text.trim) ===
Seq("1", "rdd1", "Memory Deserialized 1x Replicated", "10", "100%", "100.0 B", "0.0 B"))
// Check the url
    assert(((xmlNodes \\ "tr")(0) \\ "td" \ "a")(0).attribute("href").map(_.text) ===
Some("http://localhost:4040/storage/rdd?id=1"))
    assert(((xmlNodes \\ "tr")(1) \\ "td").map(_.text.trim) ===
Seq("2", "rdd2", "Disk Serialized 1x Replicated", "5", "50%", "0.0 B", "200.0 B"))
// Check the url
    assert(((xmlNodes \\ "tr")(1) \\ "td" \ "a")(0).attribute("href").map(_.text) ===
Some("http://localhost:4040/storage/rdd?id=2"))
    assert(((xmlNodes \\ "tr")(2) \\ "td").map(_.text.trim) ===
Seq("3", "rdd3", "Disk Memory Serialized 1x Replicated", "10", "100%", "400.0 B", "500.0 B"))
// Check the url
    assert(((xmlNodes \\ "tr")(2) \\ "td" \ "a")(0).attribute("href").map(_.text) ===
Some("http://localhost:4040/storage/rdd?id=3"))
}
test("empty rddTable") {
assert(storagePage.rddTable(request, Seq.empty).isEmpty)
}
test("streamBlockStorageLevelDescriptionAndSize") {
val memoryBlock = new StreamBlockData("0",
"0",
"localhost:1111",
StorageLevel.MEMORY_ONLY.description,
true,
false,
true,
100,
0)
assert(("Memory", 100) === storagePage.streamBlockStorageLevelDescriptionAndSize(memoryBlock))
val memorySerializedBlock = new StreamBlockData("0",
"0",
"localhost:1111",
StorageLevel.MEMORY_ONLY_SER.description,
true,
false,
false,
memSize = 100,
diskSize = 0)
assert(("Memory Serialized", 100) ===
storagePage.streamBlockStorageLevelDescriptionAndSize(memorySerializedBlock))
val diskBlock = new StreamBlockData("0",
"0",
"localhost:1111",
StorageLevel.DISK_ONLY.description,
false,
true,
false,
0,
100)
assert(("Disk", 100) === storagePage.streamBlockStorageLevelDescriptionAndSize(diskBlock))
}
test("receiverBlockTables") {
val blocksForExecutor0 = Seq(
new StreamBlockData(StreamBlockId(0, 0).name,
"0",
"localhost:10000",
StorageLevel.MEMORY_ONLY.description,
true,
false,
true,
100,
0),
new StreamBlockData(StreamBlockId(1, 1).name,
"0",
"localhost:10000",
StorageLevel.DISK_ONLY.description,
false,
true,
false,
0,
100)
)
val blocksForExecutor1 = Seq(
new StreamBlockData(StreamBlockId(0, 0).name,
"1",
"localhost:10001",
StorageLevel.MEMORY_ONLY.description,
true,
false,
true,
memSize = 100,
diskSize = 0),
new StreamBlockData(StreamBlockId(1, 1).name,
"1",
"localhost:10001",
StorageLevel.MEMORY_ONLY_SER.description,
true,
false,
false,
100,
0)
)
val xmlNodes = storagePage.receiverBlockTables(blocksForExecutor0 ++ blocksForExecutor1)
    val executorTable = (xmlNodes \\ "table")(0)
val executorHeaders = Seq(
"Executor ID",
"Address",
"Total Size in Memory",
"Total Size on Disk",
"Stream Blocks")
    assert((executorTable \\ "th").map(_.text) === executorHeaders)
    assert((executorTable \\ "tr").size === 2)
    assert(((executorTable \\ "tr")(0) \\ "td").map(_.text.trim) ===
Seq("0", "localhost:10000", "100.0 B", "100.0 B", "2"))
    assert(((executorTable \\ "tr")(1) \\ "td").map(_.text.trim) ===
Seq("1", "localhost:10001", "200.0 B", "0.0 B", "2"))
    val blockTable = (xmlNodes \\ "table")(1)
val blockHeaders = Seq(
"Block ID",
"Replication Level",
"Location",
"Storage Level",
"Size")
    assert((blockTable \\ "th").map(_.text) === blockHeaders)
    assert((blockTable \\ "tr").size === 4)
    assert(((blockTable \\ "tr")(0) \\ "td").map(_.text.trim) ===
Seq("input-0-0", "2", "localhost:10000", "Memory", "100.0 B"))
// Check "rowspan=2" for the first 2 columns
    assert(((blockTable \\ "tr")(0) \\ "td")(0).attribute("rowspan").map(_.text) === Some("2"))
    assert(((blockTable \\ "tr")(0) \\ "td")(1).attribute("rowspan").map(_.text) === Some("2"))
    assert(((blockTable \\ "tr")(1) \\ "td").map(_.text.trim) ===
Seq("localhost:10001", "Memory", "100.0 B"))
    assert(((blockTable \\ "tr")(2) \\ "td").map(_.text.trim) ===
Seq("input-1-1", "2", "localhost:10000", "Disk", "100.0 B"))
// Check "rowspan=2" for the first 2 columns
    assert(((blockTable \\ "tr")(2) \\ "td")(0).attribute("rowspan").map(_.text) === Some("2"))
    assert(((blockTable \\ "tr")(2) \\ "td")(1).attribute("rowspan").map(_.text) === Some("2"))
    assert(((blockTable \\ "tr")(3) \\ "td").map(_.text.trim) ===
Seq("localhost:10001", "Memory Serialized", "100.0 B"))
}
test("empty receiverBlockTables") {
assert(storagePage.receiverBlockTables(Seq.empty).isEmpty)
}
}
| bravo-zhang/spark | core/src/test/scala/org/apache/spark/ui/storage/StoragePageSuite.scala | Scala | apache-2.0 | 7,254 |
package api
import com.github.jknack.handlebars.Handlebars
import com.github.jknack.handlebars.cache.ConcurrentMapTemplateCache
import com.github.jknack.handlebars.io.FileTemplateLoader
import com.github.jknack.handlebars.Template
import models.WebsiteDb
import org.codehaus.jackson.JsonNode
import play.api.libs.json.JsValue
import util.JsValueResolver
import play.api.Logger
object RenderingApi {
import ContentApi.EntityRef
// trait TemplateIdentifier {}
trait TemplatingRule {}
type TemplateRef = Template
val cache = new ConcurrentMapTemplateCache()
private def context(json: JsonNode) = {
import com.github.jknack.handlebars.Context
import com.github.jknack.handlebars.JsonNodeValueResolver
Context
.newBuilder(json)
.resolver(JsonNodeValueResolver.INSTANCE)
.build()
}
private def context(json: JsValue) = {
import com.github.jknack.handlebars.Context
Context
.newBuilder(json)
.resolver(JsValueResolver.INSTANCE)
.build()
}
def findTemplate(ref: EntityRef, host: String, rules: List[TemplatingRule])(websiteDb: WebsiteDb): Option[TemplateRef] = try {
for {
      website <- websiteDb.all.filter(_.name == host).headOption
file = website.path // map { _ + s"$content.html"}
loader = new FileTemplateLoader(file, ".html")
handlebars = (new Handlebars(loader)).`with`(cache)
template = handlebars.compile(ref)
} yield template
} catch {
case e: Exception =>
Logger.error(s"ERR: $host / ref: $ref [${e.getClass}: ${e.getMessage}]")
None
}
def render(content: JsonNode, template: TemplateRef) : String = template.apply(context(content))
def render(content: JsValue, template: TemplateRef) : String = template.apply(context(content))
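  // Illustrative usage sketch (hypothetical ref/host/json values; assumes the EntityRef names a template file):
  //   findTemplate(ref, host, rules = Nil)(websiteDb).map(template => render(json, template))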
}
| jlcanela/fastcms | app/api/RenderingApi.scala | Scala | apache-2.0 | 1,788 |
package fpinscala.state
import fpinscala.state.RNG.{Rand, Simple}
import org.scalacheck.{Gen, Prop}
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
/**
*
*/
class StateTest extends Specification with ScalaCheck {
"RNG" should {
"nonNegativeInt" ! prop { (s:Int) =>
val (i1, rng2) = RNG.nonNegativeInt( Simple (s) )
testPositiveInt(i1)
}
"double" ! prop { (s: Int) =>
val (i1, rng2) = RNG.double (Simple (s) )
testDouble(i1)
}
"intDouble" ! prop { (s: Int) =>
val ((i,d), rng2) = RNG.intDouble (Simple (s) )
testDouble(d) && testInt(i)
}
"doubleInt" ! prop { (s: Int) =>
val ((d,i), rng2) = RNG.doubleInt(Simple (s) )
testDouble(d) && testInt(i)
}
"double3" ! prop { (s: Int) =>
val ((d1,d2,d3), rng2) = RNG.double3(Simple (s) )
testDouble(d1) && testDouble(d2) && testDouble(d3)
}
import scala.math.Numeric.IntIsIntegral
val allNums = Gen.oneOf(Gen.posNum, Gen.negNum)
"ints" ! Prop.forAll(Gen.posNum, allNums) { (count: Int, s: Int) =>
val (is, rng2) = RNG.ints(count)(Simple (s) )
is.size == count
}
"doubleMap" ! prop { (s: Int) =>
val (d, rng) = RNG.doubleMap(Simple(s))
testDouble(d)
}
"map2" ! prop { (s: Int) =>
val ((i,d), rng):((Int, Double), RNG) = RNG.map2(RNG.int, RNG.double)((_,_))(Simple(s))
testInt(i) && testDouble(d)
}
"nonNegativeLessThan" ! Prop.forAll(Gen.posNum) { (s: Int) =>
val (r, _) = RNG.nonNegativeLessThan(s)(Simple(1))
r >= 0 && r < s
}
"mapByFlatMap" ! prop { (s: Int) =>
val (r, _) = RNG.mapByFlatMap(RNG.unit(s))(_ * 2)(Simple(1))
r == s * 2
}
"map2ByFlatMap" ! prop { (n1: Int, n2:Int) =>
val (r, _) = RNG.map2ByFlatMap(RNG.unit(n1), RNG.unit(n2))((p1,p2) => p1 + p2)(Simple(1))
n1 + n2 <= r
}
}
// "State" should {
// "unit" ! prop { (n:Int) =>
// State((n,(_:Any))).unit(2) ==== State(s => (2,s))
//
// }
// }
def testInt(i: Int) : Boolean = {
// not a very smart test....
i >= Int.MinValue && i <= Int.MaxValue
}
def testPositiveInt(i: Int): Boolean = {
i >= 0 && i <= Int.MaxValue
}
def testDouble(d: Double): Boolean = {
d >= 0.0 && d < 1.0
}
}
| karolchmist/fpinscala | exercises/src/test/scala/fpinscala/state/StateTest.scala | Scala | mit | 2,287 |
package net.fwbrasil.activate.entity
import net.fwbrasil.activate.entity.id.CustomID
import net.fwbrasil.activate.entity.id.UUID
import net.fwbrasil.activate.entity.id.GeneratedID
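// The mixins imported above determine the id strategy: UUID-based ids, caller-supplied
// ids (CustomID[ID]), or generated ids (GeneratedID[ID]).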
trait Entity extends BaseEntity with UUID
trait EntityWithCustomID[ID] extends BaseEntity with CustomID[ID]
trait EntityWithGeneratedID[ID] extends BaseEntity with GeneratedID[ID] | xdevelsistemas/activate | activate-core/src/main/scala/net/fwbrasil/activate/entity/Entity.scala | Scala | lgpl-2.1 | 361 |
package org.shelmet.heap.server
import org.shelmet.heap.model.Snapshot
import java.text.SimpleDateFormat
import java.util.{TimeZone,Date}
class HomepagePage(snapshot: Snapshot) extends AbstractPage(snapshot) {
def formatDateAsUTC(date : Date) =
{
val sdf = new SimpleDateFormat("dd MMM yyyy HH:mm:ss z")
sdf.setTimeZone(TimeZone.getTimeZone("UTC"))
sdf.format(date)
}
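  // Example output (assumes an English default locale): formatDateAsUTC(new Date(0L))
  // renders the epoch as "01 Jan 1970 00:00:00 UTC".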
override def run() {
html("Welcome to SHelmet") {
out.println(
"""| Welcome to SHelmet, use the links under 'Reports' above to begin browsing your heap.
|""".stripMargin
)
table {
tableRow {
tableData("Creation time:")
tableData(snapshot.creationDate.map(formatDateAsUTC).getOrElse("Unknown"))
}
tableRow {
tableData("No Objects:")
tableData {
printAnchor("showInstanceCountsIncPlatform/","" + snapshot.noObjects)
}
}
tableRow {
tableData("Classes (including system classes):")
tableData {
printAnchor("allClassesWithPlatform/","" + snapshot.noClasses)
}
}
tableRow {
tableData("User Classes:")
tableData {
printAnchor("allClassesWithoutPlatform/","" + snapshot.noUserClasses)
}
}
}
val top10 = snapshot.allObjects.toList.sortBy(-_.retainedSize).take(10)
h2("Largest 10 objects by retained size")
table {
tableRow {
tableHeader("Object")
tableHeader("Retained Size")
}
top10 foreach { obj =>
tableRow {
tableData(printThing(obj))
tableData(s"${obj.retainedSize}")
}
}
}
}
}
}
| rorygraves/shelmet | src/main/scala/org/shelmet/heap/server/HomepagePage.scala | Scala | gpl-2.0 | 1,754 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.{File, InterruptedIOException, IOException}
import java.util.concurrent.{CountDownLatch, TimeoutException, TimeUnit}
import scala.reflect.ClassTag
import scala.util.control.ControlThrowable
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes
import org.apache.spark.sql.execution.command.ExplainCommand
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreConf, StateStoreId, StateStoreProvider}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.StreamSourceProvider
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.apache.spark.util.Utils
class StreamSuite extends StreamTest {
import testImplicits._
test("map with recovery") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(_ + 1)
testStream(mapped)(
AddData(inputData, 1, 2, 3),
StartStream(),
CheckAnswer(2, 3, 4),
StopStream,
AddData(inputData, 4, 5, 6),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7))
}
test("join") {
// Make a table and ensure it will be broadcast.
val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
// Join the input stream with a table.
val inputData = MemoryStream[Int]
val joined = inputData.toDS().toDF().join(smallTable, $"value" === $"number")
testStream(joined)(
AddData(inputData, 1, 2, 3),
CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two")),
AddData(inputData, 4),
CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two"), Row(4, 4, "four")))
}
test("SPARK-20432: union one stream with itself") {
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load().select("a")
val unioned = df.union(df)
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val query =
unioned
.writeStream.format("parquet")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.start(outputDir.getAbsolutePath)
try {
query.processAllAvailable()
val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long]
checkDatasetUnorderly[Long](outputDf, (0L to 10L).union((0L to 10L)).toArray: _*)
} finally {
query.stop()
}
}
}
}
test("union two streams") {
val inputData1 = MemoryStream[Int]
val inputData2 = MemoryStream[Int]
val unioned = inputData1.toDS().union(inputData2.toDS())
testStream(unioned)(
AddData(inputData1, 1, 3, 5),
CheckAnswer(1, 3, 5),
AddData(inputData2, 2, 4, 6),
CheckAnswer(1, 2, 3, 4, 5, 6),
StopStream,
AddData(inputData1, 7),
StartStream(),
AddData(inputData2, 8),
CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8))
}
test("sql queries") {
val inputData = MemoryStream[Int]
inputData.toDF().createOrReplaceTempView("stream")
val evens = sql("SELECT * FROM stream WHERE value % 2 = 0")
testStream(evens)(
AddData(inputData, 1, 2, 3, 4),
CheckAnswer(2, 4))
}
test("DataFrame reuse") {
def assertDF(df: DataFrame) {
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val query = df.writeStream.format("parquet")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.start(outputDir.getAbsolutePath)
try {
query.processAllAvailable()
val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long]
checkDataset[Long](outputDf, (0L to 10L).toArray: _*)
} finally {
query.stop()
}
}
}
}
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load()
assertDF(df)
assertDF(df)
}
test("Within the same streaming query, one StreamingRelation should only be transformed to one " +
"StreamingExecutionRelation") {
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load()
var query: StreamExecution = null
try {
query =
df.union(df)
.writeStream
.format("memory")
.queryName("memory")
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
query.awaitInitialization(streamingTimeout.toMillis)
val executionRelations =
query
.logicalPlan
.collect { case ser: StreamingExecutionRelation => ser }
assert(executionRelations.size === 2)
assert(executionRelations.distinct.size === 1)
} finally {
if (query != null) {
query.stop()
}
}
}
test("unsupported queries") {
val streamInput = MemoryStream[Int]
val batchInput = Seq(1, 2, 3).toDS()
def assertError(expectedMsgs: Seq[String])(body: => Unit): Unit = {
val e = intercept[AnalysisException] {
body
}
expectedMsgs.foreach { s => assert(e.getMessage.contains(s)) }
}
// Running streaming plan as a batch query
assertError("start" :: Nil) {
streamInput.toDS.map { i => i }.count()
}
// Running non-streaming plan with as a streaming query
assertError("without streaming sources" :: "start" :: Nil) {
val ds = batchInput.map { i => i }
testStream(ds)()
}
// Running streaming plan that cannot be incrementalized
assertError("not supported" :: "streaming" :: Nil) {
val ds = streamInput.toDS.map { i => i }.sort()
testStream(ds)()
}
}
test("minimize delay between batch construction and execution") {
// For each batch, we would retrieve new data's offsets and log them before we run the execution
// This checks whether the key of the offset log is the expected batch id
def CheckOffsetLogLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.offsetLog.getLatest().get._1 == expectedId,
s"offsetLog's latest should be $expectedId")
// Check the latest batchid in the commit log
def CheckCommitLogLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.batchCommitLog.getLatest().get._1 == expectedId,
s"commitLog's latest should be $expectedId")
// Ensure that there has not been an incremental execution after restart
def CheckNoIncrementalExecutionCurrentBatchId(): AssertOnQuery =
AssertOnQuery(_.lastExecution == null, s"lastExecution not expected to run")
// For each batch, we would log the state change during the execution
// This checks whether the key of the state change log is the expected batch id
def CheckIncrementalExecutionCurrentBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.lastExecution.asInstanceOf[IncrementalExecution].currentBatchId == expectedId,
s"lastExecution's currentBatchId should be $expectedId")
// For each batch, we would log the sink change after the execution
// This checks whether the key of the sink change log is the expected batch id
def CheckSinkLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.sink.asInstanceOf[MemorySink].latestBatchId.get == expectedId,
s"sink's lastBatchId should be $expectedId")
val inputData = MemoryStream[Int]
testStream(inputData.toDS())(
StartStream(ProcessingTime("10 seconds"), new StreamManualClock),
/* -- batch 0 ----------------------- */
// Add some data in batch 0
AddData(inputData, 1, 2, 3),
AdvanceManualClock(10 * 1000), // 10 seconds
/* -- batch 1 ----------------------- */
// Check the results of batch 0
CheckAnswer(1, 2, 3),
CheckIncrementalExecutionCurrentBatchId(0),
CheckCommitLogLatestBatchId(0),
CheckOffsetLogLatestBatchId(0),
CheckSinkLatestBatchId(0),
// Add some data in batch 1
AddData(inputData, 4, 5, 6),
AdvanceManualClock(10 * 1000),
/* -- batch _ ----------------------- */
// Check the results of batch 1
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckCommitLogLatestBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
AdvanceManualClock(10 * 1000),
AdvanceManualClock(10 * 1000),
AdvanceManualClock(10 * 1000),
/* -- batch __ ---------------------- */
// Check the results of batch 1 again; this is to make sure that, when there's no new data,
// the currentId does not get logged (e.g. as 2) even if the clock has advanced many times
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckCommitLogLatestBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
/* Stop then restart the Stream */
StopStream,
StartStream(ProcessingTime("10 seconds"), new StreamManualClock(60 * 1000)),
/* -- batch 1 no rerun ----------------- */
// batch 1 would not re-run because the latest batch id logged in commit log is 1
AdvanceManualClock(10 * 1000),
CheckNoIncrementalExecutionCurrentBatchId(),
/* -- batch 2 ----------------------- */
// Check the results of batch 1
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckCommitLogLatestBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
// Add some data in batch 2
AddData(inputData, 7, 8, 9),
AdvanceManualClock(10 * 1000),
/* -- batch 3 ----------------------- */
// Check the results of batch 2
CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8, 9),
CheckIncrementalExecutionCurrentBatchId(2),
CheckCommitLogLatestBatchId(2),
CheckOffsetLogLatestBatchId(2),
CheckSinkLatestBatchId(2))
}
test("insert an extraStrategy") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val inputData = MemoryStream[(String, Int)]
val df = inputData.toDS().map(_._1).toDF("a")
testStream(df)(
AddData(inputData, ("so slow", 1)),
CheckAnswer("so fast"))
} finally {
spark.experimental.extraStrategies = Nil
}
}
testQuietly("handle fatal errors thrown from the stream thread") {
for (e <- Seq(
new VirtualMachineError {},
new ThreadDeath,
new LinkageError,
new ControlThrowable {}
)) {
val source = new Source {
override def getOffset: Option[Offset] = {
throw e
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
throw e
}
override def schema: StructType = StructType(Array(StructField("value", IntegerType)))
override def stop(): Unit = {}
}
val df = Dataset[Int](sqlContext.sparkSession, StreamingExecutionRelation(source))
testStream(df)(
// `ExpectFailure(isFatalError = true)` verifies two things:
// - Fatal errors can be propagated to `StreamingQuery.exception` and
// `StreamingQuery.awaitTermination` like non fatal errors.
// - Fatal errors can be caught by UncaughtExceptionHandler.
ExpectFailure(isFatalError = true)(ClassTag(e.getClass))
)
}
}
test("output mode API in Scala") {
assert(OutputMode.Append === InternalOutputModes.Append)
assert(OutputMode.Complete === InternalOutputModes.Complete)
assert(OutputMode.Update === InternalOutputModes.Update)
}
test("explain") {
val inputData = MemoryStream[String]
val df = inputData.toDS().map(_ + "foo").groupBy("value").agg(count("*"))
// Test `df.explain`
val explain = ExplainCommand(df.queryExecution.logical, extended = false)
val explainString =
spark.sessionState
.executePlan(explain)
.executedPlan
.executeCollect()
.map(_.getString(0))
        .mkString("\n")
assert(explainString.contains("StateStoreRestore"))
assert(explainString.contains("StreamingRelation"))
assert(!explainString.contains("LocalTableScan"))
// Test StreamingQuery.display
val q = df.writeStream.queryName("memory_explain").outputMode("complete").format("memory")
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
try {
assert("No physical plan. Waiting for data." === q.explainInternal(false))
assert("No physical plan. Waiting for data." === q.explainInternal(true))
inputData.addData("abc")
q.processAllAvailable()
val explainWithoutExtended = q.explainInternal(false)
// `extended = false` only displays the physical plan.
assert("LocalRelation".r.findAllMatchIn(explainWithoutExtended).size === 0)
assert("LocalTableScan".r.findAllMatchIn(explainWithoutExtended).size === 1)
// Use "StateStoreRestore" to verify that it does output a streaming physical plan
assert(explainWithoutExtended.contains("StateStoreRestore"))
val explainWithExtended = q.explainInternal(true)
// `extended = true` displays 3 logical plans (Parsed/Optimized/Optimized) and 1 physical
// plan.
assert("LocalRelation".r.findAllMatchIn(explainWithExtended).size === 3)
assert("LocalTableScan".r.findAllMatchIn(explainWithExtended).size === 1)
// Use "StateStoreRestore" to verify that it does output a streaming physical plan
assert(explainWithExtended.contains("StateStoreRestore"))
} finally {
q.stop()
}
}
test("SPARK-19065: dropDuplicates should not create expressions using the same id") {
withTempPath { testPath =>
val data = Seq((1, 2), (2, 3), (3, 4))
data.toDS.write.mode("overwrite").json(testPath.getCanonicalPath)
val schema = spark.read.json(testPath.getCanonicalPath).schema
val query = spark
.readStream
.schema(schema)
.json(testPath.getCanonicalPath)
.dropDuplicates("_1")
.writeStream
.format("memory")
.queryName("testquery")
.outputMode("append")
.start()
try {
query.processAllAvailable()
if (query.exception.isDefined) {
throw query.exception.get
}
} finally {
query.stop()
}
}
}
test("handle IOException when the streaming thread is interrupted (pre Hadoop 2.8)") {
// This test uses a fake source to throw the same IOException as pre Hadoop 2.8 when the
// streaming thread is interrupted. We should handle it properly by not failing the query.
ThrowingIOExceptionLikeHadoop12074.createSourceLatch = new CountDownLatch(1)
val query = spark
.readStream
.format(classOf[ThrowingIOExceptionLikeHadoop12074].getName)
.load()
.writeStream
.format("console")
.start()
assert(ThrowingIOExceptionLikeHadoop12074.createSourceLatch
.await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS),
"ThrowingIOExceptionLikeHadoop12074.createSource wasn't called before timeout")
query.stop()
assert(query.exception.isEmpty)
}
test("handle InterruptedIOException when the streaming thread is interrupted (Hadoop 2.8+)") {
// This test uses a fake source to throw the same InterruptedIOException as Hadoop 2.8+ when the
// streaming thread is interrupted. We should handle it properly by not failing the query.
ThrowingInterruptedIOException.createSourceLatch = new CountDownLatch(1)
val query = spark
.readStream
.format(classOf[ThrowingInterruptedIOException].getName)
.load()
.writeStream
.format("console")
.start()
assert(ThrowingInterruptedIOException.createSourceLatch
.await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS),
"ThrowingInterruptedIOException.createSource wasn't called before timeout")
query.stop()
assert(query.exception.isEmpty)
}
test("SPARK-19873: streaming aggregation with change in number of partitions") {
val inputData = MemoryStream[(Int, Int)]
val agg = inputData.toDS().groupBy("_1").count()
testStream(agg, OutputMode.Complete())(
AddData(inputData, (1, 0), (2, 0)),
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "2")),
CheckAnswer((1, 1), (2, 1)),
StopStream,
AddData(inputData, (3, 0), (2, 0)),
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "5")),
CheckAnswer((1, 1), (2, 2), (3, 1)),
StopStream,
AddData(inputData, (3, 0), (1, 0)),
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")),
CheckAnswer((1, 2), (2, 2), (3, 2)))
}
testQuietly("recover from a Spark v2.1 checkpoint") {
var inputData: MemoryStream[Int] = null
var query: DataStreamWriter[Row] = null
def prepareMemoryStream(): Unit = {
inputData = MemoryStream[Int]
inputData.addData(1, 2, 3, 4)
inputData.addData(3, 4, 5, 6)
inputData.addData(5, 6, 7, 8)
query = inputData
.toDF()
.groupBy($"value")
.agg(count("*"))
.writeStream
.outputMode("complete")
.format("memory")
}
// Get an existing checkpoint generated by Spark v2.1.
// v2.1 does not record # shuffle partitions in the offset metadata.
val resourceUri =
this.getClass.getResource("/structured-streaming/checkpoint-version-2.1.0").toURI
val checkpointDir = new File(resourceUri)
// 1 - Test if recovery from the checkpoint is successful.
prepareMemoryStream()
val dir1 = Utils.createTempDir().getCanonicalFile // not using withTempDir {}, makes test flaky
// Copy the checkpoint to a temp dir to prevent changes to the original.
// Not doing this will lead to the test passing on the first run, but fail subsequent runs.
FileUtils.copyDirectory(checkpointDir, dir1)
// Checkpoint data was generated by a query with 10 shuffle partitions.
// In order to test reading from the checkpoint, the checkpoint must have two or more batches,
// since the last batch may be rerun.
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
var streamingQuery: StreamingQuery = null
try {
streamingQuery =
query.queryName("counts").option("checkpointLocation", dir1.getCanonicalPath).start()
streamingQuery.processAllAvailable()
inputData.addData(9)
streamingQuery.processAllAvailable()
QueryTest.checkAnswer(spark.table("counts").toDF(),
Row("1", 1) :: Row("2", 1) :: Row("3", 2) :: Row("4", 2) ::
Row("5", 2) :: Row("6", 2) :: Row("7", 1) :: Row("8", 1) :: Row("9", 1) :: Nil)
} finally {
if (streamingQuery ne null) {
streamingQuery.stop()
}
}
}
// 2 - Check recovery with wrong num shuffle partitions
prepareMemoryStream()
val dir2 = Utils.createTempDir().getCanonicalFile
FileUtils.copyDirectory(checkpointDir, dir2)
// Since the number of partitions is greater than 10, should throw exception.
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "15") {
var streamingQuery: StreamingQuery = null
try {
intercept[StreamingQueryException] {
streamingQuery =
query.queryName("badQuery").option("checkpointLocation", dir2.getCanonicalPath).start()
streamingQuery.processAllAvailable()
}
} finally {
if (streamingQuery ne null) {
streamingQuery.stop()
}
}
}
}
test("calling stop() on a query cancels related jobs") {
val input = MemoryStream[Int]
val query = input
.toDS()
.map { i =>
while (!org.apache.spark.TaskContext.get().isInterrupted()) {
// keep looping till interrupted by query.stop()
Thread.sleep(100)
}
i
}
.writeStream
.format("console")
.start()
input.addData(1)
// wait for jobs to start
eventually(timeout(streamingTimeout)) {
assert(sparkContext.statusTracker.getActiveJobIds().nonEmpty)
}
query.stop()
// make sure jobs are stopped
eventually(timeout(streamingTimeout)) {
assert(sparkContext.statusTracker.getActiveJobIds().isEmpty)
}
}
test("batch id is updated correctly in the job description") {
val queryName = "memStream"
@volatile var jobDescription: String = null
def assertDescContainsQueryNameAnd(batch: Integer): Unit = {
// wait for listener event to be processed
spark.sparkContext.listenerBus.waitUntilEmpty(streamingTimeout.toMillis)
assert(jobDescription.contains(queryName) && jobDescription.contains(s"batch = $batch"))
}
spark.sparkContext.addSparkListener(new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
jobDescription = jobStart.properties.getProperty(SparkContext.SPARK_JOB_DESCRIPTION)
}
})
val input = MemoryStream[Int]
val query = input
.toDS()
.map(_ + 1)
.writeStream
.format("memory")
.queryName(queryName)
.start()
input.addData(1)
query.processAllAvailable()
assertDescContainsQueryNameAnd(batch = 0)
input.addData(2, 3)
query.processAllAvailable()
assertDescContainsQueryNameAnd(batch = 1)
input.addData(4)
query.processAllAvailable()
assertDescContainsQueryNameAnd(batch = 2)
query.stop()
}
test("should resolve the checkpoint path") {
withTempDir { dir =>
val checkpointLocation = dir.getCanonicalPath
assert(!checkpointLocation.startsWith("file:/"))
val query = MemoryStream[Int].toDF
.writeStream
.option("checkpointLocation", checkpointLocation)
.format("console")
.start()
try {
val resolvedCheckpointDir =
query.asInstanceOf[StreamingQueryWrapper].streamingQuery.resolvedCheckpointRoot
assert(resolvedCheckpointDir.startsWith("file:/"))
} finally {
query.stop()
}
}
}
testQuietly("specify custom state store provider") {
val providerClassName = classOf[TestStateStoreProvider].getCanonicalName
withSQLConf("spark.sql.streaming.stateStore.providerClass" -> providerClassName) {
val input = MemoryStream[Int]
val df = input.toDS().groupBy().count()
val query = df.writeStream.outputMode("complete").format("memory").queryName("name").start()
input.addData(1, 2, 3)
val e = intercept[Exception] {
query.awaitTermination()
}
assert(e.getMessage.contains(providerClassName))
assert(e.getMessage.contains("instantiated"))
}
}
testQuietly("custom state store provider read from offset log") {
val input = MemoryStream[Int]
val df = input.toDS().groupBy().count()
val providerConf1 = "spark.sql.streaming.stateStore.providerClass" ->
"org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider"
val providerConf2 = "spark.sql.streaming.stateStore.providerClass" ->
classOf[TestStateStoreProvider].getCanonicalName
def runQuery(queryName: String, checkpointLoc: String): Unit = {
val query = df.writeStream
.outputMode("complete")
.format("memory")
.queryName(queryName)
.option("checkpointLocation", checkpointLoc)
.start()
input.addData(1, 2, 3)
query.processAllAvailable()
query.stop()
}
withTempDir { dir =>
val checkpointLoc1 = new File(dir, "1").getCanonicalPath
withSQLConf(providerConf1) {
runQuery("query1", checkpointLoc1) // generate checkpoints
}
val checkpointLoc2 = new File(dir, "2").getCanonicalPath
withSQLConf(providerConf2) {
// Verify new query will use new provider that throw error on loading
intercept[Exception] {
runQuery("query2", checkpointLoc2)
}
// Verify old query from checkpoint will still use old provider
runQuery("query1", checkpointLoc1)
}
}
}
}
abstract class FakeSource extends StreamSourceProvider {
private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)
override def sourceSchema(
spark: SQLContext,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): (String, StructType) = ("fakeSource", fakeSchema)
}
/** A fake StreamSourceProvider that creates a fake Source that cannot be reused. */
class FakeDefaultSource extends FakeSource {
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
// Create a fake Source that emits 0 to 10.
new Source {
private var offset = -1L
override def schema: StructType = StructType(StructField("a", IntegerType) :: Nil)
override def getOffset: Option[Offset] = {
if (offset >= 10) {
None
} else {
offset += 1
Some(LongOffset(offset))
}
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
val startOffset = start.map(_.asInstanceOf[LongOffset].offset).getOrElse(-1L) + 1
spark.range(startOffset, end.asInstanceOf[LongOffset].offset + 1).toDF("a")
}
override def stop() {}
}
}
}
/** A fake source that throws the same IOException like pre Hadoop 2.8 when it's interrupted. */
class ThrowingIOExceptionLikeHadoop12074 extends FakeSource {
import ThrowingIOExceptionLikeHadoop12074._
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
createSourceLatch.countDown()
try {
Thread.sleep(30000)
throw new TimeoutException("sleep was not interrupted in 30 seconds")
} catch {
case ie: InterruptedException =>
throw new IOException(ie.toString)
}
}
}
object ThrowingIOExceptionLikeHadoop12074 {
/**
* A latch to allow the user to wait until `ThrowingIOExceptionLikeHadoop12074.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
}
/** A fake source that throws InterruptedIOException like Hadoop 2.8+ when it's interrupted. */
class ThrowingInterruptedIOException extends FakeSource {
import ThrowingInterruptedIOException._
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
createSourceLatch.countDown()
try {
Thread.sleep(30000)
throw new TimeoutException("sleep was not interrupted in 30 seconds")
} catch {
case ie: InterruptedException =>
val iie = new InterruptedIOException(ie.toString)
iie.initCause(ie)
throw iie
}
}
}
object ThrowingInterruptedIOException {
/**
* A latch to allow the user to wait until `ThrowingInterruptedIOException.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
}
class TestStateStoreProvider extends StateStoreProvider {
override def init(
stateStoreId: StateStoreId,
keySchema: StructType,
valueSchema: StructType,
indexOrdinal: Option[Int],
storeConfs: StateStoreConf,
hadoopConf: Configuration): Unit = {
throw new Exception("Successfully instantiated")
}
override def stateStoreId: StateStoreId = null
override def close(): Unit = { }
override def getStore(version: Long): StateStore = null
}
| someorz/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala | Scala | apache-2.0 | 28,774 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.servlet
import org.fusesource.scalate.TemplateEngine
import javax.servlet.http.HttpServlet
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import javax.servlet.{ ServletContext, ServletConfig }
import org.fusesource.scalate.util.{ Log }
object TemplateEngineServlet extends Log {
protected var singleton: TemplateEngineServlet = _
def apply(): TemplateEngineServlet = singleton
def update(servlet: TemplateEngineServlet): Unit = singleton = servlet
def render(
template: String,
templateEngine: TemplateEngine,
servletContext: ServletContext,
request: HttpServletRequest,
response: HttpServletResponse
): Unit = {
val context = new ServletRenderContext(templateEngine, request, response, servletContext)
if (template == null || template.length == 0 || template == "/") {
// lets try find an index page if we are given an empty URI which sometimes happens
// with jersey filter and guice servlet
TemplateEngine.templateTypes.map("index." + _).find(u => servletContext.getResource(u) != null) match {
case Some(name) =>
servletContext.log("asked to resolve uri: " + template + " so delegating to: " + name)
servletContext.getRequestDispatcher(name).forward(request, response)
//context.include(name, true)
case _ =>
servletContext.log("No template available for: " + template)
response.setStatus(HttpServletResponse.SC_NOT_FOUND)
}
} else {
context.include(template, true)
// we should set the OK here as we might be forwarded from the Jersey
// filter after it detected a 404 and found that there's no JAXRS resource at / or foo.ssp or something
response.setStatus(HttpServletResponse.SC_OK)
}
}
}
/**
* Servlet which renders the requested Scalate template.
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class TemplateEngineServlet extends HttpServlet {
var templateEngine: ServletTemplateEngine = _
override def init(config: ServletConfig) {
super.init(config)
templateEngine = createTemplateEngine(config)
// register the template engine and servlet so they can be easily resolved
TemplateEngineServlet() = this
ServletTemplateEngine(getServletContext) = templateEngine
}
/**
* Allow derived servlets to override and customize the template engine from the configuration
*/
protected def createTemplateEngine(config: ServletConfig): ServletTemplateEngine = {
new ServletTemplateEngine(config)
}
override def service(request: HttpServletRequest, response: HttpServletResponse) {
render(request.getServletPath, request, response)
}
def render(template: String, request: HttpServletRequest, response: HttpServletResponse): Unit = {
TemplateEngineServlet.render(template, templateEngine, getServletContext, request, response)
}
}
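
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: a derived servlet that
// uses the protected createTemplateEngine hook documented above. The class
// name and the customisation step are hypothetical placeholders.
// ---------------------------------------------------------------------------
class CustomTemplateEngineServlet extends TemplateEngineServlet {
  override protected def createTemplateEngine(config: ServletConfig): ServletTemplateEngine = {
    val engine = super.createTemplateEngine(config)
    // customise the engine here (e.g. tweak its configuration) before returning it
    engine
  }
}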
| maslovalex/scalate | scalate-core/src/main/scala/org/fusesource/scalate/servlet/TemplateEngineServlet.scala | Scala | apache-2.0 | 3,698 |
package fpinscala.datastructures
sealed trait List[+A] {
def head: A
def tail: List[A]
def setHead[T >: A](elm: T): List[T]
  // This does not work: redeclaring the type parameter A here shadows the trait's A
  // def setHead[A](elm: A): List[A]
def drop(n: Int): List[A] =
if(n == 0)this
else tail.drop(n-1)
def dropWhile(f: A => Boolean): List[A] =
this match {
case Nil => Nil
case Cons(h, t) =>
if(f(h)) tail.dropWhile(f)
else this
}
def init(): List[A] = {
def initIter(l: List[A] = this, acc:List[A] = Nil): List[A] = {
l match {
case Nil => Nil
case Cons(h, t) if t == Nil =>
acc
case Cons(h, t) =>
initIter(t, List.append(acc, Cons(h, Nil)))
}
}
initIter()
}
}
case object Nil extends List[Nothing] {
override def head:Nothing = throw new NoSuchElementException("head of empty list")
override def tail:List[Nothing] = Nil
override def setHead[T >: Nothing](elm:T) :List[Nothing] = Nil
}
case class Cons[+A] (h:A, t: List[A]) extends List[A] {
override def head:A = h
override def tail:List[A] = t
override def setHead[T >: A](elm: T): List[T] = copy[T](h = elm)
  // This does not work: redeclaring the type parameter A here shadows the case class's A
  // override def setHead[A](elm: A): List[A] = copy[A](h = elm)
}
object List {
def sum(ints: List[Int]): Int = ints match {
case Nil => 0
case Cons(x, xs) => x + sum(xs)
}
def product(ds: List[Double]): Double = ds match {
case Nil => 1.0
case Cons(0.0, _) => 0.0
case Cons(x, xs) => x * product(xs)
}
def apply[A](as: A*): List[A] =
if(as.isEmpty) Nil
else Cons(as.head, apply(as.tail: _*))
def append[A](a1: List[A], a2: List[A]): List[A] =
a1 match {
case Nil => a2
case Cons(h ,t) => Cons(h, append(t, a2))
}
def foldRight[A, B](as: List[A], z:B)(f: (A, B) => B): B =
as match {
case Nil => z
case Cons(x, xs) => f(x, foldRight(xs, z)(f))
}
def sum2(ns: List[Int]) = foldRight(ns, 0)((x, y) => x +y)
def product2(ns: List[Double]) = foldRight(ns, 1.0)(_ * _)
def length[A](as: List[A]): Int = foldRight(as, 0)((_, n) => n + 1)
def foldLeft[A, B](as: List[A], z:B)(f: (B, A) => B): B = {
as match {
case Nil => z
case Cons(x, xs) => foldLeft(xs, f(z,x))(f)
}
}
def sum3(ns :List[Int]) = foldLeft(ns, 0)((x,y) => x + y)
def product3(ns: List[Double]) = foldLeft(ns, 1.0)(_*_)
def length3[A](as: List[A]): Int = foldLeft(as, 0)((n, _) => n + 1)
def reverse[A](as: List[A]): List[A] = as match {
case Nil => Nil
case Cons(x, xs) => List.append(reverse(xs), Cons(x, Nil))
}
  // append implemented via a fold: rebuild a1 in front of a2
  def append2[A](a1: List[A], a2: List[A]): List[A] = foldRight(a1, a2){(x, acc) =>
    Cons(x, acc)
  }
def addOne(list:List[Int]): List[Int] = {
list match {
case Nil => Nil
case Cons(h, t) => Cons(h + 1, addOne(t))
}
}
def doubleToString(list:List[Double]): List[String] = {
list match {
case Nil => Nil
case Cons(h, t) => Cons(h.toString, doubleToString(t))
}
}
def map[A, B](as: List[A])(f: A => B): List[B] =
as match {
case Nil => Nil
case Cons(h, t) => Cons(f(h), map(t)(f))
}
def filter[A](as: List[A])(f: A => Boolean): List[A] =
as match {
case Nil => Nil
case Cons(h, t) if !f(h) => filter(t)(f)
case Cons(h, t) => Cons(h, filter(t)(f))
}
def flatMap[A, B](as: List[A])(f: A => List[B]): List[B] =
as match {
case Nil => Nil
case Cons(h, t) => List.append(f(h), flatMap(t)(f))
}
def filter2[A](as: List[A])(f: A => Boolean): List[A] =
flatMap(as) { a =>
if(f(a)) Cons(a, Nil) else Nil
}
def zipPlus(as1: List[Int], as2: List[Int]): List[Int] = {
(as1, as2) match {
case (Nil, Nil) => Nil
case (Nil, _) => as2
case (_, Nil) => as1
case (Cons(h1, t1), Cons(h2, t2)) => Cons(h1 + h2, zipPlus(t1, t2))
}
}
def zipWith[A, B](as1: List[A], as2: List[A])(f: (A, A) => B): List[B] = {
(as1, as2) match {
case (Nil, Nil) => Nil
case (Nil, _) => Nil
case (_, Nil) => Nil
case (Cons(h1, t1), Cons(h2, t2)) =>
Cons(f(h1, h2), zipWith(t1, t2)(f))
}
}
}
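
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original exercise file: drives a
// few of the combinators defined above on a small list. Expected results are
// noted in the trailing comments.
// ---------------------------------------------------------------------------
object ListExamples {
  def main(args: Array[String]): Unit = {
    val xs = List(1, 2, 3, 4, 5)
    println(List.sum(xs))                // 15
    println(List.map(xs)(_ * 2))         // Cons(2, Cons(4, Cons(6, Cons(8, Cons(10, Nil)))))
    println(List.filter(xs)(_ % 2 == 0)) // Cons(2, Cons(4, Nil))
    println(List.foldLeft(xs, 0)(_ + _)) // 15
    println(xs.drop(2))                  // Cons(3, Cons(4, Cons(5, Nil)))
  }
}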
| tarata/FunctionalProgramminginScala | src/main/scala/fpinscala/datastructures/List.scala | Scala | mit | 4,177 |
package com.estus.distribution
import routine._
import org.apache.commons.math3.distribution.ZipfDistribution
object zipf {
/** *
*
* Zipf Distribution
*
* d_ - probability density function
* p_ - cumulative density function
* q_ - inverse cumulative density function
* r_ - random number from that distribution
*
*/
private def dzipf_internal(x: Int, log_p: Boolean, dist: ZipfDistribution): Double = {
(dist.getNumberOfElements, dist.getExponent) match {
case (a, b) if a < 0 || b.isNaN => Double.NaN
case _ =>
if(log_p) dist.logProbability(x) else dist.probability(x)
}
}
def dzipf(x: Int, size: Int, exponent: Double, log_p: Boolean): Double = {
val dist = new ZipfDistribution(size, exponent)
dzipf_internal(x, log_p, dist)
}
def dzipf(x: List[Int], size: Int, exponent: Double, log_p: Boolean): List[Double] = {
if(x.isEmpty) throw new IllegalArgumentException
val dist = new ZipfDistribution(size, exponent)
x.map(tup => dzipf_internal(tup, log_p, dist))
}
private def pzipf_internal(x: Int, lower_tail: Boolean, log_p: Boolean, dist: ZipfDistribution): Double = {
val a = dist.getNumberOfElements
val b = dist.getExponent
if(a < 0 || b.isNaN) throw new IllegalArgumentException
val cumprob = dist.cumulativeProbability(x)
(lower_tail, log_p) match {
case (true, false) => cumprob
case (true, true) => math.log(cumprob)
case (false, false) => 1 - cumprob
case (false, true) => math.log(1 - cumprob)
}
}
def pzipf(x: Int, size: Int, exponent: Double, lower_tail: Boolean, log_p: Boolean): Double = {
val dist = new ZipfDistribution(size, exponent)
pzipf_internal(x, lower_tail, log_p, dist)
}
def pzipf(x: List[Int], size: Int, exponent: Double, lower_tail: Boolean, log_p: Boolean): List[Double] = {
if(x.isEmpty) throw new IllegalArgumentException
val dist = new ZipfDistribution(size, exponent)
x.map(tup => pzipf_internal(tup, lower_tail, log_p, dist))
}
private def qzipf_internal(p: Double, lower_tail: Boolean, log_p: Boolean, dist: ZipfDistribution): Int = {
val p_tmp = (lower_tail, log_p) match {
case (true, false) => p
case (true, true) => math.exp(p)
case (false, false) => 1 - p
case (false, true) => 1 - math.exp(p)
}
dist.inverseCumulativeProbability(p_tmp)
}
def qzipf(p: Double, size: Int, exponent: Double, lower_tail: Boolean, log_p: Boolean): Int = {
val dist = new ZipfDistribution(size, exponent)
qzipf_internal(p, lower_tail, log_p, dist)
}
def qzipf(p: List[Double], size: Int, exponent: Double, lower_tail: Boolean, log_p: Boolean): List[Int] = {
if(p.isEmpty) throw new IllegalArgumentException
val dist = new ZipfDistribution(size, exponent)
p.map(tup => qzipf_internal(tup, lower_tail, log_p, dist))
}
def rzipf(n: Int, size: Int, exponent: Double): List[Int] = {
if(n < 1 || (size + exponent).isNaN) throw new IllegalArgumentException
val dist = new ZipfDistribution(size, exponent)
List.fill(n)(dist.sample)
}
}
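
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original file: evaluates the
// density, CDF, quantile and sampling helpers defined above for a small
// Zipf(size = 10, exponent = 1.0) distribution. The printed values depend on
// the underlying commons-math implementation.
// ---------------------------------------------------------------------------
object zipfExample {
  def main(args: Array[String]): Unit = {
    println(zipf.dzipf(1, 10, 1.0, log_p = false))                      // P(X = 1)
    println(zipf.pzipf(3, 10, 1.0, lower_tail = true, log_p = false))   // P(X <= 3)
    println(zipf.qzipf(0.5, 10, 1.0, lower_tail = true, log_p = false)) // median rank
    println(zipf.rzipf(5, 10, 1.0))                                     // 5 random draws
  }
}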
| EstusDev/Estus | estus-distribution/src/main/scala/zipf.scala | Scala | apache-2.0 | 3,128 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolkit.neuralnetwork.policy
import libcog._
import toolkit.neuralnetwork.WeightBinding
case object EmptyBinding extends WeightBinding {
def initialWeights: Option[Field] = None
def register(weights: Field) = {}
}
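
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: a hypothetical binding
// that supplies a fixed initial weight field instead of none. It assumes the
// two members shown on EmptyBinding are the whole WeightBinding contract.
// ---------------------------------------------------------------------------
case class FixedBinding(initial: Field) extends WeightBinding {
  def initialWeights: Option[Field] = Some(initial)
  def register(weights: Field): Unit = {}
}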
| hpe-cct/cct-nn | src/main/scala/toolkit/neuralnetwork/policy/EmptyBinding.scala | Scala | apache-2.0 | 855 |
package epic.parser
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.util.{Encoder, Index}
import epic.framework.Feature
import epic.parser.projections.GrammarRefinements
import epic.trees.Rule
import epic.features.IndicatorFeature
/**
* A simple Featurizer that just counts lexical and rule productions that are used.
* @author dlwh
*/
@SerialVersionUID(1L)
class ProductionFeaturizer[L, L2, W](val topology: RuleTopology[L], refinements: GrammarRefinements[L, L2],
lGen: L2=>Seq[Feature] = {(x:L2)=>if (x.isInstanceOf[Feature]) Seq(x.asInstanceOf[Feature]) else Seq(IndicatorFeature(x))},
rGen: Rule[L2] => Seq[Feature] = {(x: Rule[L2]) => Seq(x)},
filterRedundantFeatures: Boolean = false) extends RefinedFeaturizer[L, W, Feature] with Serializable {
private val (index_ :Index[Feature], ruleFeatures: Array[Array[Int]], labelFeatures: Array[Array[Int]]) = {
if (filterRedundantFeatures) {
val index = epic.features.buildNonRedundantFeatureIndex[Either[Rule[L2], L2], Feature](refinements.rules.fineIndex.iterator.map(Left(_)) ++ refinements.labels.fineIndex.iterator.map(Right(_)), {
case Left(r) => rGen(r)
case Right(l) => lGen(l)
})
// TODO: I should figure out how to one pass this
val rules = Encoder.fromIndex(refinements.rules.fineIndex).tabulateArray(r => rGen(r).map(index).toArray.filter(_ != -1))
val labels = Encoder.fromIndex(refinements.labels.fineIndex).tabulateArray(l => lGen(l).map(index).toArray.filter(_ != -1))
(index: Index[Feature], rules, labels)
} else {
val index = Index[Feature]()
val rules = Encoder.fromIndex(refinements.rules.fineIndex).tabulateArray(r => rGen(r).map(index.index).toArray)
val labels = Encoder.fromIndex(refinements.labels.fineIndex).tabulateArray(l => lGen(l).map(index.index).toArray)
(index: Index[Feature], rules, labels)
}
}
assert(ruleFeatures.forall(_.nonEmpty))
assert(labelFeatures.forall(_.nonEmpty))
def index = index_
def featuresFor(rule: Rule[L2]) = featuresForRule(refinements.rules.fineIndex(rule))
def featuresFor(label: L2) = featuresForLabel(refinements.labels.fineIndex(label))
def featuresForRule(r: Int): Array[Int] = ruleFeatures(r)
def featuresForLabel(l: Int): Array[Int] = labelFeatures(l)
override def lock: RefinedFeaturizer[L, W, Feature] = this
def anchor(w: IndexedSeq[W]) = new Anchoring {
val words = w
def featuresForBinaryRule(begin: Int, split: Int, end: Int, rule: Int, ref: Int) = {
featuresForRule(refinements.rules.globalize(rule, ref))
}
def featuresForUnaryRule(begin: Int, end: Int, rule: Int, ref: Int) = {
featuresForRule(refinements.rules.globalize(rule, ref))
}
def featuresForSpan(begin: Int, end: Int, tag: Int, ref: Int) = {
featuresForLabel(refinements.labels.globalize(tag, ref))
}
}
}
| langkilde/epic | src/main/scala/epic/parser/ProductionFeaturizer.scala | Scala | apache-2.0 | 3,504 |
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator
package dst
import Prelude._
import Maybe._
/** @author Carlo Micieli
* @since 0.0.1
*/
private[this] sealed trait BinarySearchTree[+K, +V] extends Any with Tree[K, V] {
def get: (K, V)
def lookup[K1 >: K](key: K1)(implicit ord: Ord[K1]): Maybe[V] = {
this match {
case EmptyTree => none
case Node(k, v, _, _) if k == key => just(v)
case Node(k, _, left, right) =>
import Ord.ops._
if (key < k) {
left.lookup(key)
} else {
right.lookup(key)
}
}
}
def insert[K1 >: K, V1 >: V](key: K1, value: V1)(implicit ord: Ord[K1]): Tree[K1, V1] = {
this match {
case EmptyTree => new Node(key, value)
case node @ Node(k, _, _, _) if k == key => node.copy(value = value)
case node @ Node(k, _, left, right) =>
import Ord.ops._
if (key < k) {
node.copy(left = left.insert(key, value))
} else {
node.copy(right = right.insert(key, value))
}
}
}
def min: Maybe[K] = this match {
case EmptyTree => none
case Node(k, _, EmptyTree, _) => just(k)
case Node(_, _, left, _) => left.min
}
def max: Maybe[K] = this match {
case EmptyTree => none
case Node(k, _, _, EmptyTree) => just(k)
case Node(_, _, _, right) => right.max
}
def size: Int = this match {
case EmptyTree => 0
case Node(_, _, left, right) => 1 + left.size + right.size
}
def toList: List[(K, V)] = this match {
case EmptyTree => List.empty[(K, V)]
case Node(k, v, left, right) => left.toList ++ List((k, v)) ++ right.toList
}
def delete[K1 >: K](key: K1)(implicit ord: Ord[K1]): (Maybe[V], Tree[K1, V]) = {
this match {
case EmptyTree => (none, EmptyTree)
case node @ Node(k, _, left, right) if k != key =>
import Ord.ops._
if (key < k) {
val (rem, tree2) = left.delete(key)
(rem, node.copy(left = tree2))
} else {
val (rem, tree2) = right.delete(key)
(rem, node.copy(right = tree2))
}
case Node(k, v, left, EmptyTree) if k == key => (just(v), left)
case Node(k, v, EmptyTree, right) if k == key => (just(v), right)
case Node(_, v, left, right) =>
val minKey = right.min.get
val (minVal, r) = right.delete[K1](minKey)(ord)
(just(v), Node(minKey, minVal.get, left, r))
}
}
def isEmpty: Boolean
def depth: Int = this match {
case EmptyTree => 0
case Node(_, _, left, right) => 1 + scala.math.max(left.depth, right.depth)
}
def upsert[K1 >: K, V1 >: V](key: K1, value: V1)(f: (V1) => V1)(implicit ord: Ord[K1]): Tree[K1, V1] = {
this match {
case EmptyTree => new Node(key, value)
case node @ Node(k, v, _, _) if k == key => node.copy(value = f(v))
case node @ Node(k, _, left, right) =>
import Ord.ops._
if (key < k) {
node.copy(left = left.upsert(key, value)(f))
} else {
node.copy(right = right.upsert(key, value)(f))
}
}
}
def map[V1](f: V => V1): Tree[K, V1] = {
this match {
case EmptyTree => EmptyTree
case Node(k, v, left, right) =>
Node(k, f(v), left.map(f), right.map(f))
}
}
def fold[V1 >: V](f: (V1, V1) => V1): V1 = {
this match {
case EmptyTree => error("Tree.fold: tree is empty")
case Node(_, v, EmptyTree, EmptyTree) => v
case Node(_, v, left, EmptyTree) => f(left.fold(f), v)
case Node(_, v, EmptyTree, right) => f(v, right.fold(f))
case Node(_, v, left, right) => f(v, f(left.fold(f), right.fold(f)))
}
}
def contains[K1 >: K](key: K1)(implicit ord: Ord[K1]): Boolean = {
this match {
case EmptyTree => false
case Node(k, _, _, _) if k == key => true
case Node(k, _, left, right) =>
import Ord.ops._
if (key < k) {
left.contains(key)
} else {
right.contains(key)
}
}
}
override def toString: String = {
this match {
case EmptyTree => "-"
case Node(k, v, l, r) =>
s"($l [$k->$v] $r)"
}
}
}
object BinarySearchTree {
  /** It creates a new, empty binary search tree.
* @tparam K the key type
* @tparam V the value type
* @return an empty `Tree`
*/
def empty[K: Ord, V]: Tree[K, V] = EmptyTree
/** It creates a binary search tree from the list elements.
*
* @param xs the list of elements to insert
* @param ord the ordering
* @tparam K the key type
* @tparam V the value type
* @return a `Tree`
*/
def fromList[K, V](xs: List[(K, V)])(implicit ord: Ord[K]): Tree[K, V] = {
xs.foldLeft(BinarySearchTree.empty[K, V])((tree, x) => tree.insert(x))
}
}
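
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original file. It assumes an
// Ord[Int] instance is available through the Prelude import, as the rest of
// the library expects.
// ---------------------------------------------------------------------------
private[this] object BinarySearchTreeExample {
  // expected result: (just("two"), 3, false)
  def example: (Maybe[String], Int, Boolean) = {
    val tree = BinarySearchTree.empty[Int, String]
      .insert(2, "two")
      .insert(1, "one")
      .insert(3, "three")
    (tree.lookup(2), tree.size, tree.contains(5))
  }
}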
private[this] case object EmptyTree extends BinarySearchTree[Nothing, Nothing] {
def isEmpty: Boolean = true
def get: Nothing = error("Tree.get: this tree is empty")
}
private[this] case class Node[K, V](key: K, value: V, left: Tree[K, V], right: Tree[K, V]) extends BinarySearchTree[K, V] {
def this(key: K, value: V) = {
this(key, value, EmptyTree, EmptyTree)
}
def isEmpty: Boolean = false
def get: (K, V) = (key, value)
} | CarloMicieli/hascalator | core/src/main/scala/io/hascalator/dst/BinarySearchTree.scala | Scala | apache-2.0 | 6,006 |
package spark.storage
import java.io.{Externalizable, ObjectInput, ObjectOutput}
class StorageLevel(
var useDisk: Boolean,
var useMemory: Boolean,
var deserialized: Boolean,
var replication: Int = 1)
extends Externalizable {
// TODO: Also add fields for caching priority, dataset ID, and flushing.
def this(flags: Int, replication: Int) {
this((flags & 4) != 0, (flags & 2) != 0, (flags & 1) != 0, replication)
}
def this() = this(false, true, false) // For deserialization
override def clone(): StorageLevel = new StorageLevel(
this.useDisk, this.useMemory, this.deserialized, this.replication)
override def equals(other: Any): Boolean = other match {
case s: StorageLevel =>
s.useDisk == useDisk &&
s.useMemory == useMemory &&
s.deserialized == deserialized &&
s.replication == replication
case _ =>
false
}
def isValid = ((useMemory || useDisk) && (replication > 0))
def toInt: Int = {
var ret = 0
if (useDisk) {
ret |= 4
}
if (useMemory) {
ret |= 2
}
if (deserialized) {
ret |= 1
}
return ret
}
override def writeExternal(out: ObjectOutput) {
out.writeByte(toInt)
out.writeByte(replication)
}
override def readExternal(in: ObjectInput) {
val flags = in.readByte()
useDisk = (flags & 4) != 0
useMemory = (flags & 2) != 0
deserialized = (flags & 1) != 0
replication = in.readByte()
}
override def toString: String =
"StorageLevel(%b, %b, %b, %d)".format(useDisk, useMemory, deserialized, replication)
}
object StorageLevel {
val NONE = new StorageLevel(false, false, false)
val DISK_ONLY = new StorageLevel(true, false, false)
val MEMORY_ONLY = new StorageLevel(false, true, false)
val MEMORY_ONLY_2 = new StorageLevel(false, true, false, 2)
val MEMORY_ONLY_DESER = new StorageLevel(false, true, true)
val MEMORY_ONLY_DESER_2 = new StorageLevel(false, true, true, 2)
val DISK_AND_MEMORY = new StorageLevel(true, true, false)
val DISK_AND_MEMORY_2 = new StorageLevel(true, true, false, 2)
val DISK_AND_MEMORY_DESER = new StorageLevel(true, true, true)
val DISK_AND_MEMORY_DESER_2 = new StorageLevel(true, true, true, 2)
}
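
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: shows that the
// flag-based encoding used by writeExternal/readExternal round-trips through
// toInt and the (flags, replication) constructor.
// ---------------------------------------------------------------------------
object StorageLevelExample {
  def main(args: Array[String]): Unit = {
    val level = StorageLevel.DISK_AND_MEMORY_DESER_2
    val copy = new StorageLevel(level.toInt, level.replication)
    assert(copy == level && copy.isValid)
    println(copy) // StorageLevel(true, true, true, 2)
  }
}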
| ankurdave/arthur | core/src/main/scala/spark/storage/StorageLevel.scala | Scala | bsd-3-clause | 2,239 |
package demo.components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import scalacss.ProdDefaults._
object ReactInfiniteInfo {
object Style extends StyleSheet.Inline {
import dsl._
val content = style(
textAlign.center,
fontSize(30.px),
paddingTop(40.px)
)
}
val component = ScalaComponent
.builder[Unit]("ReactInfiniteInfo")
.render(P => {
InfoTemplate(componentFilePath = "listviews/ReactInfinite.scala")(
<.div(
<.h3("React Infinite "),
<.p(
"scalajs-react wrapper for ",
RedLink("react infinite", "https://github.com/seatgeek/react-infinite")
),
<.div(
<.h4("Supported Version :"),
<.span("0.7.1")
),
<.div(
<.h4("How To Use :"),
<.p("Follow the installation guide from :",
RedLink("here", "https://github.com/seatgeek/react-infinite#installation"))
)
)
)
})
.build
def apply() = component()
}
| chandu0101/scalajs-react-components | demo/src/main/scala/demo/components/ReactInfiniteInfo.scala | Scala | apache-2.0 | 1,079 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import java.io.{ File => JFile, _ }
import java.net.{ URISyntaxException, URL }
import java.nio.charset.Charset
import java.nio.charset.StandardCharsets._
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{ FileVisitResult, Files, Path, SimpleFileVisitor }
import scala.io.Source
import scala.util.Try
import scala.util.control.NonFatal
import io.gatling.commons.validation._
object Io {
val DefaultBufferSize = 4 * 1024
implicit class RichURL(val url: URL) extends AnyVal {
def jfile: JFile = Try(new JFile(url.toURI))
.recover { case e: URISyntaxException => new JFile(url.getPath) }
.get
def toByteArray: Array[Byte] =
withCloseable(url.openConnection.getInputStream) {
_.toByteArray
}
}
implicit class RichFile(val file: JFile) extends AnyVal {
def toByteArray: Array[Byte] =
withCloseable(new FileInputStream(file)) { is =>
val buf = new Array[Byte](file.length.toInt)
is.read(buf)
buf
}
def validateExistingReadable: Validation[JFile] =
if (!file.exists)
s"File $file doesn't exist".failure
else if (!file.canRead)
s"File $file can't be read".failure
else
file.success
}
implicit class RichInputStream(val is: InputStream) extends AnyVal {
def toString(charset: Charset, bufferSize: Int = DefaultBufferSize): String =
charset match {
case UTF_8 => Utf8InputStreamDecoder.pooled().decode(is)
case US_ASCII => UsAsciiInputStreamDecoder.pooled().decode(is)
case _ =>
val writer = new FastStringWriter(bufferSize)
val reader = new InputStreamReader(is, charset)
reader.copyTo(writer, bufferSize)
writer.toString
}
def toByteArray(): Array[Byte] = {
val os = FastByteArrayOutputStream.pooled()
os.write(is)
os.toByteArray
}
def copyTo(os: OutputStream, bufferSize: Int = DefaultBufferSize): Int = {
def copyLarge(buffer: Array[Byte]): Long = {
var lastReadCount: Int = 0
def read(): Int = {
lastReadCount = is.read(buffer)
lastReadCount
}
var count: Long = 0
while (read() != -1) {
os.write(buffer, 0, lastReadCount)
count += lastReadCount
}
count
}
copyLarge(new Array[Byte](bufferSize)) match {
case l if l > Integer.MAX_VALUE => -1
case l => l.toInt
}
}
}
implicit class RichReader(val reader: Reader) extends AnyVal {
def copyTo(writer: Writer, bufferSize: Int = DefaultBufferSize): Int = {
def copyLarge(buffer: Array[Char]) = {
var lastReadCount: Int = 0
def read(): Int = {
lastReadCount = reader.read(buffer)
lastReadCount
}
var count: Long = 0
while (read() != -1) {
writer.write(buffer, 0, lastReadCount)
count += lastReadCount
}
count
}
copyLarge(new Array[Char](bufferSize)) match {
case l if l > Integer.MAX_VALUE => -1
case l => l.toInt
}
}
}
def withCloseable[T, C <: AutoCloseable](closeable: C)(block: C => T) =
try
block(closeable)
finally
closeable.close()
def withSource[T, C <: Source](closeable: C)(block: C => T) =
try
block(closeable)
finally
closeable.close()
def classpathResourceAsStream(path: String): InputStream =
Option(ClassLoader.getSystemResourceAsStream(path))
.orElse(Option(getClass.getResourceAsStream(path)))
.getOrElse(throw new IllegalStateException(s"Couldn't load $path neither from System ClassLoader nor from current one"))
def deleteDirectoryAsap(directory: Path): Unit =
if (!deleteDirectory(directory)) {
deleteDirectoryOnExit(directory)
}
/**
   * Delete a possibly non-empty directory
*
* @param directory the directory to delete
   * @return whether the directory could be deleted
*/
def deleteDirectory(directory: Path): Boolean = try {
Files.walkFileTree(directory, new SimpleFileVisitor[Path]() {
@throws[IOException]
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
Files.delete(file)
FileVisitResult.CONTINUE
}
@throws[IOException]
override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
Files.delete(dir)
FileVisitResult.CONTINUE
}
})
true
} catch {
case NonFatal(e) => false
}
/**
   * Mark a possibly non-empty directory to be deleted on exit
*
* @param directory the directory to delete
*/
def deleteDirectoryOnExit(directory: Path): Unit =
Files.walkFileTree(directory, new SimpleFileVisitor[Path]() {
@throws[IOException]
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
file.toFile.deleteOnExit()
FileVisitResult.CONTINUE
}
@throws[IOException]
override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
dir.toFile.deleteOnExit()
FileVisitResult.CONTINUE
}
})
}
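
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original file: combines
// withCloseable with the RichInputStream enrichment to read a classpath
// resource as a UTF-8 string. The resource path is supplied by the caller.
// ---------------------------------------------------------------------------
object IoExample {
  import Io._
  def readResourceAsString(path: String): String =
    withCloseable(classpathResourceAsStream(path)) { is =>
      is.toString(UTF_8)
    }
}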
| thkluge/gatling | gatling-commons/src/main/scala/io/gatling/commons/util/Io.scala | Scala | apache-2.0 | 6,116 |
/**
* Illustrates filtering and union to extract lines with "error" or "warning"
*/
package com.oreilly.learningsparkexamples.scala
import org.apache.spark._
import org.apache.spark.SparkContext._
object BasicFilterUnionCombo {
def main(args: Array[String]) {
val conf = new SparkConf
conf.setMaster(args(0))
val sc = new SparkContext(conf)
val inputRDD = sc.textFile(args(1))
val errorsRDD = inputRDD.filter(_.contains("error"))
val warningsRDD = inputRDD.filter(_.contains("warn"))
val badLinesRDD = errorsRDD.union(warningsRDD)
    println(badLinesRDD.collect().mkString("\n"))
}
}
| wuzhong290/learning-spark | src/main/scala/com/oreilly/learningsparkexamples/scala/BasicFilterUnionCombo.scala | Scala | mit | 641 |
package com.skittr.comet
/* *\
(c) 2007 WorldWide Conferencing, LLC
Distributed under an Apache License
http://www.apache.org/licenses/LICENSE-2.0
 \* */
import _root_.scala.actors._
import _root_.scala.actors.Actor._
import _root_.net.liftweb.http._
import _root_.net.liftweb.util.Helpers._
import _root_.net.liftweb.util.{Helpers, Box, Full, Empty, Failure}
import _root_.scala.xml._
import _root_.com.skittr.actor._
import S._
import SHtml._
import _root_.com.skittr.model.{Friend, User}
import _root_.net.liftweb.mapper._
class WatchUser extends CometActor {
private var userActor: Box[UserActor] = Empty
private var messages: List[Message] = Nil
override def defaultPrefix = Full("sk")
private def getUser(ua: UserActor) = (ua !? (400L, GetUserIdAndName)) match {case Some(u: UserIdInfo) => Full(u) case _ => Empty}
def render = {
val ret: NodeSeq = (for (ua <- userActor;
user <- getUser(ua)) yield {
bind("username" -> Text(user.name+" -> "+user.fullName) ,
"content" -> <span>{friendList(user) ++
ajaxForm(textarea("", msg => ua ! SendMessage(msg, "web")) % ("cols" -> "40") ++
submit("msg", () => true))
}</span>) ++
messages.flatMap(msg => bind("username" -> Text(msg.who+" @ "+toInternetDate(msg.when)), "content" -> Text(msg.text)))
}) openOr bind("username" -> Text("N/A"), "content" -> Text("N/A"))
ret
}
override def lowPriority : PartialFunction[Any, Unit] = {
case Timeline(msg) =>
messages = msg
reRender(false)
}
override def localSetup {
userActor = name.flatMap(name => UserList.find(name))
userActor.foreach{ua => ua ! AddTimelineViewer ; messages = (ua !? GetTimeline) match {case Timeline(m) => m; case _ => Nil}}
}
private def friendList(user: UserIdInfo): NodeSeq = <ul>{user.friends.map(f => <li><a href={"/user/"+f}>{f}</a> <a href={"/unfriend/"+f}>Unfriend</a></li>)}</ul>
}
| andreum/liftweb | sites/skittr/src/main/scala/com/skittr/comet/WatchUser.scala | Scala | apache-2.0 | 2,084 |
/*
* Copyright (C) 2009 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo.core
import edu.umd.cs.piccolo.nodes._
trait Figure {
def clear(): Unit
def fgClear(): Unit
def stopRefresh(): Unit
def setPenColor(color: java.awt.Color): Unit
def setPenThickness(t: Double): Unit
def setFillColor(color: java.awt.Color): Unit
type FPoint <: Point with VisualElement
type FLine <: Line with VisualElement
type FEllipse <: Ellipse with VisualElement
type FArc <: Arc with VisualElement
type FText <: Text with VisualElement
type FRectangle <: Rectangle with VisualElement
type FRRectangle <: RoundRectangle with VisualElement
type FPolyLine <: VisualElement
def point(x: Double, y: Double): FPoint
def line(p1: Point, p2: Point): FLine
def line(x0: Double, y0: Double, x1: Double, y1: Double): FLine
def ellipse(center: Point, w: Double, h: Double): FEllipse
def ellipse(cx: Double, cy: Double, w: Double, h: Double): FEllipse
def arc(onEll: Ellipse, start: Double, extent: Double): FArc
def arc(cx: Double, cy: Double, w: Double, h: Double, start: Double, extent: Double): FArc
def arc(cp: Point, r: Double, start: Double, extent: Double): FArc
def arc(cx: Double, cy: Double, r: Double, start: Double, extent: Double): FArc
def circle(cp: Point, radius: Double): Ellipse
def circle(cx: Double, cy: Double, radius: Double): Ellipse
def rectangle(bLeft: Point, tRight: Point): FRectangle
def rectangle(x0: Double, y0: Double, w: Double, h: Double): FRectangle
def roundRectangle(p1: Point, p2: Point, rx: Double, ry: Double): FRRectangle
def text(content: String, p: Point): FText
def text(content: String, x: Double, y: Double): FText
def polyLine(path: net.kogics.kojo.kgeom.PolyLine): FPolyLine
def pnode(node: edu.umd.cs.piccolo.PNode): edu.umd.cs.piccolo.PNode
def refresh(fn: => Unit): Unit
}
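
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: how client code might
// drive a Figure implementation. The drawing coordinates are arbitrary
// example data.
// ---------------------------------------------------------------------------
object FigureExample {
  def drawFace(fig: Figure): Unit = {
    fig.setPenColor(java.awt.Color.blue)
    fig.circle(0, 0, 100)          // head
    fig.ellipse(-35, 30, 20, 30)   // left eye
    fig.ellipse(35, 30, 20, 30)    // right eye
    fig.line(-30, -40, 30, -40)    // mouth
  }
}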
| richardfontana/fontana2007-t | KojoEnv/src/net/kogics/kojo/core/Figure.scala | Scala | gpl-3.0 | 2,373 |
package scalaoauth2.provider
import java.util.Base64
import scala.util.Try
case class ClientCredential(clientId: String, clientSecret: Option[String])
class AuthorizationRequest(
headers: Map[String, Seq[String]],
params: Map[String, Seq[String]]
) extends RequestBase(headers, params) {
def scope: Option[String] = param("scope")
def grantType: String = requireParam("grant_type")
def parseClientCredential: Option[Either[InvalidClient, ClientCredential]] =
findAuthorization
.flatMap(x =>
Some(
x.fold(
left => Left(left),
header => clientCredentialByAuthorization(header)
)
)
)
.orElse(clientCredentialByParam.map(Right(_)))
private def findAuthorization: Option[Either[InvalidClient, String]] = {
header("Authorization").map { auth =>
val basicAuthCred = for {
matcher <- """^\\s*Basic\\s+(.+?)\\s*$""".r.findFirstMatchIn(auth)
} yield matcher.group(1)
basicAuthCred.fold[Either[InvalidClient, String]](
Left(new InvalidClient("Authorization header could not be parsed"))
)(x => Right(x))
}
}
private def clientCredentialByAuthorization(
s: String
): Either[InvalidClient, ClientCredential] =
Try(new String(Base64.getDecoder.decode(s), "UTF-8"))
.map(_.split(":", 2))
.getOrElse(Array.empty[String]) match {
case Array(clientId, clientSecret) =>
Right(
ClientCredential(
clientId,
if (clientSecret.isEmpty) None
else Some(clientSecret)
)
)
case _ =>
Left(new InvalidClient())
}
private def clientCredentialByParam =
param("client_id").map(ClientCredential(_, param("client_secret")))
}
case class RefreshTokenRequest(request: AuthorizationRequest)
extends AuthorizationRequest(request.headers, request.params) {
/** returns refresh_token.
*
    * @return refresh_token.
* @throws InvalidRequest if the parameter is not found
*/
def refreshToken: String = requireParam("refresh_token")
}
case class PasswordRequest(request: AuthorizationRequest)
extends AuthorizationRequest(request.headers, request.params) {
/** returns username.
*
* @return username.
* @throws InvalidRequest if the parameter is not found
*/
def username = requireParam("username")
/** returns password.
*
* @return password.
* @throws InvalidRequest if the parameter is not found
*/
def password = requireParam("password")
}
case class ClientCredentialsRequest(request: AuthorizationRequest)
extends AuthorizationRequest(request.headers, request.params)
case class AuthorizationCodeRequest(request: AuthorizationRequest)
extends AuthorizationRequest(request.headers, request.params) {
/** returns code.
*
* @return code.
* @throws InvalidRequest if code is not found
*/
def code: String = requireParam("code")
/** Returns redirect_uri.
*
* @return redirect_uri
*/
def redirectUri: Option[String] = param("redirect_uri")
/** Returns code_verifier
*
* @return
*/
def codeVerifier: Option[String] = param("code_verifier")
}
case class ImplicitRequest(request: AuthorizationRequest)
extends AuthorizationRequest(request.headers, request.params)
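
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original file: parses the client
// credential out of a Basic Authorization header built for "client"/"secret".
// ---------------------------------------------------------------------------
object AuthorizationRequestExample {
  def main(args: Array[String]): Unit = {
    val basic = Base64.getEncoder.encodeToString("client:secret".getBytes("UTF-8"))
    val request = new AuthorizationRequest(
      headers = Map("Authorization" -> Seq(s"Basic $basic")),
      params = Map("grant_type" -> Seq("client_credentials"))
    )
    println(request.grantType)             // client_credentials
    println(request.parseClientCredential) // Some(Right(ClientCredential(client,Some(secret))))
  }
}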
| nulab/scala-oauth2-provider | src/main/scala/scalaoauth2/provider/AuthorizationRequest.scala | Scala | mit | 3,331 |
package com.bigm.bot.engine
import akka.actor.{ActorLogging, ActorRef, FSM}
import akka.contrib.pattern.ReceivePipeline
import com.bigm.bot.api.facebookmessenger.{FacebookMessageDeliveredEvent, FacebookMessageReadEvent}
import com.bigm.bot.engine.ConciergeActor.{Data, State}
import com.bigm.bot.engine.interceptors.{LoggingInterceptor, PlatformSwitchInterceptor}
import com.bigm.bot.events._
import com.bigm.bot.models.{ConversationEngine, IntentResolutionEvaluationStrategy, IntentResolutionSelectionStrategy}
import com.bigm.bot.modules.{ActorInject, NamedActor}
import com.bigm.bot.services.FacebookSendQueue
import com.bigm.bot.utils.General
import com.google.inject.{Inject, Injector}
import com.typesafe.config.Config
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Random, Success}
class ConciergeActor @Inject()(config: Config,
val injector: Injector,
watsonConversationFactory: WatsonConversationActor.Factory)
extends ActorInject
with ActorLogging
with ReceivePipeline
with LoggingInterceptor
with PlatformSwitchInterceptor
// with EmojiInterceptor
with FutureExtensions
with General
with FSM[State, Data] {
import ConciergeActor._
import com.bigm.bot.models.ConversationEngine._
import com.bigm.bot.models.IntentResolutionSelectionStrategy._
import context.dispatcher
implicit val akkaTimeout: akka.util.Timeout = 30.seconds
private val defaultConversationEngine = ConversationEngine.withName(config.getString("settings.default-engine"))
private val maxFailCount = config.getInt("settings.max-fail-count")
private val maxMessageLength = config.getInt("settings.max-message-length")
private val voteThreshold = config.getDouble("settings.vote-threshold")
private val intentResolutionEvaluationStrategy =
IntentResolutionEvaluationStrategy withName config.getString("settings.intent-resolution-strategy.evaluation")
private val intentResolutionSelectionStrategy =
IntentResolutionSelectionStrategy withName config.getString("settings.intent-resolution-strategy.selection")
private val defaultProvider = injectActor[FacebookSendQueue]("provider")
// private val defaultAgentProvider = injectActor[SparkSendQueue]("agentProvider")
private val historyActor = injectActor[HistoryActor]("history")
// val greetActor = injectActor(greetActorFactory(defaultProvider, historyActor), "greet")
// val formActor = injectActor(formActorFactory(defaultProvider), "form")
// private val liveAgentActor = injectActor(liveAgentActorFactory(defaultProvider, defaultAgentProvider, historyActor), "agent")
private val defaultConversationActor = getConversationActor(defaultConversationEngine)
// sequence in preferred order of execution
private val intentResolvers = List(
injectActor[ApiAiIntentActor]("apiai")
)
val initialData = ConciergeContext(
provider = defaultProvider,
child = defaultConversationActor,
agentName = "bigmbot")
startWith(WithoutIntent, initialData)
when(WithoutIntent) {
case Event(ev: TextResponse, _) =>
resolveIntent(ev)
stay
case Event(StartMultistep, _) =>
goto(WithIntent)
}
when(WithIntent) {
case Event(ev: TextResponse, ctx: ConciergeContext) =>
// TODO
// was this a mistake?
//ctx.provider ! ev
ctx.child ! ev
stay
}
// when(FillingForm) {
//
// case Event(ev: TextResponse, _) =>
// formActor ! ev
// stay
//
// case Event(ev: EndFillForm, ctx: ConciergeContext) =>
// ctx.child ! ev
// goto(WithoutIntent)
//
// }
// when(UsingHuman) {
//
// case Event(ev: SparkMessageEvent, _) =>
// liveAgentActor ! ev
// stay
//
// case Event(SparkRoomLeftEvent(sender), ctx: ConciergeContext) =>
// val message = s"${ctx.agentName} (Human) is leaving the conversation"
// ctx.provider ! TextMessage(sender, message)
// val tempMembership = ctx.tempMemberships(sender)
// sparkService.deleteWebhook(tempMembership.leaveRoomWebhookId)
// sparkService.deleteWebhook(tempMembership.webhookId)
// sparkService.deleteTeam(tempMembership.teamId)
// goto(WithoutIntent)
//
// case Event(ev: TextResponse, ctx: ConciergeContext) =>
// val tempMembership = ctx.tempMemberships(ev.sender)
// liveAgentActor ! SparkWrappedEvent(tempMembership.roomId, tempMembership.personId, ev)
// stay
//
// }
whenUnhandled {
//
// case Event(ev: InitiateChat, ctx: ConciergeContext) =>
// ctx.child ! ev
// stay
//
case Event(IntentVote(_, ev, multistep), ctx: ConciergeContext) =>
if (multistep) {
self ! StartMultistep
}
self ! ev
stay
//
case Event(IntentUnknown(sender, text), ctx: ConciergeContext) =>
log.debug("intent unknown")
if (ctx.failCount > maxFailCount) {
self ! Fallback(sender, Nil)
goto(WithoutIntent) using ctx.copy(failCount = 0)
} else {
val message = "Tell me something else!"
self ! Say(sender, text, message)
goto(WithoutIntent) using ctx.copy(failCount = ctx.failCount + 1)
}
//
case Event(Unhandled(ev@TextResponse(_, sender, _, _)), ctx: ConciergeContext) =>
if (ctx.failCount > maxFailCount) {
self ! Fallback(sender, Nil)
goto(WithoutIntent) using ctx.copy(failCount = 0)
} else {
// TODO
// is this the best pattern to resend message after state change?
context.system.scheduler.scheduleOnce(50 milliseconds) {
self ! ev
}
goto(WithoutIntent) using ctx.copy(failCount = ctx.failCount + 1)
}
//
// case Event(ev: Greet, _) =>
// greetActor ! ev
// stay
//
case Event(ev: QuickReplyResponse, ctx: ConciergeContext) =>
ctx.child ! ev
stay
case Event(Say(sender, text, message), ctx: ConciergeContext) =>
say(ctx.provider, historyActor, sender, text, message)
stay
case Event(ev@(_: FacebookMessageDeliveredEvent | _: FacebookMessageReadEvent | _: LoginCard), ctx: ConciergeContext) =>
ctx.provider ! ev
stay
case Event(ShowHistory(sender), ctx: ConciergeContext) =>
akka.pattern.ask(historyActor, GetHistory).mapTo[History] onComplete { history =>
sendMultiMessage(ctx.provider, maxMessageLength, sender, formatHistory(history.get))
}
stay
case Event(ev@SetProvider(platform, _, ref, wrappedEvent, _, handleEventImmediately), ctx: ConciergeContext) =>
currentPlatform = Some(platform)
if (handleEventImmediately && wrappedEvent != NullEvent) {
log.debug("handle {} immediately", wrappedEvent)
self ! wrappedEvent
val notification = ev.copy(event = NullEvent)
ctx.child ! notification
// formActor ! notification
// liveAgentActor ! notification
} else {
ctx.child ! ev
// formActor ! ev
// liveAgentActor ! ev
}
stay using ctx.copy(provider = ref)
case Event(SetEngine(sender, engine), ctx: ConciergeContext) =>
ctx.provider ! TextMessage(sender, "set conversation engine to " + engine)
stay using ctx.copy(child = getConversationActor(engine))
// case Event(Fallback(sender, history), ctx: ConciergeContext) =>
// val message = "Hold on...transferring you to one of my human coworkers"
// ctx.provider ! TextMessage(sender, message)
// ctx.provider ! TransferToAgent
// // for {
// // tempMembership <- sparkService.setupTempRoom(sender)
// // .withTimeout(new TimeoutException("future timed out"))(futureTimeout, context.system)
// // } yield {
// // log.debug("setup temporary membership to room [{}] for sender [{}]", tempMembership.roomId, sender)
// //
// // // print transcript history
// // history map {
// // case Exchange(Some(request), response) => s"user: $request\\ntombot: $response"
  //     //   case Exchange(Some(request), response) => s"user: $request\ntombot: $response"
// // } foreach { text =>
// // liveAgentActor ! SparkTextMessage(Some(tempMembership.roomId), None, None, text, None)
// // }
// // self ! UpdateTempMemberships(ctx.tempMemberships + (sender -> tempMembership))
// // }
// stay
//
// case Event(UpdateTempMemberships(tempMemberships), ctx: ConciergeContext) =>
// goto(UsingHuman) using ctx.copy(tempMemberships = tempMemberships)
//
// case Event(FillForm(sender, goal), _) =>
// formActor ! NextQuestion(sender)
// goto(FillingForm)
//
// // TODO
// case Event(ev: SparkMessageEvent, _) =>
// liveAgentActor ! ev
// goto(UsingHuman)
//
case Event(Reset, ctx: ConciergeContext) =>
// formActor ! Reset
ctx.child ! Reset
goto(WithoutIntent) using initialData
//
case Event(ev, ctx: ConciergeContext) =>
log.warning("{} received unhandled request {} in state {}/{}", name, ev, stateName, ctx)
// TODO
// required?
// yes, for all use case specific events such as Qualify
ctx.child ! ev
stay
}
initialize()
def resolveIntent(ev: TextResponse): Unit = {
log.debug("resolving intent")
// TODO
// parallelize
// break at first certain score (1.0)
Future.traverse(intentResolvers)(r => akka.pattern.ask(r, ev).mapTo[IntentVote]) onComplete {
case Success(votes) if votes.isEmpty =>
log.debug("no votes returned")
self ! IntentUnknown(ev.sender, ev.text)
case Success(votes) if votes.nonEmpty =>
if (intentResolutionSelectionStrategy == TopScore) {
val sorted =
votes
.filter(_.probability > voteThreshold)
.sortBy(-_.probability)
sorted match {
case x :: _ =>
log.debug("winning vote: {}", x)
self ! x
case _ =>
self ! IntentUnknown(ev.sender, ev.text)
}
} else {
// random
val filtered = votes.filter(_.probability > voteThreshold)
if (filtered.isEmpty) {
self ! IntentUnknown(ev.sender, ev.text)
} else {
val winningVote = filtered(random.nextInt(filtered.length))
log.debug("winning vote: {}", winningVote)
self ! winningVote
}
}
case Failure(e) =>
log.error(e, e.getMessage)
self ! IntentUnknown(ev.sender, ev.text)
}
}
// bail out at first minimally viable vote
def resolveIntent2(ev: TextResponse): Unit = {
def loop(resolvers: List[ActorRef]): Future[Option[IntentVote]] = resolvers match {
case Nil => Future.successful(None)
case x :: xs =>
akka.pattern.ask(x, ev).mapTo[IntentVote] flatMap { vote =>
if (vote.probability > voteThreshold) {
log.debug("first minimally viable vote: {}", vote)
Future.successful(Some(vote))
} else {
loop(xs)
}
}
}
loop(intentResolvers) map {
case Some(vote) =>
self ! vote
case None =>
IntentUnknown(ev.sender, ev.text)
}
}
// bail out at first certain vote
def resolveIntent3(ev: TextResponse): Unit = {
def loop(resolvers: List[ActorRef], votes: List[IntentVote]): Future[List[IntentVote]] =
resolvers match {
case Nil => Future.successful(votes)
case x :: xs =>
akka.pattern.ask(x, ev).mapTo[IntentVote] flatMap { vote =>
if (vote.probability == 1.0) {
log.debug("winning vote: {}", vote)
Future.successful(vote :: votes)
} else {
loop(xs, vote :: votes)
}
}
}
loop(intentResolvers, Nil) map {
case x :: xs =>
if (intentResolutionSelectionStrategy == TopScore) {
self ! (x :: xs).sortBy(-_.probability).head
} else {
// random
val list = x :: xs
self ! list(random.nextInt(list.length))
}
case Nil =>
IntentUnknown(ev.sender, ev.text)
}
}
def getConversationActor(engine: ConversationEngine): ActorRef = engine match {
case Watson => injectActor(watsonConversationFactory(defaultProvider, historyActor), "watson")
}
def formatHistory(history: History) =
history map {
case Exchange(Some(request), response) => s"$request <- $response"
case Exchange(None, response) => s" <- $response"
} mkString newLine
}
object ConciergeActor extends NamedActor {
val name = "ConciergeActor"
sealed trait State
case object WithoutIntent extends State
case object WithIntent extends State
case object FillingForm extends State
case object UsingHuman extends State
sealed trait Data
case class ConciergeContext(provider: ActorRef,
child: ActorRef,
agentName: String,
failCount: Int = 0) extends Data
val random = new Random
}
| bigmlab/bigm-bot | src/main/scala/com/bigm/bot/engine/ConciergeActor.scala | Scala | apache-2.0 | 13,495 |
/**
* **************************************************************
* Licensed to the AOS Community (AOS) under one or more *
* contributor license agreements. See the NOTICE file *
* distributed with this work for additional information *
* regarding copyright ownership. The AOS licenses this file *
* to you under the Apache License, Version 2.0 (the *
* "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
* KIND, either express or implied. See the License for the *
* specific language governing permissions and limitations *
* under the License. *
* **************************************************************
*/
package io.aos.scala
import org.junit.Assert.assertTrue
import org.junit.Test
@Test
class JunitTest {
@Test
def testOK() = assertTrue(true)
@Test
def test() {
T4fScala.main("hello");
}
}
| XClouded/t4f-core | scala/src/test/scala/io/aos/scala/JunitTest.scala | Scala | apache-2.0 | 1,402 |
//scalac: -Werror -Wunused:privates -Xsource:3
//
object Domain {
def id(id: String): Domain = Domain(Some(id), None)
def name(name: String): Domain = Domain(None, Some(name))
// induces private copy and private copy defaults
def apply(id: String, name: String): Domain = Domain(Some(id), Some(name))
}
case class Domain private (id: Option[String], name: Option[String])
// t7707
object O { O() ; def f(): Unit = O() }
case class O private (x: Int = 3)
| scala/scala | test/files/pos/t11980.scala | Scala | apache-2.0 | 465 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.internal.SQLConf._
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
class SimpleSQLViewSuite extends SQLViewSuite with SharedSparkSession
/**
* A suite for testing view related functionality.
*/
abstract class SQLViewSuite extends QueryTest with SQLTestUtils {
import testImplicits._
protected override def beforeAll(): Unit = {
super.beforeAll()
// Create a simple table with two columns: id and id1
spark.range(1, 10).selectExpr("id", "id id1").write.format("json").saveAsTable("jt")
}
protected override def afterAll(): Unit = {
try {
spark.sql(s"DROP TABLE IF EXISTS jt")
} finally {
super.afterAll()
}
}
test("create a permanent view on a permanent view") {
withView("jtv1", "jtv2") {
sql("CREATE VIEW jtv1 AS SELECT * FROM jt WHERE id > 3")
sql("CREATE VIEW jtv2 AS SELECT * FROM jtv1 WHERE id < 6")
checkAnswer(sql("select count(*) FROM jtv2"), Row(2))
}
}
test("create a temp view on a permanent view") {
withView("jtv1") {
withTempView("temp_jtv1") {
sql("CREATE VIEW jtv1 AS SELECT * FROM jt WHERE id > 3")
sql("CREATE TEMPORARY VIEW temp_jtv1 AS SELECT * FROM jtv1 WHERE id < 6")
checkAnswer(sql("select count(*) FROM temp_jtv1"), Row(2))
}
}
}
test("create a temp view on a temp view") {
withTempView("temp_jtv1", "temp_jtv2") {
sql("CREATE TEMPORARY VIEW temp_jtv1 AS SELECT * FROM jt WHERE id > 3")
sql("CREATE TEMPORARY VIEW temp_jtv2 AS SELECT * FROM temp_jtv1 WHERE id < 6")
checkAnswer(sql("select count(*) FROM temp_jtv2"), Row(2))
}
}
test("create a permanent view on a temp view") {
withView("jtv1") {
withTempView("temp_jtv1") {
withGlobalTempView("global_temp_jtv1") {
sql("CREATE TEMPORARY VIEW temp_jtv1 AS SELECT * FROM jt WHERE id > 3")
var e = intercept[AnalysisException] {
sql("CREATE VIEW jtv1 AS SELECT * FROM temp_jtv1 WHERE id < 6")
}.getMessage
assert(e.contains("Not allowed to create a permanent view `default`.`jtv1` by " +
"referencing a temporary view temp_jtv1. " +
"Please create a temp view instead by CREATE TEMP VIEW"))
val globalTempDB = spark.sharedState.globalTempViewManager.database
sql("CREATE GLOBAL TEMP VIEW global_temp_jtv1 AS SELECT * FROM jt WHERE id > 0")
e = intercept[AnalysisException] {
sql(s"CREATE VIEW jtv1 AS SELECT * FROM $globalTempDB.global_temp_jtv1 WHERE id < 6")
}.getMessage
assert(e.contains("Not allowed to create a permanent view `default`.`jtv1` by " +
"referencing a temporary view global_temp.global_temp_jtv1"))
}
}
}
}
test("error handling: existing a table with the duplicate name when creating/altering a view") {
withTable("tab1") {
sql("CREATE TABLE tab1 (id int) USING parquet")
var e = intercept[AnalysisException] {
sql("CREATE OR REPLACE VIEW tab1 AS SELECT * FROM jt")
}.getMessage
assert(e.contains("`tab1` is not a view"))
e = intercept[AnalysisException] {
sql("CREATE VIEW tab1 AS SELECT * FROM jt")
}.getMessage
assert(e.contains("`tab1` is not a view"))
e = intercept[AnalysisException] {
sql("ALTER VIEW tab1 AS SELECT * FROM jt")
}.getMessage
assert(e.contains("tab1 is a table. 'ALTER VIEW ... AS' expects a view."))
}
}
test("existing a table with the duplicate name when CREATE VIEW IF NOT EXISTS") {
withTable("tab1") {
sql("CREATE TABLE tab1 (id int) USING parquet")
sql("CREATE VIEW IF NOT EXISTS tab1 AS SELECT * FROM jt")
checkAnswer(sql("select count(*) FROM tab1"), Row(0))
}
}
test("Issue exceptions for ALTER VIEW on the temporary view") {
val viewName = "testView"
withTempView(viewName) {
spark.range(10).createTempView(viewName)
assertAnalysisError(
s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')",
"testView is a temp view. 'ALTER VIEW ... SET TBLPROPERTIES' expects a permanent view.")
assertAnalysisError(
s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')",
"testView is a temp view. 'ALTER VIEW ... UNSET TBLPROPERTIES' expects a permanent view.")
}
}
test("Issue exceptions for ALTER TABLE on the temporary view") {
val viewName = "testView"
withTempView(viewName) {
spark.range(10).createTempView(viewName)
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName SET SERDE 'whatever'",
viewName,
"ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName PARTITION (a=1, b=2) SET SERDE 'whatever'",
viewName,
"ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName SET SERDEPROPERTIES ('p' = 'an')",
viewName,
"ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName PARTITION (a='4') RENAME TO PARTITION (a='5')",
viewName,
"ALTER TABLE ... RENAME TO PARTITION")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName RECOVER PARTITIONS",
viewName,
"ALTER TABLE ... RECOVER PARTITIONS")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName SET LOCATION '/path/to/your/lovely/heart'",
viewName,
"ALTER TABLE ... SET LOCATION ...")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName PARTITION (a='4') SET LOCATION '/path/to/home'",
viewName,
"ALTER TABLE ... SET LOCATION ...")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName ADD IF NOT EXISTS PARTITION (a='4', b='8')",
viewName,
"ALTER TABLE ... ADD PARTITION ...")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName DROP PARTITION (a='4', b='8')",
viewName,
"ALTER TABLE ... DROP PARTITION ...")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName SET TBLPROPERTIES ('p' = 'an')",
viewName,
"ALTER TABLE ... SET TBLPROPERTIES")
assertErrorForAlterTableOnTempView(
s"ALTER TABLE $viewName UNSET TBLPROPERTIES ('p')",
viewName,
"ALTER TABLE ... UNSET TBLPROPERTIES")
}
}
test("Issue exceptions for other table DDL on the temporary view") {
val viewName = "testView"
withTempView(viewName) {
spark.range(10).createTempView(viewName)
val e = intercept[AnalysisException] {
sql(s"INSERT INTO TABLE $viewName SELECT 1")
}.getMessage
assert(e.contains("Inserting into an RDD-based table is not allowed"))
val dataFilePath =
Thread.currentThread().getContextClassLoader.getResource("data/files/employee.dat")
val e2 = intercept[AnalysisException] {
sql(s"""LOAD DATA LOCAL INPATH "$dataFilePath" INTO TABLE $viewName""")
}.getMessage
assert(e2.contains(s"$viewName is a temp view. 'LOAD DATA' expects a table"))
val e3 = intercept[AnalysisException] {
sql(s"SHOW CREATE TABLE $viewName")
}.getMessage
assert(e3.contains(
s"$viewName is a temp view. 'SHOW CREATE TABLE' expects a table or permanent view."))
val e4 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $viewName COMPUTE STATISTICS")
}.getMessage
assert(e4.contains(
s"$viewName is a temp view. 'ANALYZE TABLE' expects a table or permanent view."))
val e5 = intercept[AnalysisException] {
sql(s"ANALYZE TABLE $viewName COMPUTE STATISTICS FOR COLUMNS id")
}.getMessage
assert(e5.contains(s"Temporary view `$viewName` is not cached for analyzing columns."))
}
}
private def assertAnalysisError(query: String, message: String): Unit = {
val e = intercept[AnalysisException](sql(query))
assert(e.message.contains(message))
}
private def assertErrorForAlterTableOnTempView(
sqlText: String, viewName: String, cmdName: String): Unit = {
assertAnalysisError(
sqlText,
s"$viewName is a temp view. '$cmdName' expects a table. Please use ALTER VIEW instead.")
}
test("error handling: insert/load table commands against a view") {
val viewName = "testView"
withView(viewName) {
sql(s"CREATE VIEW $viewName AS SELECT id FROM jt")
var e = intercept[AnalysisException] {
sql(s"INSERT INTO TABLE $viewName SELECT 1")
}.getMessage
assert(e.contains("Inserting into a view is not allowed. View: `default`.`testview`"))
val dataFilePath =
Thread.currentThread().getContextClassLoader.getResource("data/files/employee.dat")
e = intercept[AnalysisException] {
sql(s"""LOAD DATA LOCAL INPATH "$dataFilePath" INTO TABLE $viewName""")
}.getMessage
assert(e.contains("default.testView is a view. 'LOAD DATA' expects a table"))
}
}
test("error handling: fail if the view sql itself is invalid") {
// A database that does not exist
assertInvalidReference("CREATE OR REPLACE VIEW myabcdview AS SELECT * FROM db_not_exist234.jt")
// A table that does not exist
assertInvalidReference("CREATE OR REPLACE VIEW myabcdview AS SELECT * FROM table_not_exist345")
// A column that does not exist
intercept[AnalysisException] {
sql("CREATE OR REPLACE VIEW myabcdview AS SELECT random1234 FROM jt").collect()
}
}
private def assertInvalidReference(query: String): Unit = {
val e = intercept[AnalysisException] {
sql(query)
}.getMessage
assert(e.contains("Table or view not found"))
}
test("error handling: fail if the temp view name contains the database prefix") {
// Fully qualified table name like "database.table" is not allowed for temporary view
val e = intercept[AnalysisException] {
sql("CREATE OR REPLACE TEMPORARY VIEW default.myabcdview AS SELECT * FROM jt")
}
assert(e.message.contains("It is not allowed to add database prefix"))
}
test("error handling: disallow IF NOT EXISTS for CREATE TEMPORARY VIEW") {
withTempView("myabcdview") {
val e = intercept[AnalysisException] {
sql("CREATE TEMPORARY VIEW IF NOT EXISTS myabcdview AS SELECT * FROM jt")
}
assert(e.message.contains("It is not allowed to define a TEMPORARY view with IF NOT EXISTS"))
}
}
test("error handling: fail if the temp view sql itself is invalid") {
// A database that does not exist
assertInvalidReference(
"CREATE OR REPLACE TEMPORARY VIEW myabcdview AS SELECT * FROM db_not_exist234.jt")
// A table that does not exist
assertInvalidReference(
"CREATE OR REPLACE TEMPORARY VIEW myabcdview AS SELECT * FROM table_not_exist1345")
// A column that does not exist, for temporary view
intercept[AnalysisException] {
sql("CREATE OR REPLACE TEMPORARY VIEW myabcdview AS SELECT random1234 FROM jt")
}
}
test("SPARK-32374: disallow setting properties for CREATE TEMPORARY VIEW") {
withTempView("myabcdview") {
val e = intercept[ParseException] {
sql("CREATE TEMPORARY VIEW myabcdview TBLPROPERTIES ('a' = 'b') AS SELECT * FROM jt")
}
assert(e.message.contains(
"Operation not allowed: TBLPROPERTIES can't coexist with CREATE TEMPORARY VIEW"))
}
}
test("correctly parse CREATE VIEW statement") {
withView("testView") {
sql(
"""CREATE VIEW IF NOT EXISTS
|default.testView (c1 COMMENT 'blabla', c2 COMMENT 'blabla')
|TBLPROPERTIES ('a' = 'b')
|AS SELECT * FROM jt
|""".stripMargin)
checkAnswer(sql("SELECT c1, c2 FROM testView ORDER BY c1"), (1 to 9).map(i => Row(i, i)))
}
}
test("correctly parse a nested view") {
withTempDatabase { db =>
withView("view1", "view2", s"$db.view3") {
sql("CREATE VIEW view1(x, y) AS SELECT * FROM jt")
// Create a nested view in the same database.
sql("CREATE VIEW view2(id, id1) AS SELECT * FROM view1")
checkAnswer(sql("SELECT * FROM view2 ORDER BY id"), (1 to 9).map(i => Row(i, i)))
// Create a nested view in a different database.
activateDatabase(db) {
sql(s"CREATE VIEW $db.view3(id, id1) AS SELECT * FROM default.view1")
checkAnswer(sql("SELECT * FROM view3 ORDER BY id"), (1 to 9).map(i => Row(i, i)))
}
}
}
}
test("correctly parse CREATE TEMPORARY VIEW statement") {
withTempView("testView") {
sql(
"""CREATE TEMPORARY VIEW
|testView (c1 COMMENT 'blabla', c2 COMMENT 'blabla')
|AS SELECT * FROM jt
|""".stripMargin)
checkAnswer(sql("SELECT c1, c2 FROM testView ORDER BY c1"), (1 to 9).map(i => Row(i, i)))
}
}
test("should NOT allow CREATE TEMPORARY VIEW when TEMPORARY VIEW with same name exists") {
withTempView("testView") {
sql("CREATE TEMPORARY VIEW testView AS SELECT id FROM jt")
val e = intercept[AnalysisException] {
sql("CREATE TEMPORARY VIEW testView AS SELECT id FROM jt")
}
assert(e.message.contains("Temporary view") && e.message.contains("already exists"))
}
}
test("should allow CREATE TEMPORARY VIEW when a permanent VIEW with same name exists") {
withView("testView", "default.testView") {
withTempView("testView") {
sql("CREATE VIEW testView AS SELECT id FROM jt")
sql("CREATE TEMPORARY VIEW testView AS SELECT id FROM jt")
}
}
}
test("should allow CREATE permanent VIEW when a TEMPORARY VIEW with same name exists") {
withView("testView", "default.testView") {
withTempView("testView") {
sql("CREATE TEMPORARY VIEW testView AS SELECT id FROM jt")
sql("CREATE VIEW testView AS SELECT id FROM jt")
}
}
}
test("correctly handle CREATE VIEW IF NOT EXISTS") {
withTable("jt2") {
withView("testView") {
sql("CREATE VIEW testView AS SELECT id FROM jt")
val df = (1 until 10).map(i => i -> i).toDF("i", "j")
df.write.format("json").saveAsTable("jt2")
sql("CREATE VIEW IF NOT EXISTS testView AS SELECT * FROM jt2")
// make sure our view doesn't change.
checkAnswer(sql("SELECT * FROM testView ORDER BY id"), (1 to 9).map(i => Row(i)))
}
}
}
test(s"correctly handle CREATE OR REPLACE TEMPORARY VIEW") {
withTable("jt2") {
withView("testView") {
sql("CREATE OR REPLACE TEMPORARY VIEW testView AS SELECT id FROM jt")
checkAnswer(sql("SELECT * FROM testView ORDER BY id"), (1 to 9).map(i => Row(i)))
sql("CREATE OR REPLACE TEMPORARY VIEW testView AS SELECT id AS i, id AS j FROM jt")
// make sure the view has been changed.
checkAnswer(sql("SELECT * FROM testView ORDER BY i"), (1 to 9).map(i => Row(i, i)))
}
}
}
test("correctly handle CREATE OR REPLACE VIEW") {
withTable("jt2") {
sql("CREATE OR REPLACE VIEW testView AS SELECT id FROM jt")
checkAnswer(sql("SELECT * FROM testView ORDER BY id"), (1 to 9).map(i => Row(i)))
val df = (1 until 10).map(i => i -> i).toDF("i", "j")
df.write.format("json").saveAsTable("jt2")
sql("CREATE OR REPLACE VIEW testView AS SELECT * FROM jt2")
// make sure the view has been changed.
checkAnswer(sql("SELECT * FROM testView ORDER BY i"), (1 to 9).map(i => Row(i, i)))
sql("DROP VIEW testView")
val e = intercept[AnalysisException] {
sql("CREATE OR REPLACE VIEW IF NOT EXISTS testView AS SELECT id FROM jt")
}
assert(e.message.contains(
"CREATE VIEW with both IF NOT EXISTS and REPLACE is not allowed"))
}
}
test("correctly handle ALTER VIEW") {
withTable("jt2") {
withView("testView") {
sql("CREATE VIEW testView AS SELECT id FROM jt")
val df = (1 until 10).map(i => i -> i).toDF("i", "j")
df.write.format("json").saveAsTable("jt2")
sql("ALTER VIEW testView AS SELECT * FROM jt2")
// make sure the view has been changed.
checkAnswer(sql("SELECT * FROM testView ORDER BY i"), (1 to 9).map(i => Row(i, i)))
}
}
}
test("correctly handle ALTER VIEW on a referenced view") {
withView("view1", "view2") {
sql("CREATE VIEW view1(x, y) AS SELECT * FROM jt")
// Create a nested view.
sql("CREATE VIEW view2(id, id1) AS SELECT * FROM view1")
checkAnswer(sql("SELECT * FROM view2 ORDER BY id"), (1 to 9).map(i => Row(i, i)))
// Alter the referenced view.
sql("ALTER VIEW view1 AS SELECT id AS x, id1 + 1 As y FROM jt")
checkAnswer(sql("SELECT * FROM view2 ORDER BY id"), (1 to 9).map(i => Row(i, i + 1)))
}
}
test("should not allow ALTER VIEW AS when the view does not exist") {
assertAnalysisError(
"ALTER VIEW testView AS SELECT 1, 2",
"View not found: testView")
assertAnalysisError(
"ALTER VIEW default.testView AS SELECT 1, 2",
"View not found: default.testView")
}
test("ALTER VIEW AS should try to alter temp view first if view name has no database part") {
withView("test_view") {
withTempView("test_view") {
sql("CREATE VIEW test_view AS SELECT 1 AS a, 2 AS b")
sql("CREATE TEMP VIEW test_view AS SELECT 1 AS a, 2 AS b")
sql("ALTER VIEW test_view AS SELECT 3 AS i, 4 AS j")
// The temporary view should be updated.
checkAnswer(spark.table("test_view"), Row(3, 4))
// The permanent view should stay same.
checkAnswer(spark.table("default.test_view"), Row(1, 2))
}
}
}
test("ALTER VIEW AS should alter permanent view if view name has database part") {
withView("test_view") {
withTempView("test_view") {
sql("CREATE VIEW test_view AS SELECT 1 AS a, 2 AS b")
sql("CREATE TEMP VIEW test_view AS SELECT 1 AS a, 2 AS b")
sql("ALTER VIEW default.test_view AS SELECT 3 AS i, 4 AS j")
// The temporary view should stay same.
checkAnswer(spark.table("test_view"), Row(1, 2))
// The permanent view should be updated.
checkAnswer(spark.table("default.test_view"), Row(3, 4))
}
}
}
test("ALTER VIEW AS should keep the previous table properties, comment, create_time, etc.") {
withView("test_view") {
sql(
"""
|CREATE VIEW test_view
|COMMENT 'test'
|TBLPROPERTIES ('key' = 'a')
|AS SELECT 1 AS a, 2 AS b
""".stripMargin)
val catalog = spark.sessionState.catalog
val viewMeta = catalog.getTableMetadata(TableIdentifier("test_view"))
assert(viewMeta.comment == Some("test"))
assert(viewMeta.properties("key") == "a")
sql("ALTER VIEW test_view AS SELECT 3 AS i, 4 AS j")
val updatedViewMeta = catalog.getTableMetadata(TableIdentifier("test_view"))
assert(updatedViewMeta.comment == Some("test"))
assert(updatedViewMeta.properties("key") == "a")
assert(updatedViewMeta.createTime == viewMeta.createTime)
// The view should be updated.
checkAnswer(spark.table("test_view"), Row(3, 4))
}
}
test("create view for json table") {
    // json table is not hive-compatible, make sure the new flag fixes it.
withView("testView") {
sql("CREATE VIEW testView AS SELECT id FROM jt")
checkAnswer(sql("SELECT * FROM testView ORDER BY id"), (1 to 9).map(i => Row(i)))
}
}
test("create view for partitioned parquet table") {
    // partitioned parquet table is not hive-compatible, make sure the new flag fixes it.
withTable("parTable") {
withView("testView") {
val df = Seq(1 -> "a").toDF("i", "j")
df.write.format("parquet").partitionBy("i").saveAsTable("parTable")
sql("CREATE VIEW testView AS SELECT i, j FROM parTable")
checkAnswer(sql("SELECT * FROM testView"), Row(1, "a"))
}
}
}
test("create view for joined tables") {
// make sure the new flag can handle some complex cases like join and schema change.
withTable("jt1", "jt2") {
spark.range(1, 10).toDF("id1").write.format("json").saveAsTable("jt1")
spark.range(1, 10).toDF("id2").write.format("json").saveAsTable("jt2")
withView("testView") {
sql("CREATE VIEW testView AS SELECT * FROM jt1 JOIN jt2 ON id1 == id2")
checkAnswer(sql("SELECT * FROM testView ORDER BY id1"), (1 to 9).map(i => Row(i, i)))
val df = (1 until 10).map(i => i -> i).toDF("id1", "newCol")
df.write.format("json").mode(SaveMode.Overwrite).saveAsTable("jt1")
checkAnswer(sql("SELECT * FROM testView ORDER BY id1"), (1 to 9).map(i => Row(i, i)))
}
}
}
test("CTE within view") {
withView("cte_view") {
sql("CREATE VIEW cte_view AS WITH w AS (SELECT 1 AS n) SELECT n FROM w")
checkAnswer(sql("SELECT * FROM cte_view"), Row(1))
}
}
test("Using view after switching current database") {
withView("v") {
sql("CREATE VIEW v AS SELECT * FROM jt")
withTempDatabase { db =>
activateDatabase(db) {
// Should look up table `jt` in database `default`.
checkAnswer(sql("SELECT * FROM default.v"), sql("SELECT * FROM default.jt"))
// The new `jt` table shouldn't be scanned.
sql("CREATE TABLE jt(key INT, value STRING) USING parquet")
checkAnswer(sql("SELECT * FROM default.v"), sql("SELECT * FROM default.jt"))
}
}
}
}
test("Using view after adding more columns") {
withTable("add_col") {
spark.range(10).write.saveAsTable("add_col")
withView("v") {
sql("CREATE VIEW v AS SELECT * FROM add_col")
spark.range(10).select('id, 'id as 'a).write.mode("overwrite").saveAsTable("add_col")
checkAnswer(sql("SELECT * FROM v"), spark.range(10).toDF())
}
}
}
test("error handling: fail if the referenced table or view is invalid") {
withView("view1", "view2", "view3") {
      // Fail if the referenced table is defined in an invalid database.
withTempDatabase { db =>
withTable(s"$db.table1") {
activateDatabase(db) {
sql("CREATE TABLE table1(a int, b string) USING parquet")
sql("CREATE VIEW default.view1 AS SELECT * FROM table1")
}
}
}
assertInvalidReference("SELECT * FROM view1")
// Fail if the referenced table is invalid.
withTable("table2") {
sql("CREATE TABLE table2(a int, b string) USING parquet")
sql("CREATE VIEW view2 AS SELECT * FROM table2")
}
assertInvalidReference("SELECT * FROM view2")
// Fail if the referenced view is invalid.
withView("testView") {
sql("CREATE VIEW testView AS SELECT * FROM jt")
sql("CREATE VIEW view3 AS SELECT * FROM testView")
}
assertInvalidReference("SELECT * FROM view3")
}
}
test("correctly resolve a view in a self join") {
withView("testView") {
sql("CREATE VIEW testView AS SELECT * FROM jt")
checkAnswer(
sql("SELECT * FROM testView t1 JOIN testView t2 ON t1.id = t2.id ORDER BY t1.id"),
(1 to 9).map(i => Row(i, i, i, i)))
}
}
test("correctly handle a view with custom column names") {
withTable("tab1") {
spark.range(1, 10).selectExpr("id", "id + 1 id1").write.saveAsTable("tab1")
withView("testView", "testView2") {
sql("CREATE VIEW testView(x, y) AS SELECT * FROM tab1")
// Correctly resolve a view with custom column names.
checkAnswer(sql("SELECT * FROM testView ORDER BY x"), (1 to 9).map(i => Row(i, i + 1)))
        // Throw an AnalysisException if the number of columns doesn't match up.
val e = intercept[AnalysisException] {
sql("CREATE VIEW testView2(x, y, z) AS SELECT * FROM tab1")
}.getMessage
assert(e.contains("The number of columns produced by the SELECT clause (num: `2`) does " +
"not match the number of column names specified by CREATE VIEW (num: `3`)."))
// Correctly resolve a view when the referenced table schema changes.
spark.range(1, 10).selectExpr("id", "id + id dummy", "id + 1 id1")
.write.mode(SaveMode.Overwrite).saveAsTable("tab1")
checkAnswer(sql("SELECT * FROM testView ORDER BY x"), (1 to 9).map(i => Row(i, i + 1)))
// Throw an AnalysisException if the column name is not found.
spark.range(1, 10).selectExpr("id", "id + 1 dummy")
.write.mode(SaveMode.Overwrite).saveAsTable("tab1")
intercept[AnalysisException](sql("SELECT * FROM testView"))
}
}
}
test("resolve a view when the dataTypes of referenced table columns changed") {
withTable("tab1") {
spark.range(1, 10).selectExpr("id", "id + 1 id1").write.saveAsTable("tab1")
withView("testView") {
sql("CREATE VIEW testView AS SELECT * FROM tab1")
// Allow casting from IntegerType to LongType
val df = (1 until 10).map(i => (i, i + 1)).toDF("id", "id1")
df.write.format("json").mode(SaveMode.Overwrite).saveAsTable("tab1")
checkAnswer(sql("SELECT * FROM testView ORDER BY id1"), (1 to 9).map(i => Row(i, i + 1)))
        // Casting from DoubleType to LongType might truncate, so throw an AnalysisException.
val df2 = (1 until 10).map(i => (i.toDouble, i.toDouble)).toDF("id", "id1")
df2.write.format("json").mode(SaveMode.Overwrite).saveAsTable("tab1")
intercept[AnalysisException](sql("SELECT * FROM testView"))
        // Can't cast from ArrayType to LongType, so throw an AnalysisException.
val df3 = (1 until 10).map(i => (i, Seq(i))).toDF("id", "id1")
df3.write.format("json").mode(SaveMode.Overwrite).saveAsTable("tab1")
intercept[AnalysisException](sql("SELECT * FROM testView"))
}
}
}
test("correctly handle a cyclic view reference") {
withView("view1", "view2", "view3") {
sql("CREATE VIEW view1 AS SELECT * FROM jt")
sql("CREATE VIEW view2 AS SELECT * FROM view1")
sql("CREATE VIEW view3 AS SELECT * FROM view2")
// Detect cyclic view reference on ALTER VIEW.
val e1 = intercept[AnalysisException] {
sql("ALTER VIEW view1 AS SELECT * FROM view2")
}.getMessage
assert(e1.contains("Recursive view `default`.`view1` detected (cycle: `default`.`view1` " +
"-> `default`.`view2` -> `default`.`view1`)"))
      // Detect the left-most cycle when there exist multiple cyclic view references.
val e2 = intercept[AnalysisException] {
sql("ALTER VIEW view1 AS SELECT * FROM view3 JOIN view2")
}.getMessage
assert(e2.contains("Recursive view `default`.`view1` detected (cycle: `default`.`view1` " +
"-> `default`.`view3` -> `default`.`view2` -> `default`.`view1`)"))
// Detect cyclic view reference on CREATE OR REPLACE VIEW.
val e3 = intercept[AnalysisException] {
sql("CREATE OR REPLACE VIEW view1 AS SELECT * FROM view2")
}.getMessage
assert(e3.contains("Recursive view `default`.`view1` detected (cycle: `default`.`view1` " +
"-> `default`.`view2` -> `default`.`view1`)"))
// Detect cyclic view reference from subqueries.
val e4 = intercept[AnalysisException] {
sql("ALTER VIEW view1 AS SELECT * FROM jt WHERE EXISTS (SELECT 1 FROM view2)")
}.getMessage
assert(e4.contains("Recursive view `default`.`view1` detected (cycle: `default`.`view1` " +
"-> `default`.`view2` -> `default`.`view1`)"))
}
}
test("permanent view should be case-preserving") {
withView("v") {
sql("CREATE VIEW v AS SELECT 1 as aBc")
assert(spark.table("v").schema.head.name == "aBc")
sql("CREATE OR REPLACE VIEW v AS SELECT 2 as cBa")
assert(spark.table("v").schema.head.name == "cBa")
}
}
test("sparkSession API view resolution with different default database") {
withDatabase("db2") {
withView("default.v1") {
withTable("t1") {
sql("USE default")
sql("CREATE TABLE t1 USING parquet AS SELECT 1 AS c0")
sql("CREATE VIEW v1 AS SELECT * FROM t1")
sql("CREATE DATABASE IF NOT EXISTS db2")
sql("USE db2")
checkAnswer(spark.table("default.v1"), Row(1))
}
}
}
}
test("SPARK-23519 view should be created even when query output contains duplicate col name") {
withTable("t23519") {
withView("v23519") {
sql("CREATE TABLE t23519 USING parquet AS SELECT 1 AS c1")
sql("CREATE VIEW v23519 (c1, c2) AS SELECT c1, c1 FROM t23519")
checkAnswer(sql("SELECT * FROM v23519"), Row(1, 1))
}
}
}
test("temporary view should ignore useCurrentSQLConfigsForView config") {
withTable("t") {
Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t")
withTempView("v1") {
sql("CREATE TEMPORARY VIEW v1 AS SELECT 1/0")
withSQLConf(
USE_CURRENT_SQL_CONFIGS_FOR_VIEW.key -> "true",
ANSI_ENABLED.key -> "true") {
checkAnswer(sql("SELECT * FROM v1"), Seq(Row(null)))
}
}
}
}
test("alter temporary view should follow current storeAnalyzedPlanForView config") {
withTable("t") {
Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t")
withView("v1") {
withSQLConf(STORE_ANALYZED_PLAN_FOR_VIEW.key -> "true") {
sql("CREATE TEMPORARY VIEW v1 AS SELECT * FROM t")
Seq(4, 6, 5).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t")
val e = intercept[SparkException] {
sql("SELECT * FROM v1").collect()
}.getMessage
assert(e.contains("does not exist"))
}
withSQLConf(STORE_ANALYZED_PLAN_FOR_VIEW.key -> "false") {
// alter view from legacy to non-legacy config
sql("ALTER VIEW v1 AS SELECT * FROM t")
Seq(1, 3, 5).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t")
checkAnswer(sql("SELECT * FROM v1"), Seq(Row(1), Row(3), Row(5)))
}
withSQLConf(STORE_ANALYZED_PLAN_FOR_VIEW.key -> "true") {
// alter view from non-legacy to legacy config
sql("ALTER VIEW v1 AS SELECT * FROM t")
Seq(2, 4, 6).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t")
val e = intercept[SparkException] {
sql("SELECT * FROM v1").collect()
}.getMessage
assert(e.contains("does not exist"))
}
}
}
}
test("local temp view refers global temp view") {
withGlobalTempView("v1") {
withTempView("v2") {
val globalTempDB = spark.sharedState.globalTempViewManager.database
sql("CREATE GLOBAL TEMPORARY VIEW v1 AS SELECT 1")
sql(s"CREATE TEMPORARY VIEW v2 AS SELECT * FROM ${globalTempDB}.v1")
checkAnswer(sql("SELECT * FROM v2"), Seq(Row(1)))
}
}
}
test("global temp view refers local temp view") {
withTempView("v1") {
withGlobalTempView("v2") {
val globalTempDB = spark.sharedState.globalTempViewManager.database
sql("CREATE TEMPORARY VIEW v1 AS SELECT 1")
sql(s"CREATE GLOBAL TEMPORARY VIEW v2 AS SELECT * FROM v1")
checkAnswer(sql(s"SELECT * FROM ${globalTempDB}.v2"), Seq(Row(1)))
}
}
}
test("SPARK-33141: view should be parsed and analyzed with configs set when creating") {
withTable("t") {
withView("v1", "v2", "v3", "v4", "v5") {
Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t")
sql("CREATE VIEW v1 (c1) AS SELECT C1 FROM t")
sql("CREATE VIEW v2 (c1) AS SELECT c1 FROM t ORDER BY 1 ASC, c1 DESC")
sql("CREATE VIEW v3 (c1, count) AS SELECT c1, count(c1) AS cnt FROM t GROUP BY 1")
sql("CREATE VIEW v4 (a, count) AS SELECT c1 as a, count(c1) AS cnt FROM t GROUP BY a")
sql("CREATE VIEW v5 (c1) AS SELECT 1/0 AS invalid")
withSQLConf(CASE_SENSITIVE.key -> "true") {
checkAnswer(sql("SELECT * FROM v1"), Seq(Row(2), Row(3), Row(1)))
}
withSQLConf(ORDER_BY_ORDINAL.key -> "false") {
checkAnswer(sql("SELECT * FROM v2"), Seq(Row(1), Row(2), Row(3)))
}
withSQLConf(GROUP_BY_ORDINAL.key -> "false") {
checkAnswer(sql("SELECT * FROM v3"),
Seq(Row(1, 1), Row(2, 1), Row(3, 1)))
}
withSQLConf(GROUP_BY_ALIASES.key -> "false") {
checkAnswer(sql("SELECT * FROM v4"),
Seq(Row(1, 1), Row(2, 1), Row(3, 1)))
}
withSQLConf(ANSI_ENABLED.key -> "true") {
checkAnswer(sql("SELECT * FROM v5"), Seq(Row(null)))
}
withSQLConf(USE_CURRENT_SQL_CONFIGS_FOR_VIEW.key -> "true") {
withSQLConf(CASE_SENSITIVE.key -> "true") {
val e = intercept[AnalysisException] {
sql("SELECT * FROM v1")
}
assert(e.getErrorClass == "MISSING_COLUMN")
assert(e.messageParameters.sameElements(Array("C1", "spark_catalog.default.t.c1")))
}
withSQLConf(ORDER_BY_ORDINAL.key -> "false") {
checkAnswer(sql("SELECT * FROM v2"), Seq(Row(3), Row(2), Row(1)))
}
withSQLConf(GROUP_BY_ORDINAL.key -> "false") {
val e = intercept[AnalysisException] {
sql("SELECT * FROM v3")
}.getMessage
assert(e.contains(
"expression 'spark_catalog.default.t.c1' is neither present " +
"in the group by, nor is it an aggregate function. Add to group by or wrap in " +
"first() (or first_value) if you don't care which value you get."))
}
withSQLConf(GROUP_BY_ALIASES.key -> "false") {
val e = intercept[AnalysisException] {
sql("SELECT * FROM v4")
}
assert(e.getErrorClass == "MISSING_COLUMN")
assert(e.messageParameters.sameElements(Array("a", "spark_catalog.default.t.c1")))
}
withSQLConf(ANSI_ENABLED.key -> "true") {
val e = intercept[ArithmeticException] {
sql("SELECT * FROM v5").collect()
}.getMessage
assert(e.contains("divide by zero"))
}
}
withSQLConf(ANSI_ENABLED.key -> "true") {
sql("ALTER VIEW v1 AS SELECT 1/0 AS invalid")
}
val e = intercept[ArithmeticException] {
sql("SELECT * FROM v1").collect()
}.getMessage
assert(e.contains("divide by zero"))
}
}
}
}
| shaneknapp/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala | Scala | apache-2.0 | 35,725 |
object prob {
opaque type Probability = Double
implicit object Probability {
def apply(n: Double): Option[Probability] =
if (0.0 <= n && n <= 1.0) Some(n) else None
def unsafe(p: Double): Probability = {
require(0.0 <= p && p <= 1.0, s"probabilities lie in [0, 1] (got $p)")
p
}
def asDouble(p: Probability): Double = p
val Never: Probability = 0.0
val CoinToss: Probability = 0.5
val Certain: Probability = 1.0
implicit val ordering: Ordering[Probability] =
implicitly[Ordering[Double]]
extension (p1: Probability) def unary_~ : Probability = Certain - p1
extension (p1: Probability) def & (p2: Probability): Probability = p1 * p2
extension (p1: Probability) def | (p2: Probability): Probability = p1 + p2 - (p1 * p2)
extension (p1: Probability) def isImpossible: Boolean = p1 == Never
extension (p1: Probability) def isCertain: Boolean = p1 == Certain
import scala.util.Random
extension (p1: Probability) def sample(r: Random = Random): Boolean = r.nextDouble <= p1
extension (p1: Probability) def toDouble: Double = p1
}
val caughtTrain = Probability.unsafe(0.3)
val missedTrain = ~caughtTrain
val caughtCab = Probability.CoinToss
val arrived = caughtTrain | (missedTrain & caughtCab)
println((1 to 5).map(_ => arrived.sample()).toList)
}
| lampepfl/dotty | tests/pos/opaque-propability-xm.scala | Scala | apache-2.0 | 1,357 |
package scala.pickling.`null`.json
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, json._
class D
final class E
case class C(val x: String, val y: Int, val d: D, val e: E)
class NullJsonTest extends FunSuite {
test("main") {
val c = C(null, 0, null, null)
val pickle = c.pickle
assert(pickle.value === """
|{
| "tpe": "scala.pickling.null.json.C",
| "x": null,
| "y": 0,
| "e": null,
| "d": null
|}
""".stripMargin.trim)
assert(pickle.unpickle[C].toString === c.toString)
}
}
| eed3si9n/pickling-historical | core/src/test/scala/pickling/run/null-json.scala | Scala | bsd-3-clause | 584 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.query
object Order {
case object TimestampDesc extends Order
case object TimestampAsc extends Order
case object DurationDesc extends Order
case object DurationAsc extends Order
case object None extends Order
}
sealed trait Order
| cogitate/twitter-zipkin-uuid | zipkin-common/src/main/scala/com/twitter/zipkin/query/Order.scala | Scala | apache-2.0 | 872 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
/**
 * Obtained inside a reduce task to read combined records from the mappers.
 *
 * ShuffleReader implements the logic by which a downstream task reads the shuffle
 * output of the upstream ShuffleMapTasks. The logic is fairly involved; in short, it
 * obtains the location of the data through org.apache.spark.MapOutputTracker and, if
 * the data is local, reads it by calling getBlockData on
 * org.apache.spark.storage.BlockManager.
 */
private[spark] trait ShuffleReader[K, C] {
  /** Read the combined key-values for this reduce task. */
def read(): Iterator[Product2[K, C]]
/**
* Close this reader.
* TODO: Add this back when we make the ShuffleReader a developer API that others can implement
* (at which point this will likely be necessary).
*/
// def stop(): Unit
}
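/**
 * A minimal illustrative sketch, not part of Spark: the simplest conceivable
 * ShuffleReader, which just replays pre-materialized records (e.g. in a unit test).
 * The class name and constructor are hypothetical; real implementations such as
 * BlockStoreShuffleReader instead fetch blocks as described above.
 */
private[spark] class PrecomputedShuffleReader[K, C](records: Seq[(K, C)])
  extends ShuffleReader[K, C] {
  /** Replays the given records; no remote or local block fetch is involved. */
  override def read(): Iterator[Product2[K, C]] = records.iterator
}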
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/shuffle/ShuffleReader.scala | Scala | apache-2.0 | 1,693 |
package nozzle.webresult
import spray.http.{ StatusCode, StatusCodes }
trait DefaultMarshallingSupport extends MarshallingSupport {
implicit def webErrorToStatusCode(webError: WebError) = webError match {
case WebError.InvalidParam(_, _) => StatusCodes.UnprocessableEntity
case WebError.InvalidParams(_) => StatusCodes.UnprocessableEntity
case WebError.InvalidOperation(_) => StatusCodes.UnprocessableEntity
case WebError.InvalidCredentials => StatusCodes.Unauthorized
case WebError.Forbidden(_) => StatusCodes.Forbidden
case WebError.NotFound => StatusCodes.NotFound
case WebError.GenericError(_) => StatusCodes.BadRequest
case WebError.GenericErrors(_) => StatusCodes.BadRequest
}
implicit def webErrorToMessageString(webError: WebError) = webError match {
case WebError.Forbidden(desc) => s"Forbidden: $desc"
case WebError.InvalidParam(param, value) => s"Invalid parameter: ${param.name} ($value)"
case WebError.InvalidParams(params) => {
val errors = params mkString ", "
s"Invalid parameters: $errors"
}
case WebError.InvalidOperation(desc) => s"Invalid operation. $desc"
case WebError.InvalidCredentials => "Invalid credentials"
case WebError.NotFound => "Not found"
case WebError.GenericError(error) => error.desc
case WebError.GenericErrors(errors) => {
val errorsDesc = errors mkString ", "
s"Errors description: $errorsDesc"
}
}
}
| buildo/nozzle | src/main/scala/webresult/DefaultMarshallingSupport.scala | Scala | mit | 1,613 |
package com.arcusys.valamis.certificate.service
import com.arcusys.learn.liferay.LiferayClasses.LSocialActivity
import com.arcusys.valamis.certificate.model.goal._
import com.arcusys.valamis.certificate.storage._
import com.arcusys.valamis.model.PeriodTypes
import org.joda.time.DateTime
import org.mockito.Mockito._
import org.mockito.Matchers._
import org.scalatest.FunSuite
class ActivityGoalStatusCheckerComponentTest extends FunSuite {
val stateRepository = mock(classOf[CertificateGoalStateRepository])
when(stateRepository.create(notNull().asInstanceOf[CertificateGoalState])).thenReturn(null)
val checker = new ActivityGoalStatusCheckerComponent {
var certificateStateRepository: CertificateStateRepository = _
var activityGoalStorage: ActivityGoalStorage = _
var goalRepository: CertificateGoalRepository = _
def goalStateRepository = stateRepository
def checkActivityGoalPublic(userId: Long, activities: Seq[LSocialActivity], userJoinedDate: DateTime)
(activityGoal: ActivityGoal, goalData: CertificateGoal) = {
this.checkActivityGoal(userId, activities, userJoinedDate)(activityGoal, goalData)
}
}
test("no activities with unlimited test") {
val userJoinedDate = DateTime.now.minusDays(10)
val activities = Seq()
val goal = ActivityGoal(1, 11, "com.activity", 1)
val goalData = CertificateGoal(1, 11, GoalType.Activity, 0, PeriodTypes.UNLIMITED, 1, false, groupId = None)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.InProgress)
}
test("no activities with timeout test") {
val userJoinedDate = DateTime.now.minusDays(10)
val activities = Seq()
val goal = ActivityGoal(2, 11, "com.activity", 1)
    val goalData = CertificateGoal(2, 11, GoalType.Activity, 1, PeriodTypes.DAYS, 1, false, groupId = None)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Failed)
}
test("no activities with no timeout test") {
val userJoinedDate = DateTime.now.minusDays(10)
val activities = Seq()
val goal = ActivityGoal(3, 11, "com.activity", 1)
val goalData = CertificateGoal(3, 11, GoalType.Activity, 11, PeriodTypes.DAYS, 1, false, groupId = None)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.InProgress)
}
test("single activity success test") {
val userJoinedDate = DateTime.now.minusDays(10)
val activities = Seq(createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.activity"))
val goal = ActivityGoal(4, 11, "com.activity", 1)
val goalData = CertificateGoal(4, 11, GoalType.Activity, 2, PeriodTypes.DAYS, 1, false, groupId = None)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Success)
}
test("single activity timeout test") {
val userJoinedDate = DateTime.now.minusDays(10)
val activities = Seq(createSocialActivity(createdDate = userJoinedDate.plusDays(4), "com.activity"))
val goal = ActivityGoal(5, 11, "com.activity", 1)
val goalData = CertificateGoal(5, 11, GoalType.Activity, 2, PeriodTypes.DAYS, 1, false, groupId = None)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Failed)
}
test("single activity inprogress unlimited test") {
val userJoinedDate = DateTime.now.minusDays(10)
val activities = Seq(createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.other.activity"))
val goal = ActivityGoal(6, 11, "com.activity", 1)
val goalData = CertificateGoal(6, 11, GoalType.Activity, 0, PeriodTypes.UNLIMITED, 1, false, groupId = None)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.InProgress)
}
test("several activities success test") {
val userJoinedDate = DateTime.now.minusDays(10)
val goal = ActivityGoal(7, 11, "com.activity", 1)
val goalData = CertificateGoal(7, 11, GoalType.Activity, 2, PeriodTypes.DAYS, 1, false, groupId = None)
val activities = Seq(
createSocialActivity(createdDate = userJoinedDate.plusDays(4), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(3), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.activity")
)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Success)
}
test("several activities success test, goals count = 2") {
val userJoinedDate = DateTime.now.minusDays(10)
val goal = ActivityGoal(8, 11, "com.activity", 2)
val goalData = CertificateGoal(8, 11, GoalType.Activity, 5, PeriodTypes.DAYS, 1, false, groupId = None)
val activities = Seq(
createSocialActivity(createdDate = userJoinedDate.plusDays(8), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(3), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.activity")
)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Success)
}
test("several activities inprogress test") {
val userJoinedDate = DateTime.now.minusDays(10)
val goal = ActivityGoal(9, 11, "com.activity", 2)
val goalData = CertificateGoal(9, 11, GoalType.Activity, 15, PeriodTypes.DAYS, 1, false, groupId = None)
val activities = Seq(
createSocialActivity(createdDate = userJoinedDate.plusDays(4), "com.other.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(3), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.other.activity")
)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.InProgress)
}
test("several activities unlimited inprogress test") {
val userJoinedDate = DateTime.now.minusDays(10)
val goal = ActivityGoal(10, 11, "com.activity", 2)
val goalData = CertificateGoal(10, 11, GoalType.Activity, 0, PeriodTypes.UNLIMITED, 1, false, groupId = None)
val activities = Seq(
createSocialActivity(createdDate = userJoinedDate.plusDays(4), "com.other.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(3), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.other.activity")
)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.InProgress)
}
test("several activities failed test") {
val userJoinedDate = DateTime.now.minusDays(10)
val goal = ActivityGoal(11, 11, "com.activity", 2)
val goalData = CertificateGoal(11, 11, GoalType.Activity, 2, PeriodTypes.DAYS, 1, false, groupId = None)
val activities = Seq(
createSocialActivity(createdDate = userJoinedDate.plusDays(4), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(3), "com.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.activity")
)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Failed)
}
test("several activities failed test, goals count = 2") {
val userJoinedDate = DateTime.now.minusDays(10)
val goal = ActivityGoal(12, 11, "com.activity", 2)
val goalData = CertificateGoal(12, 11, GoalType.Activity, 5, PeriodTypes.DAYS, 1, false, groupId = None)
val activities = Seq(
createSocialActivity(createdDate = userJoinedDate.plusDays(4), "com.other.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(3), "com.other.activity"),
createSocialActivity(createdDate = userJoinedDate.plusDays(1), "com.activity")
)
val goalStatus = checker.checkActivityGoalPublic(1, activities, userJoinedDate)(goal, goalData)
assert(goalStatus == GoalStatuses.Failed)
}
private def createSocialActivity(createdDate: DateTime, activityName: String) = {
val activity = mock(classOf[LSocialActivity])
when(activity.getCreateDate).thenReturn(createdDate.toDate.getTime)
when(activity.getClassName).thenReturn(activityName)
activity
}
}
| igor-borisov/valamis | valamis-certificate/src/test/scala/com/arcusys/valamis/certificate/service/ActivityGoalStatusCheckerComponentTest.scala | Scala | gpl-3.0 | 8,677 |
package dk.gp.gpc.util
import breeze.linalg.DenseVector
import breeze.optimize.DiffFunction
import breeze.optimize.ApproximateGradientFunction
import util._
import breeze.linalg._
import dk.gp.gpc.GpcModel
case class GpcApproxLowerboundDiffFunction(initialGpcModel: GpcModel) extends DiffFunction[DenseVector[Double]] {
val epsilon = 1E-5
def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = {
try {
val currCovFuncParams = DenseVector(x.toArray.dropRight(1))
val currMean = x.toArray.last
val currModel = initialGpcModel.copy(covFuncParams = currCovFuncParams, gpMean = currMean)
val gpcFactorGraph = GpcFactorGraph(currModel)
val (calib, iters) = calibrateGpcFactorGraph(gpcFactorGraph, maxIter = 10)
val loglik = -calcGPCLoglik(gpcFactorGraph)
val grad: DenseVector[Double] = DenseVector.zeros[Double](x.size)
val xx = x.copy
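      // Forward-difference approximation of the gradient: perturb one parameter at a
      // time by epsilon and measure the change in the negative log likelihood.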
for ((k, v) <- x.iterator) {
xx(k) += epsilon
val gradLoglik = -calcGPCLoglik(gpcFactorGraph, DenseVector(xx.toArray.dropRight(1)), xx.toArray.last)
grad(k) = (gradLoglik - loglik) / epsilon
xx(k) -= epsilon
}
(loglik, grad)
} catch {
case e: NotConvergedException => (Double.NaN, DenseVector.zeros[Double](x.size) * Double.NaN)
case e: IllegalArgumentException => e.printStackTrace(); System.exit(-1); (Double.NaN, DenseVector.zeros[Double](x.size) * Double.NaN)
}
}
}
| danielkorzekwa/bayes-scala-gp | src/main/scala/dk/gp/gpc/util/GpcApproxLowerboundDiffFunction.scala | Scala | bsd-2-clause | 1,458 |
// This is a playground for AI; it offers a game space for AI to work with.
// Copyright (C) 2016 Jappe Klooster
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program.If not, see <http://www.gnu.org/licenses/>.
package nl.jappieklooster.gdx.mapstare.states
import com.badlogic.gdx.{InputMultiplexer, Gdx}
import com.badlogic.gdx.graphics.Color
import com.badlogic.gdx.graphics.g2d.{SpriteBatch, BitmapFont}
import com.badlogic.gdx.scenes.scene2d.Stage
import com.badlogic.gdx.scenes.scene2d.ui._
import com.badlogic.gdx.math.Vector2
import nl.jappieklooster.gdx.mapstare.Cam
import nl.jappieklooster.gdx.mapstare.akka.server.Create
import nl.jappieklooster.gdx.mapstare.controller.{Updateable, Updater}
import nl.jappieklooster.gdx.mapstare.input.gui.OnClick
import nl.jappieklooster.gdx.mapstare.input.{PlacementClick, SelectionBox, CamMovement}
import nl.jappieklooster.gdx.mapstare.model.{World, GameTick}
import nl.jappieklooster.gdx.mapstare.model.math._
import nl.jappieklooster.gdx.mapstare.view.Animation
import nl.jappieklooster.gdx.mapstare.Game
class BuildState(game:Game) extends GameState(game){
	// TODO: make this scalable (also multiple types of units, should actually be doable with a list)
val clickThing = new PlacementClick(
placeCallback = item=> {
game.updateActor ! new Create(item)
},
cam,
inputMultiplexer
)
override def enter(stateMachine: StateMachine):Unit = {
val factory = new UIFactory()
val button = factory.button("Start!")
button.setWidth(200)
button.setHeight(50)
val dialog = factory.dialog("click message")
dialog.addListener(OnClick({
dialog.hide()
}
))
button.addListener(OnClick({
if(world.units == Nil){
dialog.getTitleLabel.setText("No units made :s")
dialog.show(stage)
}
stateMachine.changeTo(new FightState(game))
}
))
val (scrolltable, scrollpane) = factory.scrollPane()
val container = factory.table()
container.add(scrollpane).width(200).height(100)
container.row()
container.add(button)
val label = factory.button("swordman")
label.addListener(
clickThing
)
scrolltable.add(label)
scrolltable.row()
scrolltable.add("Horseman")
scrolltable.row()
scrolltable.add("Archer")
scrolltable.row()
scrolltable.add("Catapult")
scrolltable.row()
scrolltable.add("Wardog")
scrolltable.row()
scrolltable.add("Elephant")
scrolltable.row()
scrolltable.add("Dragon")
container.setWidth(200)
container.setHeight(120)
stage.addActor(container)
}
override def exit():Unit = {
stage.getRoot.clear()
inputMultiplexer.removeProcessor(clickThing.processor)
}
}
| jappeace/gdx_ai_gamespace | core/src/main/scala/nl/jappieklooster/gdx/mapstare/states/BuildState.scala | Scala | gpl-3.0 | 3,155 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.registration.returns
import config.{AuthClientConnector, BaseControllerComponents, FrontendAppConfig}
import controllers.BaseController
import forms.ReceiveGoodsNipForm
import models.api.{NETP, NonUkNonEstablished}
import models.{ConditionalValue, NIPCompliance, TransferOfAGoingConcern}
import play.api.mvc.{Action, AnyContent}
import services.{SessionService, _}
import views.html.returns.ReceiveGoodsNip
import javax.inject.Inject
import scala.concurrent.{ExecutionContext, Future}
class ReceiveGoodsNipController @Inject()(val sessionService: SessionService,
val authConnector: AuthClientConnector,
val applicantDetailsService: ApplicantDetailsService,
val returnsService: ReturnsService,
val vatRegistrationService: VatRegistrationService,
view: ReceiveGoodsNip)
(implicit appConfig: FrontendAppConfig,
val executionContext: ExecutionContext,
baseControllerComponents: BaseControllerComponents)
extends BaseController with SessionProfile {
def show: Action[AnyContent] = isAuthenticatedWithProfile() {
implicit request =>
implicit profile =>
returnsService.getReturns.map { returns =>
returns.northernIrelandProtocol match {
case Some(NIPCompliance(_, Some(ConditionalValue(receiveGoods, amount)))) => Ok(view(ReceiveGoodsNipForm.form.fill(receiveGoods, amount)))
case _ => Ok(view(ReceiveGoodsNipForm.form))
}
}
}
def submit: Action[AnyContent] = isAuthenticatedWithProfile() {
implicit request =>
implicit profile =>
ReceiveGoodsNipForm.form.bindFromRequest.fold(
badForm => Future.successful(BadRequest(view(badForm))),
successForm => {
val (receiveGoods, amount) = successForm
for {
returns <- returnsService.getReturns
updatedReturns = returns.copy(
northernIrelandProtocol = Some(NIPCompliance(returns.northernIrelandProtocol.flatMap(_.goodsToEU), Some(ConditionalValue(receiveGoods,amount))))
)
_ <- returnsService.submitReturns(updatedReturns)
regReason <- vatRegistrationService.getEligibilitySubmissionData.map(_.registrationReason)
partyType <- vatRegistrationService.partyType
} yield (partyType, regReason) match {
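              // NETP and non-UK non-established businesses, as well as TOGC registrations,
              // go straight to the returns frequency page; everyone else resolves the VAT
              // registration start date first.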
case (NETP | NonUkNonEstablished, _) | (_, TransferOfAGoingConcern) =>
Redirect(controllers.registration.returns.routes.ReturnsController.returnsFrequencyPage)
case _ =>
Redirect(routes.VatRegStartDateResolverController.resolve)
}
}
)
}
}
| hmrc/vat-registration-frontend | app/controllers/registration/returns/ReceiveGoodsNipController.scala | Scala | apache-2.0 | 3,591 |
package com.wavesplatform.transaction.lease
import com.wavesplatform.account.{AddressScheme, KeyPair, PrivateKey, PublicKey}
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.transaction._
import com.wavesplatform.transaction.serialization.impl.LeaseCancelTxSerializer
import com.wavesplatform.transaction.validation.TxValidator
import com.wavesplatform.transaction.validation.impl.LeaseCancelTxValidator
import monix.eval.Coeval
import play.api.libs.json.JsObject
import scala.util.Try
final case class LeaseCancelTransaction(
version: TxVersion,
sender: PublicKey,
leaseId: ByteStr,
fee: TxAmount,
timestamp: TxTimestamp,
proofs: Proofs,
chainId: Byte
) extends SigProofsSwitch
with VersionedTransaction
with TxWithFee.InWaves
with FastHashId
with LegacyPBSwitch.V3 {
override def builder: TransactionParser = LeaseCancelTransaction
override val bodyBytes: Coeval[Array[TxVersion]] = Coeval.evalOnce(LeaseCancelTxSerializer.bodyBytes(this))
override val bytes: Coeval[Array[TxVersion]] = Coeval.evalOnce(LeaseCancelTxSerializer.toBytes(this))
override val json: Coeval[JsObject] = Coeval.evalOnce(LeaseCancelTxSerializer.toJson(this))
}
object LeaseCancelTransaction extends TransactionParser {
type TransactionT = LeaseCancelTransaction
val supportedVersions: Set[TxVersion] = Set(1, 2, 3)
val typeId: TxType = 9: Byte
implicit val validator: TxValidator[LeaseCancelTransaction] = LeaseCancelTxValidator
implicit def sign(tx: LeaseCancelTransaction, privateKey: PrivateKey): LeaseCancelTransaction =
tx.copy(proofs = Proofs(crypto.sign(privateKey, tx.bodyBytes())))
override def parseBytes(bytes: Array[Byte]): Try[LeaseCancelTransaction] =
LeaseCancelTxSerializer.parseBytes(bytes)
def create(
version: TxVersion,
sender: PublicKey,
leaseId: ByteStr,
fee: TxAmount,
timestamp: TxTimestamp,
proofs: Proofs,
chainId: Byte = AddressScheme.current.chainId
): Either[ValidationError, TransactionT] =
LeaseCancelTransaction(version, sender, leaseId, fee, timestamp, proofs, chainId).validatedEither
def signed(
version: TxVersion,
sender: PublicKey,
leaseId: ByteStr,
fee: TxAmount,
timestamp: TxTimestamp,
signer: PrivateKey,
chainId: Byte = AddressScheme.current.chainId
): Either[ValidationError, TransactionT] =
create(version, sender, leaseId, fee, timestamp, Nil, chainId).map(_.signWith(signer))
def selfSigned(
version: TxVersion,
sender: KeyPair,
leaseId: ByteStr,
fee: TxAmount,
timestamp: TxTimestamp,
chainId: Byte = AddressScheme.current.chainId
): Either[ValidationError, TransactionT] =
signed(version, sender.publicKey, leaseId, fee, timestamp, sender.privateKey, chainId).map(_.signWith(sender.privateKey))
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/transaction/lease/LeaseCancelTransaction.scala | Scala | mit | 2,995 |
package isabelle.eclipse.launch
import java.net.{MalformedURLException, URL}
import org.eclipse.jface.resource.ImageDescriptor
/**
* Isabelle launch image definitions.
*
 * When images are used in label providers (e.g. where an Image is required), they must be disposed manually.
* For convenience, [[org.eclipse.jface.resource.ResourceManager]] could be used.
*
* @author Andrius Velykis
*/
object IsabelleLaunchImages {
private lazy val ICON_BASE_URL = IsabelleLaunchPlugin.plugin.getBundle.getEntry("icons/")
val MISSING_ICON = ImageDescriptor.getMissingImageDescriptor
lazy val TAB_MAIN = create("main_tab.gif")
lazy val TAB_INSTALLATION = create("isabelle.png")
lazy val TAB_SESSION_DIRS = create("session-dirs.gif")
lazy val TAB_BUILD = create("build_tab.gif")
lazy val JOB_BUILD = create("isabelle.png")
lazy val SESSION = create("logic_obj.gif")
private def create(iconPath: String) = {
try {
val url = new URL(ICON_BASE_URL, iconPath)
ImageDescriptor.createFromURL(url)
} catch {
case _: MalformedURLException => MISSING_ICON
}
}
}
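/**
 * A minimal sketch of the ResourceManager pattern mentioned above; the object name is
 * hypothetical and it assumes JFace's global resources are available. Every image
 * created through it shares one lifecycle and is released by a single `dispose()`.
 */
object IsabelleLaunchImagesUsageSketch {
  import org.eclipse.jface.resource.{JFaceResources, LocalResourceManager}
  private lazy val resources = new LocalResourceManager(JFaceResources.getResources)
  /** Creates (or reuses) the concrete SWT Image behind the session descriptor. */
  def sessionImage() = resources.createImage(IsabelleLaunchImages.SESSION)
  /** Disposes every image created through this manager. */
  def dispose(): Unit = resources.dispose()
}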
| andriusvelykis/isabelle-eclipse | isabelle.eclipse.launch/src/isabelle/eclipse/launch/IsabelleLaunchImages.scala | Scala | epl-1.0 | 1,107 |
package com.github.uchibori3.mfcloud.invoice.service
import akka.Done
import akka.actor.ActorSystem
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{ HttpEntity, HttpRequest, HttpResponse, StatusCodes }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink }
import akka.stream.testkit.scaladsl.TestSink
import com.github.uchibori3.mfcloud.invoice.Error.MfcloudException
import com.github.uchibori3.mfcloud.invoice.response.BillResponse
import com.github.uchibori3.mfcloud.invoice.HttpClient
import com.github.uchibori3.mfcloud.invoice.testkit.{ BillResponseFixtures, CreateBillRequestFixtures }
import io.circe.ParsingFailure
import io.circe.generic.auto._
import io.circe.syntax._
import org.scalamock.scalatest.MockFactory
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.{ Await, ExecutionContextExecutor }
import scala.concurrent.duration.Duration
class BillsSpec extends FlatSpec with DiagrammedAssertions with BeforeAndAfterAll with MockFactory with ScalaFutures {
implicit val system: ActorSystem = ActorSystem("bills")
implicit val executor: ExecutionContextExecutor = system.dispatcher
implicit val materializer: ActorMaterializer = ActorMaterializer()
override protected def afterAll(): Unit = {
Await.result(system.terminate(), Duration.Inf)
}
private val token = "e60aa4e706e05aa73a3494f0b54f7b8d81e0542897708e2d64703c52cb40af02"
private val credential = OAuth2BearerToken(token)
private val host = "invoice.moneyforward.com"
private val maxConnections = 4
"#post" should "returns bill response" in {
val request = CreateBillRequestFixtures.build
val response = BillResponseFixtures.build
val httpEntity = HttpEntity(`application/json`, response.asJson.noSpaces)
val httpResponse = HttpResponse(StatusCodes.Created, Nil, httpEntity)
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
bills
.post(request)
.runWith(TestSink.probe[Either[Throwable, BillResponse]])
.requestNext(Right(response))
.expectComplete()
}
it should "returns failure when entity unmarshalled" in {
val request = CreateBillRequestFixtures.build
val httpEntity = HttpEntity(`application/json`, "{")
val httpResponse = HttpResponse(StatusCodes.Created, Nil, httpEntity)
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
val result = bills.post(request).runWith(Sink.head)
whenReady(result) { ex =>
assert(ex.isLeft)
assert(ex.left.get.isInstanceOf[ParsingFailure])
}
}
it should "returns MfcloudException when department id is not found" in {
val request = CreateBillRequestFixtures.build
val httpEntity = HttpEntity(
`application/json`,
"""
|{
| "code" : "404",
| "errors" : [
| {
| "message" : "存在しないIDが渡されました。"
| }
| ]
|}
""".stripMargin
)
val httpResponse = HttpResponse(StatusCodes.NotFound, Nil, httpEntity)
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
val result = bills.post(request).runWith(Sink.head)
whenReady(result) { ex =>
assert(ex.isLeft)
assert(ex.left.get.isInstanceOf[MfcloudException])
}
}
"#getPdf" should "returns http response" in {
val billId = "ID"
val httpResponse = HttpResponse()
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
bills
.getPdf(billId)
.runWith(TestSink.probe[Either[Throwable, HttpResponse]])
.requestNext(Right(httpResponse))
.expectComplete()
}
it should "returns MfcloudException when not found" in {
val httpEntity = HttpEntity(
`application/json`,
"""
|{
| "code" : "404",
| "errors" : [
| {
| "message" : "存在しないIDが渡されました。"
| }
| ]
|}
""".stripMargin
)
val httpResponse = HttpResponse(StatusCodes.NotFound, Nil, httpEntity)
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
val result = bills.getPdf("ID").runWith(Sink.head)
whenReady(result) { ex =>
assert(ex.isLeft)
assert(ex.left.get.isInstanceOf[MfcloudException])
}
}
"#delete" should "returns empty response" in {
val billId = "ID"
val httpResponse = HttpResponse(StatusCodes.NoContent)
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
bills
.delete(billId)
.runWith(TestSink.probe[Either[Throwable, Done]])
.requestNext(Right(Done))
.expectComplete()
}
it should "returns MfcloudException when not found" in {
val httpEntity = HttpEntity(
`application/json`,
"""
|{
| "code" : "404",
| "errors" : [
| {
| "message" : "存在しないIDが渡されました。"
| }
| ]
|}
""".stripMargin
)
val httpResponse = HttpResponse(StatusCodes.NotFound, Nil, httpEntity)
val httpClient = mock[HttpClient]
(httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))
val bills = new BillsImpl(host, httpClient, credential, maxConnections)
val result = bills.delete("ID").runWith(Sink.head)
whenReady(result) { ex =>
assert(ex.isLeft)
assert(ex.left.get.isInstanceOf[MfcloudException])
}
}
}
| Uchibori3/mfcloud-invoice-scala | src/test/scala/com/github/uchibori3/mfcloud/invoice/service/BillsSpec.scala | Scala | apache-2.0 | 6,629 |
package com.gilt.gfc.concurrent
import java.util.concurrent.{ExecutorService => JExecutorService, ScheduledExecutorService => JScheduledExecutorService}
import com.gilt.gfc.concurrent.{ScheduledExecutorService => GScheduledExecutorService}
import org.scalatest.mockito.{MockitoSugar => ScalaTestMockitoSugar}
import org.scalatest.{WordSpecLike, Matchers => ScalaTestMatchers}
class JavaConversionsSpec extends WordSpecLike
with ScalaTestMatchers
with ScalaTestMockitoSugar {
"When converting java `ScheduledExecutorService`, and JavaConversions is imported" must {
"compile" in {
import JavaConversions._
val mockJavaSchExecService = mock[JScheduledExecutorService]
val serviceUnderTest: GScheduledExecutorService = mockJavaSchExecService
}
}
"When converting java `ExecutorService`, and JavaConversions is imported" must {
"compile" in {
import JavaConversions._
val mockJavaService = mock[JExecutorService]
val serviceUnderTest: ExecutorService = mockJavaService
}
}
}
| gilt/gfc-concurrent | src/test/scala/com/gilt/gfc/concurrent/JavaConversionsSpec.scala | Scala | apache-2.0 | 1,047 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.commons.math3.stat.inference.ChiSquareTest
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
class ConfigBehaviorSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("SPARK-22160 spark.sql.execution.rangeExchange.sampleSizePerPartition") {
// In this test, we run a sort and compute the histogram for partition size post shuffle.
// With a high sample count, the partition size should be more evenly distributed, and has a
// low chi-sq test value.
// Also the whole code path for range partitioning as implemented should be deterministic
// (it uses the partition id as the seed), so this test shouldn't be flaky.
val numPartitions = 4
def computeChiSquareTest(): Double = {
val n = 10000
// Trigger a sort
val data = spark.range(0, n, 1, 1).sort('id)
.selectExpr("SPARK_PARTITION_ID() pid", "id").as[(Int, Long)].collect()
// Compute histogram for the number of records per partition post sort
val dist = data.groupBy(_._1).map(_._2.length.toLong).toArray
assert(dist.length == 4)
new ChiSquareTest().chiSquare(
Array.fill(numPartitions) { n.toDouble / numPartitions },
dist)
}
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> numPartitions.toString) {
// The default chi-sq value should be low
assert(computeChiSquareTest() < 100)
withSQLConf(SQLConf.RANGE_EXCHANGE_SAMPLE_SIZE_PER_PARTITION.key -> "1") {
// If we only sample one point, the range boundaries will be pretty bad and the
// chi-sq value would be very high.
assert(computeChiSquareTest() > 300)
}
}
}
}
| akopich/spark | sql/core/src/test/scala/org/apache/spark/sql/ConfigBehaviorSuite.scala | Scala | apache-2.0 | 2,557 |
import sbt._
import Keys._
object HW3Build extends Build {
lazy val root = Project(id = "hw3",
base = file("."))
}
| mpgarate/ProgLang-Assignments | HW3/project/Build.scala | Scala | mit | 145 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.annotation.meta.param
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import scala.language.reflectiveCalls
import scala.util.control.NonFatal
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.shuffle.{FetchFailedException, MetadataFetchFailedException}
import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, Utils}
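/**
 * An event process loop that delivers events synchronously on the calling thread instead of
 * posting them to the asynchronous event queue, so tests can drive the DAGScheduler
 * deterministically.
 */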
class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler)
extends DAGSchedulerEventProcessLoop(dagScheduler) {
override def post(event: DAGSchedulerEvent): Unit = {
try {
// Forward event to `onReceive` directly to avoid processing event asynchronously.
onReceive(event)
} catch {
case NonFatal(e) => onError(e)
}
}
override def onError(e: Throwable): Unit = {
logError("Error in DAGSchedulerEventLoop: ", e)
dagScheduler.stop()
throw e
}
}
/**
* An RDD for passing to DAGScheduler. These RDDs will use the dependencies and
* preferredLocations (if any) that are passed to them. They are deliberately not executable
* so we can test that DAGScheduler does not try to execute RDDs locally.
*
* Optionally, one can pass in a list of locations to use as preferred locations for each task,
* and a MapOutputTrackerMaster to enable reduce task locality. We pass the tracker separately
* because, in this test suite, it won't be the same as sc.env.mapOutputTracker.
*/
class MyRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil,
@(transient @param) tracker: MapOutputTrackerMaster = null)
extends RDD[(Int, Int)](sc, dependencies) with Serializable {
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new RuntimeException("should not be reached")
override def getPartitions: Array[Partition] = (0 until numPartitions).map(i => new Partition {
override def index: Int = i
}).toArray
override def getPreferredLocations(partition: Partition): Seq[String] = {
if (locations.isDefinedAt(partition.index)) {
locations(partition.index)
} else if (tracker != null && dependencies.size == 1 &&
dependencies(0).isInstanceOf[ShuffleDependency[_, _, _]]) {
// If we have only one shuffle dependency, use the same code path as ShuffledRDD for locality
val dep = dependencies(0).asInstanceOf[ShuffleDependency[_, _, _]]
tracker.getPreferredLocationsForShuffle(dep, partition.index)
} else {
Nil
}
}
override def toString: String = "DAGSchedulerSuiteRDD " + id
}
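/** Marker exception type thrown by tests in this suite when they need a recognizable failure. */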
class DAGSchedulerSuiteDummyException extends Exception
class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {
import DAGSchedulerSuite._
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
val conf = new SparkConf
/** Set of TaskSets the DAGScheduler has requested executed. */
val taskSets = scala.collection.mutable.Buffer[TaskSet]()
/** Stages for which the DAGScheduler has called TaskScheduler.cancelTasks(). */
val cancelledStages = new HashSet[Int]()
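  /**
   * A stub TaskScheduler that records submitted TaskSets in `taskSets` and cancelled stage ids in
   * `cancelledStages` instead of actually launching tasks.
   */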
val taskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start() = {}
override def stop() = {}
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId): Boolean = true
override def submitTasks(taskSet: TaskSet) = {
// normally done by TaskSetManager
taskSet.tasks.foreach(_.epoch = mapOutputTracker.getEpoch)
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean) {
cancelledStages += stageId
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
override def defaultParallelism() = 2
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
/** Length of time to wait while draining listener events. */
val WAIT_TIMEOUT_MILLIS = 10000
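  /**
   * Listener that records submitted stage infos, successful/failed stage ids, the order of stage
   * execution, and ended task ids, so tests can make assertions about the events posted by the
   * scheduler.
   */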
val sparkListener = new SparkListener() {
val submittedStageInfos = new HashSet[StageInfo]
val successfulStages = new HashSet[Int]
val failedStages = new ArrayBuffer[Int]
val stageByOrderOfExecution = new ArrayBuffer[Int]
val endedTasks = new HashSet[Long]
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted) {
submittedStageInfos += stageSubmitted.stageInfo
}
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted) {
val stageInfo = stageCompleted.stageInfo
stageByOrderOfExecution += stageInfo.stageId
if (stageInfo.failureReason.isEmpty) {
successfulStages += stageInfo.stageId
} else {
failedStages += stageInfo.stageId
}
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
endedTasks += taskEnd.taskInfo.taskId
}
}
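  // Scheduler components under test; these are re-created for every test case in init().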
var mapOutputTracker: MapOutputTrackerMaster = null
var broadcastManager: BroadcastManager = null
var securityMgr: SecurityManager = null
var scheduler: DAGScheduler = null
var dagEventProcessLoopTester: DAGSchedulerEventProcessLoop = null
/**
* Set of cache locations to return from our mock BlockManagerMaster.
* Keys are (rdd ID, partition ID). Anything not present will return an empty
* list of cache locations silently.
*/
val cacheLocations = new HashMap[(Int, Int), Seq[BlockManagerId]]
// stub out BlockManagerMaster.getLocations to use our cacheLocations
val blockManagerMaster = new BlockManagerMaster(null, conf, true) {
override def getLocations(blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
blockIds.map {
_.asRDDId.map(id => (id.rddId -> id.splitIndex)).flatMap(key => cacheLocations.get(key)).
getOrElse(Seq())
}.toIndexedSeq
}
override def removeExecutor(execId: String) {
// don't need to propagate to the driver, which we don't have
}
}
/** The list of results that DAGScheduler has collected. */
val results = new HashMap[Int, Any]()
var failure: Exception = _
val jobListener = new JobListener() {
override def taskSucceeded(index: Int, result: Any) = results.put(index, result)
override def jobFailed(exception: Exception) = { failure = exception }
}
/** A simple helper class for creating custom JobListeners */
class SimpleListener extends JobListener {
val results = new HashMap[Int, Any]
var failure: Exception = null
override def taskSucceeded(index: Int, result: Any): Unit = results.put(index, result)
override def jobFailed(exception: Exception): Unit = { failure = exception }
}
override def beforeEach(): Unit = {
super.beforeEach()
init(new SparkConf())
}
private def init(testConf: SparkConf): Unit = {
sc = new SparkContext("local", "DAGSchedulerSuite", testConf)
sparkListener.submittedStageInfos.clear()
sparkListener.successfulStages.clear()
sparkListener.failedStages.clear()
sparkListener.endedTasks.clear()
failure = null
sc.addSparkListener(sparkListener)
taskSets.clear()
cancelledStages.clear()
cacheLocations.clear()
results.clear()
securityMgr = new SecurityManager(conf)
broadcastManager = new BroadcastManager(true, conf, securityMgr)
mapOutputTracker = new MapOutputTrackerMaster(conf, broadcastManager, true) {
override def sendTracker(message: Any): Unit = {
// no-op, just so we can stop this to avoid leaking threads
}
}
scheduler = new DAGScheduler(
sc,
taskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(scheduler)
}
override def afterEach(): Unit = {
try {
scheduler.stop()
dagEventProcessLoopTester.stop()
mapOutputTracker.stop()
broadcastManager.stop()
} finally {
super.afterEach()
}
}
override def afterAll() {
super.afterAll()
}
/**
* Type of RDD we use for testing. Note that we should never call the real RDD compute methods.
* This is a pair RDD type so it can always be used in ShuffleDependencies.
*/
type PairOfIntsRDD = RDD[(Int, Int)]
/**
* Process the supplied event as if it were the top of the DAGScheduler event queue, expecting
* the scheduler not to exit.
*
* After processing the event, submit waiting stages as is done on most iterations of the
* DAGScheduler event loop.
*/
private def runEvent(event: DAGSchedulerEvent) {
dagEventProcessLoopTester.post(event)
}
/**
* When we submit dummy Jobs, this is the compute function we supply. Except in a local test
* below, we do not expect this function to ever be executed; instead, we will return results
* directly through CompletionEvents.
*/
private val jobComputeFunc = (context: TaskContext, it: Iterator[(_)]) =>
it.next.asInstanceOf[Tuple2[_, _]]._1
/** Send the given CompletionEvent messages for the tasks in the TaskSet. */
private def complete(taskSet: TaskSet, results: Seq[(TaskEndReason, Any)]) {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(makeCompletionEvent(taskSet.tasks(i), result._1, result._2))
}
}
}
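  /**
   * Like `complete`, but attaches a long accumulator with the given id to each completion event.
   */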
private def completeWithAccumulator(
accumId: Long,
taskSet: TaskSet,
results: Seq[(TaskEndReason, Any)]) {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(makeCompletionEvent(
taskSet.tasks(i),
result._1,
result._2,
Seq(AccumulatorSuite.createLongAccum("", initValue = 1, id = accumId))))
}
}
}
/** Submits a job to the scheduler and returns the job id. */
private def submit(
rdd: RDD[_],
partitions: Array[Int],
func: (TaskContext, Iterator[_]) => _ = jobComputeFunc,
listener: JobListener = jobListener,
properties: Properties = null): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, func, partitions, CallSite("", ""), listener, properties))
jobId
}
/** Submits a map stage to the scheduler and returns the job id. */
private def submitMapStage(
shuffleDep: ShuffleDependency[_, _, _],
listener: JobListener = jobListener): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(MapStageSubmitted(jobId, shuffleDep, CallSite("", ""), listener))
jobId
}
/** Sends TaskSetFailed to the scheduler. */
private def failed(taskSet: TaskSet, message: String) {
runEvent(TaskSetFailed(taskSet, message, None))
}
/** Sends JobCancelled to the DAG scheduler. */
private def cancel(jobId: Int) {
runEvent(JobCancelled(jobId, None))
}
test("[SPARK-3353] parent stage should have lower stage id") {
sparkListener.stageByOrderOfExecution.clear()
sc.parallelize(1 to 10).map(x => (x, x)).reduceByKey(_ + _, 4).count()
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.stageByOrderOfExecution.length === 2)
assert(sparkListener.stageByOrderOfExecution(0) < sparkListener.stageByOrderOfExecution(1))
}
/**
* This test ensures that DAGScheduler build stage graph correctly.
*
* Suppose you have the following DAG:
*
   * [A] <--(s_A)-- [B] <--(s_B)-- [C] <--(s_C)-- [D]
   *   \                            /
   *     <-------------------------
*
   * Here, RDD B has a shuffle dependency on RDD A, and RDD C has a shuffle dependency on both
* B and A. The shuffle dependency IDs are numbers in the DAGScheduler, but to make the example
* easier to understand, let's call the shuffled data from A shuffle dependency ID s_A and the
* shuffled data from B shuffle dependency ID s_B.
*
* Note: [] means an RDD, () means a shuffle dependency.
*/
test("[SPARK-13902] Ensure no duplicate stages are created") {
val rddA = new MyRDD(sc, 1, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val s_A = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 1, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val s_B = shuffleDepB.shuffleId
val rddC = new MyRDD(sc, 1, List(shuffleDepA, shuffleDepB), tracker = mapOutputTracker)
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val s_C = shuffleDepC.shuffleId
val rddD = new MyRDD(sc, 1, List(shuffleDepC), tracker = mapOutputTracker)
submit(rddD, Array(0))
assert(scheduler.shuffleIdToMapStage.size === 3)
assert(scheduler.activeJobs.size === 1)
val mapStageA = scheduler.shuffleIdToMapStage(s_A)
val mapStageB = scheduler.shuffleIdToMapStage(s_B)
val mapStageC = scheduler.shuffleIdToMapStage(s_C)
val finalStage = scheduler.activeJobs.head.finalStage
assert(mapStageA.parents.isEmpty)
assert(mapStageB.parents === List(mapStageA))
assert(mapStageC.parents === List(mapStageA, mapStageB))
assert(finalStage.parents === List(mapStageC))
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(3), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("All shuffle files on the slave should be cleaned up when slave lost") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set("spark.shuffle.service.enabled", "true")
conf.set("spark.files.fetchFailure.unRegisterOutputOnHost", "true")
init(conf)
runEvent(ExecutorAdded("exec-hostA1", "hostA"))
runEvent(ExecutorAdded("exec-hostA2", "hostA"))
runEvent(ExecutorAdded("exec-hostB", "hostB"))
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(3))
val secondShuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// map stage1 completes successfully, with one task on each executor
complete(taskSets(0), Seq(
(Success,
MapStatus(BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), 1)),
(Success,
MapStatus(BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), 1)),
(Success, makeMapStatus("hostB", 1))
))
// map stage2 completes successfully, with one task on each executor
complete(taskSets(1), Seq(
(Success,
MapStatus(BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), 1)),
(Success,
MapStatus(BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), 1)),
(Success, makeMapStatus("hostB", 1))
))
// make sure our test setup is correct
val initialMapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus1.count(_ != null) === 3)
assert(initialMapStatus1.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
val initialMapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus2.count(_ != null) === 3)
assert(initialMapStatus2.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
// reduce stage fails with a fetch failure from one host
complete(taskSets(2), Seq(
(FetchFailed(BlockManagerId("exec-hostA2", "hostA", 12345), firstShuffleId, 0, 0, "ignored"),
null)
))
// Here is the main assertion -- make sure that we de-register
    // the map outputs for both map stages from both executors on hostA
val mapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
assert(mapStatus1.count(_ != null) === 1)
assert(mapStatus1(2).location.executorId === "exec-hostB")
assert(mapStatus1(2).location.host === "hostB")
val mapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
assert(mapStatus2.count(_ != null) === 1)
assert(mapStatus2(2).location.executorId === "exec-hostB")
assert(mapStatus2(2).location.host === "hostB")
}
test("zero split job") {
var numResults = 0
var failureReason: Option[Exception] = None
val fakeListener = new JobListener() {
override def taskSucceeded(partition: Int, value: Any): Unit = numResults += 1
override def jobFailed(exception: Exception): Unit = {
failureReason = Some(exception)
}
}
val jobId = submit(new MyRDD(sc, 0, Nil), Array(), listener = fakeListener)
assert(numResults === 0)
cancel(jobId)
assert(failureReason.isDefined)
assert(failureReason.get.getMessage() === "Job 0 cancelled ")
}
test("run trivial job") {
submit(new MyRDD(sc, 1, Nil), Array(0))
complete(taskSets(0), List((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial job w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0))
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("equals and hashCode AccumulableInfo") {
val accInfo1 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = true, countFailedValues = false)
val accInfo2 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
val accInfo3 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
assert(accInfo1 !== accInfo2)
assert(accInfo2 === accInfo3)
assert(accInfo2.hashCode() === accInfo3.hashCode())
}
test("cache location preferences w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil).cache()
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
cacheLocations(baseRdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(finalRdd, Array(0))
val taskSet = taskSets(0)
assertLocations(taskSet, Seq(Seq("hostA", "hostB")))
complete(taskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("regression test for getCacheLocs") {
val rdd = new MyRDD(sc, 3, Nil).cache()
cacheLocations(rdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
cacheLocations(rdd.id -> 1) =
Seq(makeBlockManagerId("hostB"), makeBlockManagerId("hostC"))
cacheLocations(rdd.id -> 2) =
Seq(makeBlockManagerId("hostC"), makeBlockManagerId("hostD"))
val locs = scheduler.getCacheLocs(rdd).map(_.map(_.host))
assert(locs === Seq(Seq("hostA", "hostB"), Seq("hostB", "hostC"), Seq("hostC", "hostD")))
}
/**
* This test ensures that if a particular RDD is cached, RDDs earlier in the dependency chain
* are not computed. It constructs the following chain of dependencies:
* +---+ shuffle +---+ +---+ +---+
* | A |<--------| B |<---| C |<---| D |
* +---+ +---+ +---+ +---+
* Here, B is derived from A by performing a shuffle, C has a one-to-one dependency on B,
* and D similarly has a one-to-one dependency on C. If none of the RDDs were cached, this
* set of RDDs would result in a two stage job: one ShuffleMapStage, and a ResultStage that
* reads the shuffled data from RDD A. This test ensures that if C is cached, the scheduler
* doesn't perform a shuffle, and instead computes the result using a single ResultStage
* that reads C's cached data.
*/
test("getMissingParentStages should consider all ancestor RDDs' cache statuses") {
val rddA = new MyRDD(sc, 1, Nil)
val rddB = new MyRDD(sc, 1, List(new ShuffleDependency(rddA, new HashPartitioner(1))),
tracker = mapOutputTracker)
val rddC = new MyRDD(sc, 1, List(new OneToOneDependency(rddB))).cache()
val rddD = new MyRDD(sc, 1, List(new OneToOneDependency(rddC)))
cacheLocations(rddC.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(rddD, Array(0))
assert(scheduler.runningStages.size === 1)
// Make sure that the scheduler is running the final result stage.
// Because C is cached, the shuffle map stage to compute A does not need to be run.
assert(scheduler.runningStages.head.isInstanceOf[ResultStage])
}
test("avoid exponential blowup when getting preferred locs list") {
// Build up a complex dependency graph with repeated zip operations, without preferred locations
var rdd: RDD[_] = new MyRDD(sc, 1, Nil)
(1 to 30).foreach(_ => rdd = rdd.zip(rdd))
// getPreferredLocs runs quickly, indicating that exponential graph traversal is avoided.
failAfter(10 seconds) {
val preferredLocs = scheduler.getPreferredLocs(rdd, 0)
// No preferred locations are returned.
assert(preferredLocs.length === 0)
}
}
test("unserializable task") {
val unserializableRdd = new MyRDD(sc, 1, Nil) {
class UnserializableClass
val unserializable = new UnserializableClass
}
submit(unserializableRdd, Array(0))
assert(failure.getMessage.startsWith(
"Job aborted due to stage failure: Task not serializable:"))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty()
}
test("trivial job failure") {
submit(new MyRDD(sc, 1, Nil), Array(0))
failed(taskSets(0), "some failure")
assert(failure.getMessage === "Job aborted due to stage failure: some failure")
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty()
}
test("trivial job cancellation") {
val rdd = new MyRDD(sc, 1, Nil)
val jobId = submit(rdd, Array(0))
cancel(jobId)
assert(failure.getMessage === s"Job $jobId cancelled ")
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty()
}
test("job cancellation no-kill backend") {
// make sure that the DAGScheduler doesn't crash when the TaskScheduler
// doesn't implement killTask()
val noKillTaskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean) {
throw new UnsupportedOperationException
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = {
throw new UnsupportedOperationException
}
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {
throw new UnsupportedOperationException
}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId): Boolean = true
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
val noKillScheduler = new DAGScheduler(
sc,
noKillTaskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(noKillScheduler)
val jobId = submit(new MyRDD(sc, 1, Nil), Array(0))
cancel(jobId)
// Because the job wasn't actually cancelled, we shouldn't have received a failure message.
assert(failure === null)
// When the task set completes normally, state should be correctly updated.
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.isEmpty)
assert(sparkListener.successfulStages.contains(0))
}
test("run trivial shuffle") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
complete(taskSets(1), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial shuffle with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// the 2nd ResultTask failed
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null)))
// this will get called
// blockManagerMaster.removeExecutor("exec-hostA")
// ask the scheduler to try it again
scheduler.resubmitFailedStages()
// have the 2nd attempt pass
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
// we can see both result blocks now
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
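  // Parameterized cases: (description, executor loss event, whether the external shuffle service
  // is enabled, whether shuffle files are expected to be lost).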
private val shuffleFileLossTests = Seq(
("slave lost with shuffle service", SlaveLost("", false), true, false),
("worker lost with shuffle service", SlaveLost("", true), true, true),
("worker lost without shuffle service", SlaveLost("", true), false, true),
("executor failure with shuffle service", ExecutorKilled, true, false),
("executor failure without shuffle service", ExecutorKilled, false, true))
for ((eventDescription, event, shuffleServiceOn, expectFileLoss) <- shuffleFileLossTests) {
val maybeLost = if (expectFileLoss) {
"lost"
} else {
"not lost"
}
test(s"shuffle files $maybeLost when $eventDescription") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set("spark.shuffle.service.enabled", shuffleServiceOn.toString)
init(conf)
assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
runEvent(ExecutorLost("exec-hostA", event))
if (expectFileLoss) {
intercept[MetadataFetchFailedException] {
mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
}
} else {
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
}
}
}
// Helper function to validate state when creating tests for task failures
private def checkStageId(stageId: Int, attempt: Int, stageAttempt: TaskSet) {
assert(stageAttempt.stageId === stageId)
assert(stageAttempt.stageAttemptId == attempt)
}
// Helper functions to extract commonly used code in Fetch Failure test cases
private def setupStageAbortTest(sc: SparkContext) {
sc.listenerBus.addToSharedQueue(new EndListener())
ended = false
jobResult = null
}
// Create a new Listener to confirm that the listenerBus sees the JobEnd message
// when we abort the stage. This message will also be consumed by the EventLoggingListener
// so this will propagate up to the user.
var ended = false
var jobResult : JobResult = null
class EndListener extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
jobResult = jobEnd.jobResult
ended = true
}
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* successfully.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param numShufflePartitions - The number of partitions in the next stage
*/
private def completeShuffleMapStageSuccessfully(
stageId: Int,
attemptIdx: Int,
numShufflePartitions: Int): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map {
case (task, idx) =>
(Success, makeMapStatus("host" + ('A' + idx).toChar, numShufflePartitions))
}.toSeq)
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
   * with a FetchFailure for every task.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param shuffleDep - The shuffle dependency of the stage with a fetch failure
*/
private def completeNextStageWithFetchFailure(
stageId: Int,
attemptIdx: Int,
shuffleDep: ShuffleDependency[_, _, _]): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep.shuffleId, 0, idx, "ignored"), null)
}.toSeq)
}
/**
* Common code to get the next result stage attempt, confirm it's the one we expect, and
* complete it with a success where we return 42.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
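   * @param partitionToResult - Maps each partition index to its result value (defaults to 42)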
*/
private def completeNextResultStageWithSuccess(
stageId: Int,
attemptIdx: Int,
partitionToResult: Int => Int = _ => 42): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
assert(scheduler.stageIdToStage(stageId).isInstanceOf[ResultStage])
val taskResults = stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(Success, partitionToResult(idx))
}
complete(stageAttempt, taskResults.toSeq)
}
/**
* In this test, we simulate a job where many tasks in the same stage fail. We want to show
* that many fetch failures inside a single stage attempt do not trigger an abort
* on their own, but only when there are enough failing stage attempts.
*/
test("Single stage fetch failure should not abort the stage.") {
setupStageAbortTest(sc)
val parts = 8
val shuffleMapRdd = new MyRDD(sc, parts, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, (0 until parts).toArray)
completeShuffleMapStageSuccessfully(0, 0, numShufflePartitions = parts)
completeNextStageWithFetchFailure(1, 0, shuffleDep)
// Resubmit and confirm that now all is well
scheduler.resubmitFailedStages()
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Complete stage 0 and then stage 1 with a "42"
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = parts)
completeNextResultStageWithSuccess(1, 1)
// Confirm job finished successfully
sc.listenerBus.waitUntilEmpty(1000)
assert(ended === true)
assert(results === (0 until parts).map { idx => idx -> 42 }.toMap)
assertDataStructuresEmpty()
}
/**
* In this test we simulate a job failure where the first stage completes successfully and
* the second stage fails due to a fetch failure. Multiple successive fetch failures of a stage
* trigger an overall job abort to avoid endless retries.
*/
test("Multiple consecutive stage fetch failures should lead to job being aborted.") {
setupStageAbortTest(sc)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDep)
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
if (attempt < scheduler.maxConsecutiveStageAttempts - 1) {
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
} else {
// Stage should have been aborted and removed from running stages
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty(1000)
assert(ended)
jobResult match {
case JobFailed(reason) =>
assert(reason.getMessage.contains("ResultStage 1 () has failed the maximum"))
case other => fail(s"expected JobFailed, not $other")
}
}
}
}
/**
* In this test, we create a job with two consecutive shuffles, and simulate 2 failures for each
   * shuffle fetch. In total, the job has had four failures overall, but not four failures
* for a particular stage, and as such should not be aborted.
*/
test("Failures in different stages should not trigger an overall abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// In the first two iterations, Stage 0 succeeds and stage 1 fails. In the next two iterations,
// stage 2 fails.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
if (attempt < scheduler.maxConsecutiveStageAttempts / 2) {
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
} else {
completeShuffleMapStageSuccessfully(1, attempt, numShufflePartitions = 1)
// Fail stage 2
completeNextStageWithFetchFailure(2,
attempt - scheduler.maxConsecutiveStageAttempts / 2, shuffleDepTwo)
}
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
}
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 4, numShufflePartitions = 1)
// Succeed stage2 with a "42"
completeNextResultStageWithSuccess(2, scheduler.maxConsecutiveStageAttempts / 2)
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
/**
* In this test we demonstrate that only consecutive failures trigger a stage abort. A stage may
   * fail multiple times, succeed, then fail a few more times (because it's run again by downstream
* dependencies). The total number of failed attempts for one stage will go over the limit,
* but that doesn't matter, since they have successes in the middle.
*/
test("Non-consecutive stage failures don't trigger abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// First, execute stages 0 and 1, failing stage 1 up to MAX-1 times.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts - 1) {
// Make each task in stage 0 success
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
}
// Rerun stage 0 and 1 to step through the task set
completeShuffleMapStageSuccessfully(0, 3, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 3, numShufflePartitions = 1)
// Fail stage 2 so that stage 1 is resubmitted when we call scheduler.resubmitFailedStages()
completeNextStageWithFetchFailure(2, 0, shuffleDepTwo)
scheduler.resubmitFailedStages()
// Rerun stage 0 to step through the task set
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
// Now again, fail stage 1 (up to MAX_FAILURES) but confirm that this doesn't trigger an abort
// since we succeeded in between.
completeNextStageWithFetchFailure(1, 4, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Next, succeed all and confirm output
// Rerun stage 0 + 1
completeShuffleMapStageSuccessfully(0, 5, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 5, numShufflePartitions = 1)
// Succeed stage 2 and verify results
completeNextResultStageWithSuccess(2, 1)
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty(1000)
assert(ended === true)
assert(results === Map(0 -> 42))
}
test("trivial shuffle with multiple fetch failures") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(1))
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 1, 1, "ignored"),
null))
// The SparkListener should not receive redundant failure events.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.size == 1)
}
test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
scheduler.resubmitFailedStages()
// Complete the map stage.
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
// Complete the result stage.
completeNextResultStageWithSuccess(1, 1)
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assertDataStructuresEmpty()
}
test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(1)))
// The second map task fails with TaskKilled.
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
TaskKilled("test"),
null))
assert(sparkListener.failedStages === Seq(0))
assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
scheduler.resubmitFailedStages()
// Complete the map stage.
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
// Complete the result stage.
completeNextResultStageWithSuccess(1, 0)
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assertDataStructuresEmpty()
}
/**
* This tests the case where another FetchFailed comes in while the map stage is getting
* re-run.
*/
test("late fetch failures don't cause multiple concurrent attempts for the same map stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 1)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(1))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1, 1, "ignored"),
null))
// Another ResubmitFailedStages event should not result in another attempt for the map
// stage being run concurrently.
// NOTE: the actual ResubmitFailedStages may get called at any time during this, but it
    // shouldn't affect anything -- our calling it just makes *SURE* it gets called between the
// desired event and our check.
runEvent(ResubmitFailedStages)
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 2)
}
/**
* This tests the case where a late FetchFailed comes in after the map stage has finished getting
* retried and a new reduce stage starts running.
*/
test("extremely late fetch failures don't cause multiple concurrent attempts for " +
"the same stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
def countSubmittedReduceStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 1)
}
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 0)
}
// The map stage should have been submitted.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 1)
// Complete the map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The reduce stage should have been submitted.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedReduceStageAttempts() === 1)
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
// Trigger resubmission of the failed map stage and finish the re-started map task.
runEvent(ResubmitFailedStages)
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
// Because the map stage finished, another attempt for the reduce stage should have been
    // submitted, resulting in 2 total attempts for each of the map and reduce stages.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 2)
assert(countSubmittedReduceStageAttempts() === 2)
// A late FetchFailed arrives from the second task in the original reduce stage.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1, 1, "ignored"),
null))
// Running ResubmitFailedStages shouldn't result in any more attempts for the map stage, because
// the FetchFailed should have been ignored
runEvent(ResubmitFailedStages)
// The FetchFailed from the original reduce stage should be ignored.
assert(countSubmittedMapStageAttempts() === 2)
}
test("task events always posted in speculation / when stage is killed") {
val baseRdd = new MyRDD(sc, 4, Nil)
val finalRdd = new MyRDD(sc, 4, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0, 1, 2, 3))
// complete two tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(0), Success, 42,
Seq.empty, createFakeTaskInfoWithId(0)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1), Success, 42,
Seq.empty, createFakeTaskInfoWithId(1)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
assert(sparkListener.endedTasks.size == 2)
// finish other 2 tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(2), Success, 42,
Seq.empty, createFakeTaskInfoWithId(2)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, createFakeTaskInfoWithId(3)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.endedTasks.size == 4)
// verify the stage is done
assert(!scheduler.stageIdToStage.contains(0))
// Stage should be complete. Finish one other Successful task to simulate what can happen
// with a speculative task and make sure the event is sent out
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, createFakeTaskInfoWithId(5)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.endedTasks.size == 5)
    // make sure non-successful tasks also send out events
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), UnknownReason, 42,
Seq.empty, createFakeTaskInfoWithId(6)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.endedTasks.size == 6)
}
test("ignore late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// pretend we were told hostA went away
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// now start completing some tasks in the shuffle map stage, under different hosts
// and epochs, and make sure scheduler updates its state correctly
val taskSet = taskSets(0)
val shuffleStage = scheduler.stageIdToStage(taskSet.stageId).asInstanceOf[ShuffleMapStage]
assert(shuffleStage.numAvailableOutputs === 0)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 0)
// should work because it's a non-failed host (so the available map outputs will increase)
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostB", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should work because it's a new epoch, which will increase the number of available map
// outputs, and also finish the stage
taskSet.tasks(1).epoch = newEpoch
runEvent(makeCompletionEvent(
taskSet.tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 2)
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostA")))
// finish the next stage normally, which completes the job
complete(taskSets(1), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
test("run shuffle with map stage failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Fail the map stage. This should cause the entire job to fail.
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(failure.getMessage === s"Job aborted due to stage failure: $stageFailureMessage")
// Listener bus should get told about the map stage failing, but not the reduce stage
// (since the reduce stage hasn't been started yet).
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.toSet === Set(0))
assertDataStructuresEmpty()
}
/**
* Run two jobs, with a shared dependency. We simulate a fetch failure in the second job, which
* requires regenerating some outputs of the shared dependency. One key aspect of this test is
* that the second job actually uses a different stage for the shared dependency (a "skipped"
* stage).
*/
test("shuffle fetch failure in a reused shuffle dependency") {
// Run the first job successfully, which creates one shuffle dependency
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(0, 0, 2)
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42, 1 -> 42))
assertDataStructuresEmpty()
// submit another job w/ the shared dependency, and have a fetch failure
val reduce2 = new MyRDD(sc, 2, List(shuffleDep))
submit(reduce2, Array(0, 1))
// Note that the stage numbering here is only b/c the shared dependency produces a new, skipped
// stage. If instead it reused the existing stage, then this would be stage 2
completeNextStageWithFetchFailure(3, 0, shuffleDep)
scheduler.resubmitFailedStages()
// the scheduler now creates a new task set to regenerate the missing map output, but this time
// using a different stage, the "skipped" one
// SPARK-9809 -- this stage is submitted without a task for each partition (because some of
// the shuffle map output is still available from stage 0); make sure we've still got internal
// accumulators setup
assert(scheduler.stageIdToStage(2).latestInfo.taskMetrics != null)
completeShuffleMapStageSuccessfully(2, 0, 2)
completeNextResultStageWithSuccess(3, 1, idx => idx + 1234)
assert(results === Map(0 -> 1234, 1 -> 1235))
assertDataStructuresEmpty()
}
/**
   * This test runs a three-stage job, with a fetch failure in stage 1. But during the retry, we
   * have completions from both the first & second attempt of stage 1, so all the map output is
   * available before we finish any task set for stage 1. We want to make sure that we don't
   * submit stage 2 until the map output for stage 1 is registered.
*/
test("don't submit stage until its dependencies map outputs are registered (SPARK-5259)") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// things start out smoothly, stage 0 completes with no issues
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostA", shuffleMapRdd.partitions.length))
))
// then one executor dies, and a task fails in stage 1
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(null, firstShuffleId, 2, 0, "Fetch failed"),
null))
// so we resubmit stage 0, which completes happily
scheduler.resubmitFailedStages()
val stage0Resubmit = taskSets(2)
assert(stage0Resubmit.stageId == 0)
assert(stage0Resubmit.stageAttemptId === 1)
val task = stage0Resubmit.tasks(0)
assert(task.partitionId === 2)
runEvent(makeCompletionEvent(
task,
Success,
makeMapStatus("hostC", shuffleMapRdd.partitions.length)))
    // now here is where things get tricky: we will now have a task set representing
// the second attempt for stage 1, but we *also* have some tasks for the first attempt for
// stage 1 still going
val stage1Resubmit = taskSets(3)
assert(stage1Resubmit.stageId == 1)
assert(stage1Resubmit.stageAttemptId === 1)
assert(stage1Resubmit.tasks.length === 3)
// we'll have some tasks finish from the first attempt, and some finish from the second attempt,
// so that we actually have all stage outputs, though no attempt has completed all its
// tasks
runEvent(makeCompletionEvent(
taskSets(3).tasks(0),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(3).tasks(1),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
// late task finish from the first attempt
runEvent(makeCompletionEvent(
taskSets(1).tasks(2),
Success,
makeMapStatus("hostB", reduceRdd.partitions.length)))
// What should happen now is that we submit stage 2. However, we might not see an error
// b/c of DAGScheduler's error handling (it tends to swallow errors and just log them). But
// we can check some conditions.
// Note that the really important thing here is not so much that we submit stage 2 *immediately*
// but that we don't end up with some error from these interleaved completions. It would also
// be OK (though sub-optimal) if stage 2 simply waited until the resubmission of stage 1 had
// all its tasks complete
// check that we have all the map output for stage 0 (it should have been there even before
// the last round of completions from stage 1, but just to double check it hasn't been messed
// up) and also the newly available stage 1
val stageToReduceIdxs = Seq(
0 -> (0 until 3),
1 -> (0 until 1)
)
for {
(stage, reduceIdxs) <- stageToReduceIdxs
reduceIdx <- reduceIdxs
} {
// this would throw an exception if the map status hadn't been registered
val statuses = mapOutputTracker.getMapSizesByExecutorId(stage, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 2 has been submitted
assert(taskSets.size == 5)
val stage2TaskSet = taskSets(4)
assert(stage2TaskSet.stageId == 2)
assert(stage2TaskSet.stageAttemptId == 0)
}
/**
* We lose an executor after completing some shuffle map tasks on it. Those tasks get
   * resubmitted, and when they finish, the job completes normally.
*/
test("register map outputs correctly after ExecutorLost and task Resubmitted") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 5, List(firstShuffleDep))
submit(reduceRdd, Array(0))
// complete some of the tasks from the first stage, on one host
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
// now that host goes down
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// so we resubmit those tasks
runEvent(makeCompletionEvent(taskSets(0).tasks(0), Resubmitted, null))
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Resubmitted, null))
// now complete everything on a different host
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))
))
// now we should submit stage 1, and the map output from stage 0 should be registered
// check that we have all the map output for stage 0
(0 until reduceRdd.partitions.length).foreach { reduceIdx =>
val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 1 has been submitted
assert(taskSets.size == 2)
val stage1TaskSet = taskSets(1)
assert(stage1TaskSet.stageId == 1)
assert(stage1TaskSet.stageAttemptId == 0)
}
/**
* Makes sure that failures of stage used by multiple jobs are correctly handled.
*
* This test creates the following dependency graph:
*
* shuffleMapRdd1 shuffleMapRDD2
* | \\ |
* | \\ |
* | \\ |
* | \\ |
* reduceRdd1 reduceRdd2
*
* We start both shuffleMapRdds and then fail shuffleMapRdd1. As a result, the job listeners for
* reduceRdd1 and reduceRdd2 should both be informed that the job failed. shuffleMapRDD2 should
* also be cancelled, because it is only used by reduceRdd2 and reduceRdd2 cannot complete
* without shuffleMapRdd1.
*/
test("failure of stage used by two jobs") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleMapRdd2 = new MyRDD(sc, 2, Nil)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val reduceRdd1 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val reduceRdd2 = new MyRDD(sc, 2, List(shuffleDep1, shuffleDep2), tracker = mapOutputTracker)
// We need to make our own listeners for this test, since by default submit uses the same
// listener for all jobs, and here we want to capture the failure for each job separately.
class FailureRecordingJobListener() extends JobListener {
var failureMessage: String = _
override def taskSucceeded(index: Int, result: Any) {}
override def jobFailed(exception: Exception): Unit = { failureMessage = exception.getMessage }
}
val listener1 = new FailureRecordingJobListener()
val listener2 = new FailureRecordingJobListener()
submit(reduceRdd1, Array(0, 1), listener = listener1)
submit(reduceRdd2, Array(0, 1), listener = listener2)
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(cancelledStages.toSet === Set(0, 2))
// Make sure the listeners got told about both failed stages.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.successfulStages.isEmpty)
assert(sparkListener.failedStages.toSet === Set(0, 2))
assert(listener1.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assert(listener2.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assertDataStructuresEmpty()
}
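  // Asserts that a TaskSet carries the expected job-local "testProperty" value and the
  // expected scheduling priority (which corresponds to the job id it runs for).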
def checkJobPropertiesAndPriority(taskSet: TaskSet, expected: String, priority: Int): Unit = {
assert(taskSet.properties != null)
assert(taskSet.properties.getProperty("testProperty") === expected)
assert(taskSet.priority === priority)
}
def launchJobsThatShareStageAndCancelFirst(): ShuffleDependency[Int, Int, Nothing] = {
val baseRdd = new MyRDD(sc, 1, Nil)
val shuffleDep1 = new ShuffleDependency(baseRdd, new HashPartitioner(1))
val intermediateRdd = new MyRDD(sc, 1, List(shuffleDep1))
val shuffleDep2 = new ShuffleDependency(intermediateRdd, new HashPartitioner(1))
val finalRdd1 = new MyRDD(sc, 1, List(shuffleDep2))
val finalRdd2 = new MyRDD(sc, 1, List(shuffleDep2))
val job1Properties = new Properties()
val job2Properties = new Properties()
job1Properties.setProperty("testProperty", "job1")
job2Properties.setProperty("testProperty", "job2")
// Run jobs 1 & 2, both referencing the same stage, then cancel job1.
// Note that we have to submit job2 before we cancel job1 to have them actually share
// *Stages*, and not just shuffle dependencies, due to skipped stages (at least until
// we address SPARK-10193.)
val jobId1 = submit(finalRdd1, Array(0), properties = job1Properties)
val jobId2 = submit(finalRdd2, Array(0), properties = job2Properties)
assert(scheduler.activeJobs.nonEmpty)
val testProperty1 = scheduler.jobIdToActiveJob(jobId1).properties.getProperty("testProperty")
// remove job1 as an ActiveJob
cancel(jobId1)
// job2 should still be running
assert(scheduler.activeJobs.nonEmpty)
val testProperty2 = scheduler.jobIdToActiveJob(jobId2).properties.getProperty("testProperty")
assert(testProperty1 != testProperty2)
// NB: This next assert isn't necessarily the "desired" behavior; it's just to document
// the current behavior. We've already submitted the TaskSet for stage 0 based on job1, but
// even though we have cancelled that job and are now running it because of job2, we haven't
// updated the TaskSet's properties. Changing the properties to "job2" is likely the more
// correct behavior.
val job1Id = 0 // TaskSet priority for Stages run with "job1" as the ActiveJob
checkJobPropertiesAndPriority(taskSets(0), "job1", job1Id)
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
shuffleDep1
}
/**
* Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
* later, active job if they were previously run under a job that is no longer active
*/
test("stage used by two jobs, the first no longer active (SPARK-6880)") {
launchJobsThatShareStageAndCancelFirst()
// The next check is the key for SPARK-6880. For the stage which was shared by both job1 and
// job2 but never had any tasks submitted for job1, the properties of job2 are now used to run
// the stage.
checkJobPropertiesAndPriority(taskSets(1), "job2", 1)
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
assert(taskSets(2).properties != null)
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
* Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
* later, active job if they were previously run under a job that is no longer active, even when
* there are fetch failures
*/
test("stage used by two jobs, some fetch failures, and the first job no longer active " +
"(SPARK-6880)") {
val shuffleDep1 = launchJobsThatShareStageAndCancelFirst()
val job2Id = 1 // TaskSet priority for Stages run with "job2" as the ActiveJob
// lets say there is a fetch failure in this task set, which makes us go back and
// run stage 0, attempt 1
complete(taskSets(1), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep1.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// stage 0, attempt 1 should have the properties of job2
assert(taskSets(2).stageId === 0)
assert(taskSets(2).stageAttemptId === 1)
checkJobPropertiesAndPriority(taskSets(2), "job2", job2Id)
// run the rest of the stages normally, checking that they have the correct properties
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(3), "job2", job2Id)
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(4), "job2", job2Id)
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
* In this test, we run a map stage where one of the executors fails but we still receive a
* "zombie" complete message from a task that ran on that executor. We want to make sure the
* stage is resubmitted so that the task that ran on the failed executor is re-executed, and
* that the stage is only marked as finished once that task completes.
*/
test("run trivial shuffle with out-of-band executor failure and retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// Tell the DAGScheduler that hostA was lost.
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers the
// stage complete), but the tasks that ran on HostA need to be re-run, so the DAGScheduler
// should re-submit the stage with one task (the task that originally ran on HostA).
assert(taskSets.size === 2)
assert(taskSets(1).tasks.size === 1)
// Make sure that the stage that was re-submitted was the ShuffleMapStage (not the reduce
// stage, which shouldn't be run until all of the tasks in the ShuffleMapStage complete on
// alive executors).
assert(taskSets(1).tasks(0).isInstanceOf[ShuffleMapTask])
// have hostC complete the resubmitted task
complete(taskSets(1), Seq((Success, makeMapStatus("hostC", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
// Make sure that the reduce stage was now submitted.
assert(taskSets.size === 3)
assert(taskSets(2).tasks(0).isInstanceOf[ResultTask[_, _]])
// Complete the reduce stage.
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("recursive shuffle failures") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil)
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker)
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// have the first stage complete normally
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// have the second stage complete normally
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostC", 1))))
// fail the third stage because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDepTwo.shuffleId, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// have DAGScheduler try again
scheduler.resubmitFailedStages()
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 2))))
complete(taskSets(4), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(5), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("cached post-shuffle") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
cacheLocations(shuffleTwoRdd.id -> 0) = Seq(makeBlockManagerId("hostD"))
cacheLocations(shuffleTwoRdd.id -> 1) = Seq(makeBlockManagerId("hostC"))
// complete stage 0
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// complete stage 1
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// pretend stage 2 failed because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDepTwo.shuffleId, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// DAGScheduler should notice the cached copy of the second shuffle and try to get it rerun.
scheduler.resubmitFailedStages()
assertLocations(taskSets(3), Seq(Seq("hostD")))
// allow hostD to recover
complete(taskSets(3), Seq((Success, makeMapStatus("hostD", 1))))
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
val acc = new LongAccumulator {
override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
}
sc.register(acc)
// Run this on executors
sc.parallelize(1 to 10, 2).foreach { item => acc.add(1) }
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
/**
   * The job will fail as soon as the first task throws a DAGSchedulerSuiteDummyException.
   * Any subsequent task WILL throw a legitimate java.lang.UnsupportedOperationException.
   * If there are multiple tasks, there is a race between the SparkDriverExecutionExceptions
   * (with their differing causes) as to which one ends up representing the job's result...
*/
test("misbehaved resultHandler should not crash DAGScheduler and SparkContext") {
val e = intercept[SparkDriverExecutionException] {
// Number of parallelized partitions implies number of tasks of job
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
// For a robust test assertion, limit number of job tasks to 1; that is,
// if multiple RDD partitions, use id of any one partition, say, first partition id=0
Seq(0),
(part: Int, result: Int) => throw new DAGSchedulerSuiteDummyException)
}
assert(e.getCause.isInstanceOf[DAGSchedulerSuiteDummyException])
// Make sure we can still run commands on our SparkContext
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)") {
val e1 = intercept[DAGSchedulerSuiteDummyException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPartitions: Array[Partition] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.reduceByKey(_ + _, 1).count()
}
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)") {
val e1 = intercept[SparkException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPreferredLocations(split: Partition): Seq[String] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.count()
}
assert(e1.getMessage.contains(classOf[DAGSchedulerSuiteDummyException].getName))
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("accumulator not calculated for resubmitted result stage") {
// just for register
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(accum.value === 1)
assertDataStructuresEmpty()
}
test("accumulator not calculated for resubmitted task in result stage") {
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 2, Nil)
submit(finalRdd, Array(0, 1))
// finish the first task
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
// finish the first task again (simulate a speculative task or a resubmitted task)
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
// The accumulator should only be updated once.
assert(accum.value === 1)
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Success, 42))
assertDataStructuresEmpty()
}
test("accumulators are updated on exception failures and task killed") {
val acc1 = AccumulatorSuite.createLongAccum("ingenieur")
val acc2 = AccumulatorSuite.createLongAccum("boulanger")
val acc3 = AccumulatorSuite.createLongAccum("agriculteur")
assert(AccumulatorContext.get(acc1.id).isDefined)
assert(AccumulatorContext.get(acc2.id).isDefined)
assert(AccumulatorContext.get(acc3.id).isDefined)
val accUpdate1 = new LongAccumulator
accUpdate1.metadata = acc1.metadata
accUpdate1.setValue(15)
val accUpdate2 = new LongAccumulator
accUpdate2.metadata = acc2.metadata
accUpdate2.setValue(13)
val accUpdate3 = new LongAccumulator
accUpdate3.metadata = acc3.metadata
accUpdate3.setValue(18)
val accumUpdates1 = Seq(accUpdate1, accUpdate2)
val accumInfo1 = accumUpdates1.map(AccumulatorSuite.makeInfo)
val exceptionFailure = new ExceptionFailure(
new SparkException("fondue?"),
accumInfo1).copy(accums = accumUpdates1)
submit(new MyRDD(sc, 1, Nil), Array(0))
runEvent(makeCompletionEvent(taskSets.head.tasks.head, exceptionFailure, "result"))
assert(AccumulatorContext.get(acc1.id).get.value === 15L)
assert(AccumulatorContext.get(acc2.id).get.value === 13L)
val accumUpdates2 = Seq(accUpdate3)
val accumInfo2 = accumUpdates2.map(AccumulatorSuite.makeInfo)
    val taskKilled = new TaskKilled("test", accumInfo2, accums = accumUpdates2)
runEvent(makeCompletionEvent(taskSets.head.tasks.head, taskKilled, "result"))
assert(AccumulatorContext.get(acc3.id).get.value === 18L)
}
test("reduce tasks should be placed locally with map output") {
// Create a shuffleMapRdd with 1 partition
val shuffleMapRdd = new MyRDD(sc, 1, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run on the same host that map task ran
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostA")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("reduce task locality preferences should only include machines with largest map outputs") {
val numMapTasks = 4
// Create a shuffleMapRdd with more partitions
val shuffleMapRdd = new MyRDD(sc, numMapTasks, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
val statuses = (1 to numMapTasks).map { i =>
(Success, makeMapStatus("host" + i, 1, (10*i).toByte))
}
complete(taskSets(0), statuses)
// Reducer should prefer the last 3 hosts as they have 20%, 30% and 40% of data
val hosts = (1 to numMapTasks).map(i => "host" + i).reverse.take(numMapTasks - 1)
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(hosts))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("stages with both narrow and shuffle dependencies use narrow ones for locality") {
// Create an RDD that has both a shuffle dependency and a narrow dependency (e.g. for a join)
val rdd1 = new MyRDD(sc, 1, Nil)
val rdd2 = new MyRDD(sc, 1, Nil, locations = Seq(Seq("hostB")))
val shuffleDep = new ShuffleDependency(rdd1, new HashPartitioner(1))
val narrowDep = new OneToOneDependency(rdd2)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep, narrowDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run where RDD 2 has preferences, even though it also has a shuffle dep
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostB")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("Spark exceptions should include call site in stack trace") {
val e = intercept[SparkException] {
sc.parallelize(1 to 10, 2).map { _ => throw new RuntimeException("uh-oh!") }.count()
}
// Does not include message, ONLY stack trace.
val stackTraceString = Utils.exceptionString(e)
// should actually include the RDD operation that invoked the method:
assert(stackTraceString.contains("org.apache.spark.rdd.RDD.count"))
// should include the FunSuite setup:
assert(stackTraceString.contains("org.scalatest.FunSuite"))
}
test("catch errors in event loop") {
// this is a test of our testing framework -- make sure errors in event loop don't get ignored
// just run some bad event that will throw an exception -- we'll give a null TaskEndReason
val rdd1 = new MyRDD(sc, 1, Nil)
submit(rdd1, Array(0))
intercept[Exception] {
complete(taskSets(0), Seq(
(null, makeMapStatus("hostA", 1))))
}
}
test("simple map stage submission") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
assert(results.size === 0) // No results yet
completeShuffleMapStageSuccessfully(0, 0, 1)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage; it should directly do the reduce
submit(reduceRdd, Array(0))
completeNextResultStageWithSuccess(2, 0)
assert(results === Map(0 -> 42))
results.clear()
assertDataStructuresEmpty()
// Check that if we submit the map stage again, no tasks run
submitMapStage(shuffleDep)
assert(results.size === 1)
assertDataStructuresEmpty()
}
test("map stage submission with reduce stage also depending on the data") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
// Submit the map stage by itself
submitMapStage(shuffleDep)
// Submit a reduce job that depends on this map stage
submit(reduceRdd, Array(0))
// Complete tasks for the map stage
completeShuffleMapStageSuccessfully(0, 0, 1)
assert(results.size === 1)
results.clear()
// Complete tasks for the reduce stage
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42))
results.clear()
assertDataStructuresEmpty()
// Check that if we submit the map stage again, no tasks run
submitMapStage(shuffleDep)
assert(results.size === 1)
assertDataStructuresEmpty()
}
test("map stage submission with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage, but where one reduce will fail a fetch
submit(reduceRdd, Array(0, 1))
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null)))
// Ask the scheduler to try it again; TaskSet 2 will rerun the map task that we couldn't fetch
// from, then TaskSet 3 will run the reduce stage
scheduler.resubmitFailedStages()
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
// Run another reduce job without a failure; this should just work
submit(reduceRdd, Array(0, 1))
complete(taskSets(4), Seq(
(Success, 44),
(Success, 45)))
assert(results === Map(0 -> 44, 1 -> 45))
results.clear()
assertDataStructuresEmpty()
// Resubmit the map stage; this should also just work
submitMapStage(shuffleDep)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
}
/**
* In this test, we have three RDDs with shuffle dependencies, and we submit map stage jobs
* that are waiting on each one, as well as a reduce job on the last one. We test that all of
* these jobs complete even if there are some fetch failures in both shuffles.
*/
test("map stage submission with multiple shared stages and failures") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val rdd3 = new MyRDD(sc, 2, List(dep2), tracker = mapOutputTracker)
val listener1 = new SimpleListener
val listener2 = new SimpleListener
val listener3 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
submit(rdd3, Array(0, 1), listener = listener3)
// Complete the first stage
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting the second stage, show a fetch failure
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
assert(listener2.results.size === 0) // Second stage listener should not have a result yet
// Stage 0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
assert(listener2.results.size === 0) // Second stage listener should still not have a result
// Stage 1 should now be running as task set 3; make its first task succeed
assert(taskSets(3).stageId === 1)
complete(taskSets(3), Seq(
(Success, makeMapStatus("hostB", rdd2.partitions.length)),
(Success, makeMapStatus("hostD", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep2.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostD")))
assert(listener2.results.size === 1)
// Finally, the reduce job should be running as task set 4; make it see a fetch failure,
// then make it run again and succeed
assert(taskSets(4).stageId === 2)
complete(taskSets(4), Seq(
(Success, 52),
(FetchFailed(makeBlockManagerId("hostD"), dep2.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// TaskSet 5 will rerun stage 1's lost task, then TaskSet 6 will rerun stage 2
assert(taskSets(5).stageId === 1)
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostE", rdd2.partitions.length))))
complete(taskSets(6), Seq(
(Success, 53)))
assert(listener3.results === Map(0 -> 52, 1 -> 53))
assertDataStructuresEmpty()
}
test("Trigger mapstage's job listener in submitMissingTasks") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val listener1 = new SimpleListener
val listener2 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
// Complete the stage0.
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting stage1, trigger a fetch failure.
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// Stage1 listener should not have a result yet
assert(listener2.results.size === 0)
// Speculative task succeeded in stage1.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
Success,
makeMapStatus("hostD", rdd2.partitions.length)))
    // The stage1 listener still should not have a result, even though stage1 now has no
    // missing partitions, because stage1 has failed and is not in `runningStages` at this moment.
assert(listener2.results.size === 0)
// Stage0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
Set(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
    // After stage0 is finished, stage1 will be submitted and found to have no missing
    // partitions, and the listener is then triggered.
assert(listener2.results.size === 1)
assertDataStructuresEmpty()
}
/**
* In this test, we run a map stage where one of the executors fails but we still receive a
* "zombie" complete message from that executor. We want to make sure the stage is not reported
* as done until all tasks have completed.
*
* Most of the functionality in this test is tested in "run trivial shuffle with out-of-band
* executor failure and retry". However, that test uses ShuffleMapStages that are followed by
* a ResultStage, whereas in this test, the ShuffleMapStage is tested in isolation, without a
* ResultStage after it.
*/
test("map stage submission with executor failure late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 3, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
submitMapStage(shuffleDep)
val oldTaskSet = taskSets(0)
runEvent(makeCompletionEvent(oldTaskSet.tasks(0), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// Pretend host A was lost. This will cause the TaskSetManager to resubmit task 0, because it
// completed on hostA.
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// Suppose we also get a completed event from task 1 on the same host; this should be ignored
runEvent(makeCompletionEvent(oldTaskSet.tasks(1), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// A completion from another task should work because it's a non-failed host
runEvent(makeCompletionEvent(oldTaskSet.tasks(2), Success, makeMapStatus("hostB", 2)))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers
// the stage complete), but the task that ran on hostA needs to be re-run, so the map stage
// shouldn't be marked as complete, and the DAGScheduler should re-submit the stage.
assert(results.size === 0)
assert(taskSets.size === 2)
// Now complete tasks in the second task set
val newTaskSet = taskSets(1)
// 2 tasks should have been re-submitted, for tasks 0 and 1 (which ran on hostA).
assert(newTaskSet.tasks.size === 2)
    // Complete task 0 from the original task set (i.e., not the one that's currently active).
// This should still be counted towards the job being complete (but there's still one
// outstanding task).
runEvent(makeCompletionEvent(newTaskSet.tasks(0), Success, makeMapStatus("hostB", 2)))
assert(results.size === 0)
// Complete the final task, from the currently active task set. There's still one
// running task, task 0 in the currently active stage attempt, but the success of task 0 means
// the DAGScheduler can mark the stage as finished.
runEvent(makeCompletionEvent(newTaskSet.tasks(1), Success, makeMapStatus("hostB", 2)))
assert(results.size === 1) // Map stage job should now finally be complete
assertDataStructuresEmpty()
// Also test that a reduce stage using this shuffled data can immediately run
val reduceRDD = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
results.clear()
submit(reduceRDD, Array(0, 1))
complete(taskSets(2), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
}
/**
* Checks the DAGScheduler's internal logic for traversing an RDD DAG by making sure that
* getShuffleDependencies correctly returns the direct shuffle dependencies of a particular
* RDD. The test creates the following RDD graph (where n denotes a narrow dependency and s
* denotes a shuffle dependency):
*
* A <------------s---------,
* \\
* B <--s-- C <--s-- D <--n---`-- E
*
* Here, the direct shuffle dependency of C is just the shuffle dependency on B. The direct
* shuffle dependencies of E are the shuffle dependency on A and the shuffle dependency on C.
*/
test("getShuffleDependencies correctly returns only direct shuffle parents") {
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val rddB = new MyRDD(sc, 2, Nil)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val rddC = new MyRDD(sc, 1, List(shuffleDepB))
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val rddD = new MyRDD(sc, 1, List(shuffleDepC))
val narrowDepD = new OneToOneDependency(rddD)
val rddE = new MyRDD(sc, 1, List(shuffleDepA, narrowDepD), tracker = mapOutputTracker)
assert(scheduler.getShuffleDependencies(rddA) === Set())
assert(scheduler.getShuffleDependencies(rddB) === Set())
assert(scheduler.getShuffleDependencies(rddC) === Set(shuffleDepB))
assert(scheduler.getShuffleDependencies(rddD) === Set(shuffleDepC))
assert(scheduler.getShuffleDependencies(rddE) === Set(shuffleDepA, shuffleDepC))
}
test("SPARK-17644: After one stage is aborted for too many failed attempts, subsequent stages" +
"still behave correctly on fetch failures") {
// Runs a job that always encounters a fetch failure, so should eventually be aborted
def runJobWithPersistentFetchFailure: Unit = {
val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
val shuffleHandle =
rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
rdd1.map {
case (x, _) if (x == 1) =>
throw new FetchFailedException(
BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0, 0, "test")
case (x, _) => x
}.count()
}
// Runs a job that encounters a single fetch failure but succeeds on the second attempt
def runJobWithTemporaryFetchFailure: Unit = {
val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
val shuffleHandle =
rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
rdd1.map {
case (x, _) if (x == 1) && FailThisAttempt._fail.getAndSet(false) =>
throw new FetchFailedException(
BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0, 0, "test")
        case (x, _) => x
      }.count()
    }
failAfter(10.seconds) {
val e = intercept[SparkException] {
runJobWithPersistentFetchFailure
}
assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
}
// Run a second job that will fail due to a fetch failure.
// This job will hang without the fix for SPARK-17644.
failAfter(10.seconds) {
val e = intercept[SparkException] {
runJobWithPersistentFetchFailure
}
assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
}
failAfter(10.seconds) {
try {
runJobWithTemporaryFetchFailure
} catch {
case e: Throwable => fail("A job with one fetch failure should eventually succeed")
}
}
}
test("[SPARK-19263] DAGScheduler should not submit multiple active tasksets," +
" even with late completions from earlier stage attempts") {
// Create 3 RDDs with shuffle dependencies on each other: rddA <--- rddB <--- rddC
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(2))
val shuffleIdA = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 2, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(2))
val rddC = new MyRDD(sc, 2, List(shuffleDepB), tracker = mapOutputTracker)
submit(rddC, Array(0, 1))
// Complete both tasks in rddA.
assert(taskSets(0).stageId === 0 && taskSets(0).stageAttemptId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostA", 2))))
// Fetch failed for task(stageId=1, stageAttemptId=0, partitionId=0) running on hostA
// and task(stageId=1, stageAttemptId=0, partitionId=1) is still running.
assert(taskSets(1).stageId === 1 && taskSets(1).stageAttemptId === 0)
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleIdA, 0, 0,
"Fetch failure of task: stageId=1, stageAttempt=0, partitionId=0"),
result = null))
// Both original tasks in rddA should be marked as failed, because they ran on the
// failed hostA, so both should be resubmitted. Complete them on hostB successfully.
scheduler.resubmitFailedStages()
assert(taskSets(2).stageId === 0 && taskSets(2).stageAttemptId === 1
&& taskSets(2).tasks.size === 2)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostB", 2)),
(Success, makeMapStatus("hostB", 2))))
// Complete task(stageId=1, stageAttemptId=0, partitionId=1) running on failed hostA
// successfully. The success should be ignored because the task started before the
// executor failed, so the output may have been lost.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1), Success, makeMapStatus("hostA", 2)))
    // Both tasks in rddB should be resubmitted, because neither of them has truly succeeded.
// Complete the task(stageId=1, stageAttemptId=1, partitionId=0) successfully.
// Task(stageId=1, stageAttemptId=1, partitionId=1) of this new active stage attempt
// is still running.
assert(taskSets(3).stageId === 1 && taskSets(3).stageAttemptId === 1
&& taskSets(3).tasks.size === 2)
runEvent(makeCompletionEvent(
taskSets(3).tasks(0), Success, makeMapStatus("hostB", 2)))
// There should be no new attempt of stage submitted,
// because task(stageId=1, stageAttempt=1, partitionId=1) is still running in
// the current attempt (and hasn't completed successfully in any earlier attempts).
assert(taskSets.size === 4)
// Complete task(stageId=1, stageAttempt=1, partitionId=1) successfully.
runEvent(makeCompletionEvent(
taskSets(3).tasks(1), Success, makeMapStatus("hostB", 2)))
// Now the ResultStage should be submitted, because all of the tasks of rddB have
// completed successfully on alive executors.
assert(taskSets.size === 5 && taskSets(4).tasks(0).isInstanceOf[ResultTask[_, _]])
complete(taskSets(4), Seq(
(Success, 1),
(Success, 1)))
}
test("task end event should have updated accumulators (SPARK-20342)") {
val tasks = 10
val accumId = new AtomicLong()
val foundCount = new AtomicLong()
val listener = new SparkListener() {
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
event.taskInfo.accumulables.find(_.id == accumId.get).foreach { _ =>
foundCount.incrementAndGet()
}
}
}
sc.addSparkListener(listener)
// Try a few times in a loop to make sure. This is not guaranteed to fail when the bug exists,
// but it should at least make the test flaky. If the bug is fixed, this should always pass.
(1 to 10).foreach { i =>
foundCount.set(0L)
val accum = sc.longAccumulator(s"accum$i")
accumId.set(accum.id)
sc.parallelize(1 to tasks, tasks).foreach { _ =>
accum.add(1L)
}
sc.listenerBus.waitUntilEmpty(1000)
assert(foundCount.get() === tasks)
}
}
/**
* Assert that the supplied TaskSet has exactly the given hosts as its preferred locations.
* Note that this checks only the host and not the executor ID.
*/
private def assertLocations(taskSet: TaskSet, hosts: Seq[Seq[String]]) {
assert(hosts.size === taskSet.tasks.size)
for ((taskLocs, expectedLocs) <- taskSet.tasks.map(_.preferredLocations).zip(hosts)) {
assert(taskLocs.map(_.host).toSet === expectedLocs.toSet)
}
}
private def assertDataStructuresEmpty(): Unit = {
assert(scheduler.activeJobs.isEmpty)
assert(scheduler.failedStages.isEmpty)
assert(scheduler.jobIdToActiveJob.isEmpty)
assert(scheduler.jobIdToStageIds.isEmpty)
assert(scheduler.stageIdToStage.isEmpty)
assert(scheduler.runningStages.isEmpty)
assert(scheduler.shuffleIdToMapStage.isEmpty)
assert(scheduler.waitingStages.isEmpty)
assert(scheduler.outputCommitCoordinator.isEmpty)
}
// Nothing in this test should break if the task info's fields are null, but
// OutputCommitCoordinator requires the task info itself to not be null.
private def createFakeTaskInfo(): TaskInfo = {
val info = new TaskInfo(0, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1
info
}
private def createFakeTaskInfoWithId(taskId: Long): TaskInfo = {
val info = new TaskInfo(taskId, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1
info
}
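  // Builds a CompletionEvent for the given task, attaching the accumulator updates that
  // match the end reason: task metrics on Success, the failure's accumulators on
  // ExceptionFailure or TaskKilled, and nothing otherwise.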
private def makeCompletionEvent(
task: Task[_],
reason: TaskEndReason,
result: Any,
extraAccumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty,
taskInfo: TaskInfo = createFakeTaskInfo()): CompletionEvent = {
val accumUpdates = reason match {
case Success => task.metrics.accumulators()
case ef: ExceptionFailure => ef.accums
case tk: TaskKilled => tk.accums
case _ => Seq.empty
}
CompletionEvent(task, reason, result, accumUpdates ++ extraAccumUpdates, taskInfo)
}
}
object DAGSchedulerSuite {
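  // Shared test helpers: fabricate a MapStatus with uniform block sizes for a host, and
  // the BlockManagerId this suite associates with that host.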
def makeMapStatus(host: String, reduces: Int, sizes: Byte = 2): MapStatus =
MapStatus(makeBlockManagerId(host), Array.fill[Long](reduces)(sizes), 1)
def makeBlockManagerId(host: String): BlockManagerId =
BlockManagerId("exec-" + host, host, 12345)
}
object FailThisAttempt {
val _fail = new AtomicBoolean(true)
}
| eyalfa/spark | core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala | Scala | apache-2.0 | 110,366 |
package org.qirx.littlespec.macros
import scala.reflect.macros.Context
import scala.io.Source
import scala.language.experimental.macros
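/**
 * Captures the position of a macro expansion site: the source file name, the line and
 * column of the call, and the full text of that source file split into lines.
 */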
case class Location(filename: String, line: Int, column: Int, lines: Seq[String])
object Location {
implicit def currentLocation: Location = macro currentLocationImpl
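  // Macro implementation: reads the expansion position from the macro context, loads the
  // enclosing source file from disk, and splices a `Location` literal for the call site
  // into the generated code.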
def currentLocationImpl(c: Context): c.Expr[Location] = {
import c.universe._
val pos = c.macroApplication.pos
val Location = c.mirror.staticModule(classOf[Location].getName)
val path = pos.source.path
val sourceLines = scala.io.Source.fromFile(path).getLines.toSeq
val lines = q"Seq(..$sourceLines)"
c.Expr(q"$Location($path, ${pos.line}, ${pos.column}, $lines)")
}
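  // Usage sketch (hypothetical; `describedAt` is illustrative and not part of this library):
  // any method that takes an implicit Location gets the call site materialized for it, e.g.
  //
  //   def describedAt(msg: String)(implicit loc: Location): String =
  //     s"$msg at ${loc.filename}:${loc.line}:${loc.column}"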
} | EECOLOR/little-spec | macros/src/main/scala/org/qirx/littlespec/macros/Location.scala | Scala | mit | 710 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
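// Minimal test data sources: SimpleScanSource builds a single-column table of the integers
// in [from, to]; AllDataTypesScanSource honours a user-specified schema covering a wide
// range of data types.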
class DefaultSource extends SimpleScanSource
class SimpleScanSource extends RelationProvider {
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String]): BaseRelation = {
SimpleScan(parameters("from").toInt, parameters("TO").toInt)(sqlContext)
}
}
// Full table scan (TableScan); @transient marks the field as transient so it will not be serialized.
case class SimpleScan(from: Int, to: Int)(@transient val sqlContext: SQLContext)
extends BaseRelation with TableScan {
  // StructType represents a table schema; StructField represents a single field.
override def schema: StructType =
StructType(StructField("i", IntegerType, nullable = false) :: Nil)
override def buildScan(): RDD[Row] = sqlContext.sparkContext.parallelize(from to to).map(Row(_))
}
class AllDataTypesScanSource extends SchemaRelationProvider {
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String],
schema: StructType): BaseRelation = {
// Check that weird parameters are passed correctly.
parameters("option_with_underscores")
parameters("option.with.dots")
AllDataTypesScan(parameters("from").toInt, parameters("TO").toInt, schema)(sqlContext)
}
}
case class AllDataTypesScan(
from: Int,
to: Int,
userSpecifiedSchema: StructType)(@transient val sqlContext: SQLContext)
extends BaseRelation
with TableScan {
override def schema: StructType = userSpecifiedSchema
override def needConversion: Boolean = true
override def buildScan(): RDD[Row] = {
sqlContext.sparkContext.parallelize(from to to).map { i =>
Row(
s"str_$i",
s"str_$i".getBytes(StandardCharsets.UTF_8),
i % 2 == 0,
i.toByte,
i.toShort,
i,
i.toLong,
i.toFloat,
i.toDouble,
new java.math.BigDecimal(i),
new java.math.BigDecimal(i),
Date.valueOf("1970-01-01"),
new Timestamp(20000 + i),
s"varchar_$i",
Seq(i, i + 1),
Seq(Map(s"str_$i" -> Row(i.toLong))),
Map(i -> i.toString),
Map(Map(s"str_$i" -> i.toFloat) -> Row(i.toLong)),
Row(i, i.toString),
Row(Seq(s"str_$i", s"str_${i + 1}"),
Row(Seq(Date.valueOf(s"1970-01-${i + 1}")))))
}
}
}
// Table scan test suite
class TableScanSuite extends DataSourceTest with SharedSQLContext {
protected override lazy val sql = caseInsensitiveContext.sql _
private lazy val tableWithSchemaExpected = (1 to 10).map { i =>
Row(
s"str_$i",
s"str_$i",
i % 2 == 0,
i.toByte,
i.toShort,
i,
i.toLong,
i.toFloat,
i.toDouble,
new java.math.BigDecimal(i),
new java.math.BigDecimal(i),
Date.valueOf("1970-01-01"),
new Timestamp(20000 + i),
s"varchar_$i",
Seq(i, i + 1),
Seq(Map(s"str_$i" -> Row(i.toLong))),
Map(i -> i.toString),
Map(Map(s"str_$i" -> i.toFloat) -> Row(i.toLong)),
Row(i, i.toString),
Row(Seq(s"str_$i", s"str_${i + 1}"), Row(Seq(Date.valueOf(s"1970-01-${i + 1}")))))
}.toSeq
override def beforeAll(): Unit = {
super.beforeAll()
sql(
"""
|CREATE TEMPORARY TABLE oneToTen
|USING org.apache.spark.sql.sources.SimpleScanSource
|OPTIONS (
| From '1',
| To '10',
| option_with_underscores 'someval',
| option.with.dots 'someval'
|)
""".stripMargin)
sql(
"""
|CREATE TEMPORARY TABLE tableWithSchema (
|`string$%Field` stRIng,
|binaryField binary,
|`booleanField` boolean,
|ByteField tinyint,
|shortField smaLlint,
|int_Field iNt,
|`longField_:,<>=+/~^` Bigint,
|floatField flOat,
|doubleField doubLE,
|decimalField1 decimal,
|decimalField2 decimal(9,2),
|dateField dAte,
|timestampField tiMestamp,
|varcharField varchaR(12),
|arrayFieldSimple Array<inT>,
|arrayFieldComplex Array<Map<String, Struct<key:bigInt>>>,
|mapFieldSimple MAP<iNt, StRing>,
|mapFieldComplex Map<Map<stRING, fLOAT>, Struct<key:bigInt>>,
|structFieldSimple StRuct<key:INt, Value:STrINg>,
|structFieldComplex StRuct<key:Array<String>, Value:struct<`value_(2)`:Array<date>>>
|)
|USING org.apache.spark.sql.sources.AllDataTypesScanSource
|OPTIONS (
| From '1',
| To '10',
| option_with_underscores 'someval',
| option.with.dots 'someval'
|)
""".stripMargin)
}
sqlTest(
"SELECT * FROM oneToTen",
(1 to 10).map(Row(_)).toSeq)
sqlTest(
"SELECT i FROM oneToTen",
(1 to 10).map(Row(_)).toSeq)
sqlTest(
"SELECT i FROM oneToTen WHERE i < 5",
(1 to 4).map(Row(_)).toSeq)
sqlTest(
"SELECT i * 2 FROM oneToTen",
(1 to 10).map(i => Row(i * 2)).toSeq)
sqlTest(
"SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = b.i + 1",
(2 to 10).map(i => Row(i, i - 1)).toSeq)
test("Schema and all fields") {//模式所有字段
val expectedSchema = StructType(
StructField("string$%Field", StringType, true) ::
StructField("binaryField", BinaryType, true) ::
StructField("booleanField", BooleanType, true) ::
StructField("ByteField", ByteType, true) ::
StructField("shortField", ShortType, true) ::
StructField("int_Field", IntegerType, true) ::
StructField("longField_:,<>=+/~^", LongType, true) ::
StructField("floatField", FloatType, true) ::
StructField("doubleField", DoubleType, true) ::
StructField("decimalField1", DecimalType.USER_DEFAULT, true) ::
StructField("decimalField2", DecimalType(9, 2), true) ::
StructField("dateField", DateType, true) ::
StructField("timestampField", TimestampType, true) ::
StructField("varcharField", StringType, true) ::
StructField("arrayFieldSimple", ArrayType(IntegerType), true) ::
StructField("arrayFieldComplex",
        ArrayType(
MapType(StringType, StructType(StructField("key", LongType, true) :: Nil))), true) ::
StructField("mapFieldSimple", MapType(IntegerType, StringType), true) ::
StructField("mapFieldComplex",
MapType(
MapType(StringType, FloatType),
StructType(StructField("key", LongType, true) :: Nil)), true) ::
StructField("structFieldSimple",
StructType(
StructField("key", IntegerType, true) ::
StructField("Value", StringType, true) :: Nil), true) ::
StructField("structFieldComplex",
StructType(
StructField("key", ArrayType(StringType), true) ::
StructField("Value",
StructType(
StructField("value_(2)", ArrayType(DateType), true) :: Nil), true) :: Nil), true) ::
Nil
)
assert(expectedSchema == caseInsensitiveContext.table("tableWithSchema").schema)
checkAnswer(
sql(
"""SELECT
| `string$%Field`,
| cast(binaryField as string),
| booleanField,
| byteField,
| shortField,
| int_Field,
| `longField_:,<>=+/~^`,
| floatField,
| doubleField,
| decimalField1,
| decimalField2,
| dateField,
| timestampField,
| varcharField,
| arrayFieldSimple,
| arrayFieldComplex,
| mapFieldSimple,
| mapFieldComplex,
| structFieldSimple,
| structFieldComplex FROM tableWithSchema""".stripMargin),
tableWithSchemaExpected
)
}
sqlTest(
"SELECT count(*) FROM tableWithSchema",
Seq(Row(10)))
sqlTest(
"SELECT `string$%Field` FROM tableWithSchema",
(1 to 10).map(i => Row(s"str_$i")).toSeq)
sqlTest(
"SELECT int_Field FROM tableWithSchema WHERE int_Field < 5",
(1 to 4).map(Row(_)).toSeq)
sqlTest(
"SELECT `longField_:,<>=+/~^` * 2 FROM tableWithSchema",
(1 to 10).map(i => Row(i * 2.toLong)).toSeq)
sqlTest(
"SELECT structFieldSimple.key, arrayFieldSimple[1] FROM tableWithSchema a where int_Field=1",
Seq(Row(1, 2)))
sqlTest(
"SELECT structFieldComplex.Value.`value_(2)` FROM tableWithSchema",
(1 to 10).map(i => Row(Seq(Date.valueOf(s"1970-01-${i + 1}")))).toSeq)
test("Caching") {
    // Cached query execution
caseInsensitiveContext.cacheTable("oneToTen")
assertCached(sql("SELECT * FROM oneToTen"))
checkAnswer(
sql("SELECT * FROM oneToTen"),
(1 to 10).map(Row(_)).toSeq)
assertCached(sql("SELECT i FROM oneToTen"))
checkAnswer(
sql("SELECT i FROM oneToTen"),
(1 to 10).map(Row(_)).toSeq)
assertCached(sql("SELECT i FROM oneToTen WHERE i < 5"))
checkAnswer(
sql("SELECT i FROM oneToTen WHERE i < 5"),
(1 to 4).map(Row(_)).toSeq)
assertCached(sql("SELECT i * 2 FROM oneToTen"))
checkAnswer(
sql("SELECT i * 2 FROM oneToTen"),
(1 to 10).map(i => Row(i * 2)).toSeq)
assertCached(sql(
"SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = b.i + 1"), 2)
checkAnswer(sql(
"SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = b.i + 1"),
(2 to 10).map(i => Row(i, i - 1)).toSeq)
    // Verify uncaching
caseInsensitiveContext.uncacheTable("oneToTen")
assertCached(sql("SELECT * FROM oneToTen"), 0)
}
test("defaultSource") {//默认数据源
sql(
"""
|CREATE TEMPORARY TABLE oneToTenDef
|USING org.apache.spark.sql.sources
|OPTIONS (
| from '1',
| to '10'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM oneToTenDef"),
(1 to 10).map(Row(_)).toSeq)
}
test("exceptions") {//异常
    // Make sure we throw the correct exception when users use a relation provider that
    // only implements RelationProvider or SchemaRelationProvider.
val schemaNotAllowed = intercept[Exception] {
sql(
"""
|CREATE TEMPORARY TABLE relationProvierWithSchema (i int)
|USING org.apache.spark.sql.sources.SimpleScanSource
|OPTIONS (
| From '1',
| To '10'
|)
""".stripMargin)
}
assert(schemaNotAllowed.getMessage.contains("does not allow user-specified schemas"))
val schemaNeeded = intercept[Exception] {
sql(
"""
|CREATE TEMPORARY TABLE schemaRelationProvierWithoutSchema
|USING org.apache.spark.sql.sources.AllDataTypesScanSource
|OPTIONS (
| From '1',
| To '10'
|)
""".stripMargin)
}
assert(schemaNeeded.getMessage.contains("A schema needs to be specified when using"))
}
test("SPARK-5196 schema field with comment") {//模式字段与注释
sql(
"""
|CREATE TEMPORARY TABLE student(name string comment "SN", age int comment "SA", grade int)
|USING org.apache.spark.sql.sources.AllDataTypesScanSource
|OPTIONS (
| from '1',
| to '10',
| option_with_underscores 'someval',
| option.with.dots 'someval'
|)
""".stripMargin)
val planned = sql("SELECT * FROM student").queryExecution.executedPlan
val comments = planned.schema.fields.map { field =>
if (field.metadata.contains("comment")) field.metadata.getString("comment")
else "NO_COMMENT"
}.mkString(",")
assert(comments === "SN,SA,NO_COMMENT")
}
}
| tophua/spark1.52 | sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala | Scala | apache-2.0 | 12,684 |
package moe.pizza.auth
import java.io.File
import java.net.InetSocketAddress
import java.util.UUID
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import moe.pizza.auth.adapters.{LdapUserDatabase, XmppBroadcastService}
import moe.pizza.auth.ldap.client.LdapClient
import moe.pizza.auth.ldap.server.EmbeddedLdapServer
import moe.pizza.auth.webapp.Webapp
import org.http4s.client.blaze.PooledHttp1Client
import org.http4s.server.blaze.BlazeBuilder
import scopt.OptionParser
import scala.io.Source
import scala.util.{Failure, Try}
/**
* Created by andi on 19/02/16.
*/
object Main {
case class RunOptions(servers: Option[ServerOptions],
config: File = new File("./config.yml"))
case class ServerOptions(ldap: Boolean = true,
webinterface: Boolean = true,
restapi: Boolean = false)
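  // Command-line interface: an optional config-file argument plus a "server" command with
  // flags that toggle the embedded LDAP server, the web interface, and the REST API.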
val parser = new OptionParser[RunOptions]("pizza-auth") {
head("pizza-auth-3 command line interface")
help("help") text ("prints this usage text")
arg[File]("<config file>") optional () action { (x, c) =>
c.copy(config = x)
} text ("configuration file (optional)")
cmd("server") action { (_, c) =>
c.copy(servers = Some(ServerOptions()))
} text ("run pizza-auth server(s)") children (
opt[Boolean]("ldap") action { (x, c) =>
c.copy(servers = c.servers.map(_.copy(ldap = x)))
} text ("enable the built in LDAP server"),
opt[Boolean]("webinterface") action { (x, c) =>
c.copy(servers = c.servers.map(_.copy(webinterface = x)))
} text ("enable the main web interface"),
opt[Boolean]("restapi") action { (x, c) =>
c.copy(servers = c.servers.map(_.copy(restapi = x)))
} text ("enable the rest API")
)
}
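  // Parses the YAML configuration file into a ConfigFile case class using Jackson's Scala
  // module; any read or parse error is captured in the returned Try.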
def parseConfigFile(f: File): Try[config.ConfigFile.ConfigFile] = {
val OM = new ObjectMapper(new YAMLFactory())
OM.registerModule(DefaultScalaModule)
Try {
OM.readValue[config.ConfigFile.ConfigFile](
Source.fromFile(f).mkString,
classOf[config.ConfigFile.ConfigFile])
}
}
def main(args: Array[String]): Unit = {
val config = parser.parse(args, RunOptions(None))
val configfile = parseConfigFile(config.get.config)
configfile match {
case Failure(f) =>
System.err.println("Unable to read configuration file: %s".format(f))
case _ => ()
}
config match {
case Some(c) =>
c.servers match {
case Some(s) =>
val internalpassword = configfile.get.embeddedldap.password
.getOrElse(UUID.randomUUID().toString)
val ldap = new EmbeddedLdapServer(
configfile.get.embeddedldap.instancePath,
configfile.get.embeddedldap.basedn,
configfile.get.embeddedldap.host,
configfile.get.embeddedldap.port
)
if (s.ldap) {
ldap.setPassword(internalpassword)
ldap.start()
}
if (s.webinterface) {
implicit val client = PooledHttp1Client()
val graders =
configfile.get.auth.constructGraders(configfile.get)(client)
val lc = new LdapClient("localhost",
configfile.get.embeddedldap.port,
"uid=admin,ou=system",
internalpassword)
val broadcasters = configfile.get.auth.pingbot.map { c =>
new XmppBroadcastService(c.host, c.password)
}.toList
println(
s"constructed broadcasters from ${configfile.get.auth.pingbot}")
val webapp = new Webapp(
configfile.get,
graders,
9021,
new LdapUserDatabase(lc,
ldap.directoryService.getSchemaManager,
configfile.get.embeddedldap.basedn),
None,
None,
None,
None,
None,
broadcasters)
val builder = BlazeBuilder
.mountService(webapp.router)
.bindSocketAddress(new InetSocketAddress("127.0.0.1", 9021))
val server = builder.run
println(
s"LDAP server started on localhost:${configfile.get.embeddedldap.port} with admin password ${internalpassword}")
println("Web server started on 127.0.0.1:9021")
//val webapp = new Webapp(configfile.get, graders, 9021, new LdapUserDatabase(lc, ldap.directoryService.getSchemaManager))
//webapp.start()
}
case None =>
println("You must pick a set of servers to run")
sys.exit(1)
}
case None =>
println("Configuration couldn't be parsed")
sys.exit(1)
}
}
}
| xxpizzaxx/pizza-auth-3 | src/main/scala/moe/pizza/auth/Main.scala | Scala | mit | 5,073 |
object Test {
abstract class Number
case class MyInt(n: Int) extends Number
case class MyDouble(d: Double) extends Number
trait Term[a]
case class Cell[a](var x: a) extends Term[a]
final case class NumTerm(val n: Number) extends Term[Number]
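  // Illustrative note (not in the original test): because NumTerm is declared as a
  // Term[Number], matching `t` against NumTerm(n) in f below lets the compiler refine
  // the type parameter `a` to Number for that case, which is what makes assigning a
  // MyDouble into the Cell[a] typecheck.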
def f[a](t: Term[a], c: Cell[a]): Unit = {
t match {
case NumTerm(n) => c.x = MyDouble(1.0)
}
}
val x: Term[Number] = NumTerm(MyInt(5))
def main(args: Array[String]): Unit = {
val cell = Cell[Number](MyInt(6))
Console.println(cell)
f[Number](new NumTerm(MyInt(5)), cell)
Console.println(cell)
}
}
| yusuke2255/dotty | tests/pending/pos/gadts2.scala | Scala | bsd-3-clause | 596 |
package spire
package math.extras.interval
import spire.algebra.{Bool, Eq, Order}
import spire.math._
import spire.math.interval._
import scala.annotation.tailrec
import scala.language.implicitConversions
sealed abstract class IntervalTrie[T] extends IntervalSet[T, IntervalTrie[T]]
object IntervalTrie {
implicit def algebra[T:Element]: Bool[IntervalTrie[T]] with Eq[IntervalTrie[T]] = new Bool[IntervalTrie[T]] with Eq[IntervalTrie[T]] {
def eqv(x: IntervalTrie[T], y: IntervalTrie[T]): Boolean = x == y
def zero: IntervalTrie[T] = IntervalTrie.empty[T]
def one: IntervalTrie[T] = IntervalTrie.all[T]
def complement(a: IntervalTrie[T]): IntervalTrie[T] = ~a
def or(a: IntervalTrie[T], b: IntervalTrie[T]): IntervalTrie[T] = a | b
def and(a: IntervalTrie[T], b: IntervalTrie[T]): IntervalTrie[T] = a & b
override def xor(a: IntervalTrie[T], b: IntervalTrie[T]): IntervalTrie[T] = a ^ b
}
trait Element[@sp(Float, Int, Long, Double) T] {
implicit def order:Order[T]
def toLong(value:T): Long
def fromLong(key:Long) : T
}
implicit object ByteElement extends Element[Byte] {
def order: Order[Byte] = spire.std.byte.ByteAlgebra
def toLong(value:Byte): Long = value
def fromLong(key:Long): Byte = key.toByte
}
implicit object ShortElement extends Element[Short] {
def order: Order[Short] = spire.std.short.ShortAlgebra
def toLong(value:Short): Long = value
def fromLong(key:Long): Short = key.toShort
}
implicit object IntElement extends Element[Int] {
def order: Order[Int] = spire.std.int.IntAlgebra
def toLong(value:Int): Long = value
def fromLong(key:Long) : Int = key.toInt
}
implicit object LongElement extends Element[Long] {
def order: Order[Long] = spire.std.long.LongAlgebra
def toLong(value:Long): Long = value
def fromLong(key:Long) : Long = key
}
implicit object FloatElement extends Element[Float] {
def order: Order[Float] = spire.std.float.FloatAlgebra
def toLong(value:Float): Long = {
if(value.isNaN)
throw new IllegalArgumentException("NaN")
// sign and magnitude signed integer
val signAndMagnitude = java.lang.Float.floatToIntBits(value)
// two's complement signed integer: if the sign bit is set, negate everything except the sign bit
val twosComplement = if(signAndMagnitude>=0) signAndMagnitude else (-signAndMagnitude | (1L<<63))
twosComplement
}
def fromLong(twosComplement:Long): Float = {
// sign and magnitude signed integer: if the sign bit is set, negate everything except the sign bit
val signAndMagnitude = if(twosComplement>=0) twosComplement else (-twosComplement | (1L<<63))
// double from sign and magnitude signed integer
java.lang.Float.intBitsToFloat(signAndMagnitude.toInt)
}
}
implicit object CharElement extends Element[Char] {
def order: Order[Char] = spire.std.char.CharAlgebra
def toLong(value:Char): Long = value.toLong
def fromLong(key:Long): Char = key.toChar
}
implicit object DoubleElement extends Element[Double] {
def order: Order[Double] = spire.std.double.DoubleAlgebra
def toLong(value:Double): Long = {
if(value.isNaN)
throw new IllegalArgumentException("NaN")
// sign and magnitude signed integer
val signAndMagnitude = java.lang.Double.doubleToLongBits(value)
// two's complement signed integer: if the sign bit is set, negate everything except the sign bit
val twosComplement = if(signAndMagnitude>=0) signAndMagnitude else (-signAndMagnitude | (1L<<63))
twosComplement
}
def fromLong(twosComplement:Long): Double = {
// sign and magnitude signed integer: if the sign bit is set, negate everything except the sign bit
val signAndMagnitude = if(twosComplement>=0) twosComplement else (-twosComplement | (1L<<63))
// double from sign and magnitude signed integer
java.lang.Double.longBitsToDouble(signAndMagnitude)
}
}
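  /*
   * Illustrative sketch (not in the original source): the point of the conversion above
   * is that the resulting Long keys sort in the same order as the doubles they encode
   * (NaN excepted, since it is rejected outright). For example:
   *
   *   val xs = Seq(-2.5, -1.5, 0.0, 1.5, Double.MaxValue)
   *   val ks = xs.map(DoubleElement.toLong)
   *   ks == ks.sorted                                                       // true
   *   xs.forall(x => DoubleElement.fromLong(DoubleElement.toLong(x)) == x)  // true
   */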
implicit object UByteElement extends Element[UByte] {
def order: Order[UByte] = spire.math.UByte.UByteAlgebra
def toLong(value:UByte): Long = value.toLong
def fromLong(key:Long) : UByte = UByte(key.toByte)
}
implicit object UShortElement extends Element[UShort] {
def order: Order[UShort] = spire.math.UShort.UShortAlgebra
def toLong(value:UShort): Long = value.toLong
def fromLong(key:Long) : UShort = UShort(key.toShort)
}
implicit object UIntElement extends Element[UInt] {
def order: Order[UInt] = spire.math.UInt.UIntAlgebra
def toLong(value:UInt): Long = value.toLong
def fromLong(key:Long) : UInt = UInt(key.toInt)
}
implicit object ULongElement extends Element[ULong] {
def order: Order[ULong] = spire.math.ULong.ULongAlgebra
def toLong(value:ULong): Long = value.toLong + Long.MinValue
def fromLong(key:Long) : ULong = ULong(key - Long.MinValue)
}
import Tree._
private implicit def tIsLong[T](value:T)(implicit tl:Element[T]) = tl.toLong(value)
private[interval] def fromKind[T:Element](value:T, kind:Int) = {
val bound = kind match {
case 0 => Below(value)
case 1 => Above(value)
case 2 => Both(value)
}
IntervalTrie[T](false, bound)
}
def constant[T:Element](value:Boolean): IntervalTrie[T] = IntervalTrie[T](value, null)
def empty[T:Element]: IntervalTrie[T] = constant[T](false)
def point[T:Element](value:T): IntervalTrie[T] = IntervalTrie[T](false, Tree.Leaf(toPrefix(value), true, false))
def atOrAbove[T:Element](value:T): IntervalTrie[T] = IntervalTrie[T](false, Tree.Leaf(toPrefix(value), true, true))
def above[T:Element](value:T): IntervalTrie[T] = IntervalTrie[T](false, Tree.Leaf(toPrefix(value), false, true))
def all[T:Element]: IntervalTrie[T] = constant[T](true)
def hole[T:Element](value:T): IntervalTrie[T] = IntervalTrie[T](true, Tree.Leaf(toPrefix(value), true, false))
def below[T:Element](value:T): IntervalTrie[T] = IntervalTrie[T](true, Tree.Leaf(toPrefix(value), true, true))
def atOrBelow[T:Element](value:T): IntervalTrie[T] = IntervalTrie[T](true, Tree.Leaf(toPrefix(value), false, true))
def apply[T:Element](interval:Interval[T]): IntervalTrie[T] = interval.fold {
case (Closed(a), Closed(b)) if a == b => point(a)
case (Unbound(), Open(x)) => below(x)
case (Unbound(), Closed(x)) => atOrBelow(x)
case (Open(x), Unbound()) => above(x)
case (Closed(x), Unbound()) => atOrAbove(x)
case (Closed(a), Closed(b)) => fromTo(Below(a), Above(b))
case (Closed(a), Open(b)) => fromTo(Below(a), Below(b))
case (Open(a), Closed(b)) => fromTo(Above(a), Above(b))
case (Open(a), Open(b)) => fromTo(Above(a), Below(b))
case (Unbound(), Unbound()) => all[T]
case (EmptyBound(), EmptyBound()) => empty[T]
}
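  /*
   * Illustrative sketch (not in the original source): the fold above maps each bound
   * combination onto one of the constructors defined in this object, e.g.
   *
   *   IntervalTrie(Interval.fromBounds(Closed(1L), Open(3L)))   // [1, 3)  -> fromTo(Below(1), Below(3))
   *   IntervalTrie(Interval.fromBounds(Closed(5L), Unbound()))  // [5, ∞)  -> atOrAbove(5)
   *   IntervalTrie(Interval.point(7L))                          // [7, 7]  -> point(7)
   */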
private object Below {
def apply[T: Element](value:T) = Leaf(toPrefix(value), true, true)
def unapply(l:Leaf) = if(l.at && l.sign) Some(l.key) else None
}
private object Above {
def apply[T: Element](value:T) = Leaf(toPrefix(value), false, true)
def unapply(l:Leaf) = if(!l.at && l.sign) Some(l.key) else None
}
private object Both {
def apply[T: Element](value:T) = Leaf(toPrefix(value), true, false)
def unapply(l:Leaf) = if(l.at && !l.sign) Some(l.key) else None
}
private def fromTo[T:Element](a:Leaf, b:Leaf) : IntervalTrie[T] = {
IntervalTrie[T](false, concat(a, b))
}
def apply(text:String): IntervalTrie[Long] = {
val la = spire.std.long.LongAlgebra
def rationalToLong(r:Rational) : Long = {
if(r>Long.MaxValue || r<Long.MinValue)
throw new NumberFormatException("Integer number too large")
else
r.toLong
}
def intervalToIntervalSet(i:Interval[Long]) : IntervalTrie[Long] = apply(i)
val intervals = text.split(';').map(Interval.apply).map(_.mapBounds(rationalToLong)(la))
val simpleSets = intervals.map(intervalToIntervalSet)
(empty[Long] /: simpleSets)(_ | _)
}
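  /*
   * Illustrative sketch (not in the original source): apply(text) splits on ';' and
   * unions the pieces, each piece being parsed with spire's Interval string syntax, so
   *
   *   IntervalTrie("[1, 3);[5, 6]")
   *
   * should denote the same set as
   *
   *   IntervalTrie(Interval.fromBounds(Closed(1L), Open(3L))) |
   *     IntervalTrie(Interval.fromBounds(Closed(5L), Closed(6L)))
   */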
private final def foreachInterval[T:Element, U](a0:Boolean, a:Tree)(f:Interval[T] => U): Unit = {
val x = implicitly[Element[T]]
import x._
def op(b0:Bound[T], a0:Boolean, a:Tree): Bound[T] = a match {
case Below(a) =>
if(a0)
f(Interval.fromBounds(b0, Open(fromLong(a))))
Closed(fromLong(a))
case Above(a) =>
if(a0)
f(Interval.fromBounds(b0, Closed(fromLong(a))))
Open(fromLong(a))
case Both(a) =>
if(a0)
f(Interval.fromBounds(b0, Open(fromLong(a))))
else
f(Interval.point(fromLong(a)))
Open(fromLong(a))
case a:Branch =>
val am = a0 ^ a.left.sign
val bm = op(b0, a0, a.left)
val b1 = op(bm, am, a.right)
b1
case _ =>
Unbound()
}
val last = op(Unbound(), a0, a)
if(a0 ^ ((a ne null) && a.sign))
f(Interval.fromBounds(last, Unbound()))
}
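  /*
   * Illustrative sketch (not in the original source): foreachInterval walks the leaves
   * in key order and emits one Interval per "on" stretch. For instance, hole(5L) is
   * encoded as belowAll = true with a single Both(5) leaf, and yields the two intervals
   *
   *   (-∞, 5)  and  (5, ∞)
   *
   * in that order.
   */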
private abstract class TreeIterator[T](a:Tree) extends Iterator[T] {
var index = 0
var buffer = new Array[Tree](65)
def pop() = {
index -= 1
buffer(index)
}
def push(x: Tree): Unit = {
buffer(index) = x
index += 1
}
if(a ne null)
push(a)
def hasNextLeaf = index != 0
final def nextLeaf(): Leaf = pop() match {
case b:Branch =>
push(b.right)
push(b.left)
nextLeaf()
case l:Leaf => l
// $COVERAGE-OFF$
case _ => unreachable
// $COVERAGE-ON$
}
}
private final class EdgeIterator[T:Element](tree:Tree) extends TreeIterator[T](tree) {
private val element = implicitly[Element[T]]
def hasNext = hasNextLeaf
def next = element.fromLong(nextLeaf.key)
}
private final class IntervalIterator[T:Element](e:IntervalTrieImpl[T]) extends TreeIterator[Interval[T]](e.tree) {
private[this] val element = implicitly[Element[T]]
private[this] var lower: Bound[T] = if(e.belowAll) Unbound() else null
private[this] def nextInterval(): Interval[T] = {
import element.{fromLong, order}
var result : Interval[T] = null
if(hasNextLeaf) {
val leaf = nextLeaf()
if(lower eq null) leaf match {
case Both(x) =>
result = Interval.point(fromLong(x))
lower = null
case Below(x) =>
result = null
lower = Closed(fromLong(x))
case Above(x) =>
result = null
lower = Open(fromLong(x))
// $COVERAGE-OFF$
case _ => unreachable
// $COVERAGE-ON$
} else leaf match {
case Both(x) =>
val upper = Open(fromLong(x))
result = Interval.fromBounds[T](lower, upper)
lower = upper
case Below(x) =>
val upper = Open(fromLong(x))
result = Interval.fromBounds[T](lower, upper)
lower = null
case Above(x) =>
val upper = Closed(fromLong(x))
result = Interval.fromBounds[T](lower, upper)
lower = null
// $COVERAGE-OFF$
case _ => unreachable
// $COVERAGE-ON$
}
} else if(lower ne null) {
result = Interval.fromBounds(lower, Unbound())
lower = null
} else {
Iterator.empty.next()
}
result
}
def hasNext: Boolean = hasNextLeaf || (lower ne null)
@tailrec
override def next(): Interval[T] = {
val result = nextInterval()
if(result ne null)
result
else
next()
}
}
private def apply[T:Element](below:Boolean, tree:Tree): IntervalTrie[T] =
IntervalTrieImpl(below, tree)
private final case class IntervalTrieImpl[T](belowAll:Boolean, tree:Tree)(implicit ise:Element[T]) extends IntervalTrie[T] { lhs =>
import Tree._
import ise.order
def aboveAll: Boolean = if(tree eq null) belowAll else belowAll ^ tree.sign
def isEmpty = !belowAll && (tree eq null)
def isContiguous = if(belowAll) {
tree match {
case a:Leaf => a.sign
case null => true
case _ => false
}
} else {
tree match {
case _:Leaf => true
case Branch(_,_,a:Leaf, b:Leaf) => a.sign & b.sign
case null => true
case _ => false
}
}
def hull: Interval[T] = {
@tailrec
def lowerBound(a:Tree) : Bound[T] = a match {
case a:Branch => lowerBound(a.left)
case Above(x) => Open(ise.fromLong(x))
case Below(x) => Closed(ise.fromLong(x))
case Both(x) => Closed(ise.fromLong(x))
}
@tailrec
def upperBound(a:Tree) : Bound[T] = a match {
case a:Branch => upperBound(a.right)
case Both(x) => Closed(ise.fromLong(x))
case Above(x) => Closed(ise.fromLong(x))
case Below(x) => Open(ise.fromLong(x))
}
if(isEmpty) {
Interval.empty[T]
} else {
val lower = if(belowAll) Unbound[T]() else lowerBound(tree)
val upper = if(aboveAll) Unbound[T]() else upperBound(tree)
Interval.fromBounds(lower, upper)
}
}
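    /*
     * Illustrative sketch (not in the original source): for a set built from two
     * disjoint pieces the hull spans from the lowest to the highest edge, e.g.
     *
     *   IntervalTrie("[1, 2];[5, 6]").hull   // == Interval.fromBounds(Closed(1L), Closed(6L))
     *
     * assuming the textual syntax accepted by IntervalTrie.apply(String) above.
     */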
def below(value:T) : Boolean = SampleBelow(belowAll, tree, toPrefix(ise.toLong(value)))
def at(value:T) : Boolean = SampleAt(belowAll, tree, toPrefix(ise.toLong(value)))
def above(value:T) : Boolean = SampleAbove(belowAll, tree, toPrefix(ise.toLong(value)))
def apply(value:T) : Boolean = at(value)
def & (rhs:IntervalTrie[T]) = rhs match {
case rhs:IntervalTrieImpl[T] =>
IntervalTrie[T](lhs.belowAll & rhs.belowAll, AndCalculator(lhs.belowAll, lhs.tree, rhs.belowAll, rhs.tree))
}
def | (rhs:IntervalTrie[T]) = rhs match {
case rhs: IntervalTrieImpl[T] =>
IntervalTrie[T](lhs.belowAll | rhs.belowAll, OrCalculator(lhs.belowAll, lhs.tree, rhs.belowAll, rhs.tree))
}
def ^ (rhs:IntervalTrie[T]) = rhs match {
case rhs: IntervalTrieImpl[T] => IntervalTrie[T](lhs.belowAll ^ rhs.belowAll, XorCalculator(lhs.belowAll, lhs.tree, rhs.belowAll, rhs.tree))
}
def unary_~ = IntervalTrie[T](!belowAll, tree)
def isSupersetOf(rhs:IntervalTrie[T]) = rhs match {
case rhs:IntervalTrieImpl[T] =>
SupersetOfCalculator(lhs.belowAll, lhs.tree, rhs.belowAll, rhs.tree)
}
def intersects(rhs:IntervalTrie[T]) = rhs match {
case rhs:IntervalTrieImpl[T] =>
!DisjointCalculator(lhs.belowAll, lhs.tree, rhs.belowAll, rhs.tree)
}
def isProperSupersetOf(rhs:IntervalTrie[T]) = isSupersetOf(rhs) && (rhs != lhs)
def intervals = new Traversable[Interval[T]] {
override def foreach[U](f: Interval[T] => U): Unit = foreachInterval(belowAll, tree)(f)
}
def intervalIterator = new IntervalIterator[T](lhs)
def edges : Iterable[T] = new Iterable[T] {
override def iterator: Iterator[T] = new EdgeIterator[T](lhs.tree)
}
override def toString = {
if (isEmpty)
Interval.empty[T].toString
else
intervals.map(_.toString).mkString(";")
}
}
}
| rklaehn/spire | extras/src/main/scala/spire/math/extras/interval/IntervalTrie.scala | Scala | mit | 15,158 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.typesafe.tools.mima.core._
import com.typesafe.tools.mima.core.ProblemFilters._
/**
* Additional excludes for checking of Spark's binary compatibility.
*
* This acts as an official audit of cases where we excluded other classes. Please use the narrowest
* possible exclude here. MIMA will usually tell you what exclude to use, e.g.:
*
* ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.take")
*
* It is also possible to exclude Spark classes and packages. This should be used sparingly:
*
* MimaBuild.excludeSparkClass("graphx.util.collection.GraphXPrimitiveKeyOpenHashMap")
*
* For a new Spark version, please update MimaBuild.scala to reflect the previous version.
*/
object MimaExcludes {
// Exclude rules for 2.3.x
lazy val v23excludes = v22excludes ++ Seq(
// [SPARK-20495][SQL] Add StorageLevel to cacheTable API
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.cacheTable"),
// [SPARK-19937] Add remote bytes read to disk.
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ShuffleReadMetricDistributions.this"),
// [SPARK-21276] Update lz4-java to the latest (v1.4.0)
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.io.LZ4BlockInputStream")
)
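  // Illustrative only, not an actual exclude: a new rule added to one of these lists
  // follows the same pattern as above, i.e. a JIRA reference comment plus the narrowest
  // filter that silences the report, for example
  //   // [SPARK-XXXXX] One-line description of the change
  //   ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SomeClass.someMethod")
  // where the JIRA number, class and method names here are placeholders.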
// Exclude rules for 2.2.x
lazy val v22excludes = v21excludes ++ Seq(
    // [SPARK-20355] Add per-application Spark version on the history server header page
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ApplicationAttemptInfo.this"),
// [SPARK-19652][UI] Do auth checks for REST API access.
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.history.HistoryServer.withSparkUI"),
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("org.apache.spark.status.api.v1.UIRootFromServletContext"),
// [SPARK-18663][SQL] Simplify CountMinSketch aggregate implementation
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.util.sketch.CountMinSketch.toByteArray"),
// [SPARK-18949] [SQL] Add repairTable API to Catalog
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.recoverPartitions"),
// [SPARK-18537] Add a REST api to spark streaming
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.streaming.scheduler.StreamingListener.onStreamingStarted"),
// [SPARK-19148][SQL] do not expose the external table concept in Catalog
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.createTable"),
// [SPARK-14272][ML] Add logLikelihood in GaussianMixtureSummary
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.clustering.GaussianMixtureSummary.this"),
// [SPARK-19267] Fetch Failure handling robust to user error handling
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.TaskContext.setFetchFailed"),
// [SPARK-19069] [CORE] Expose task 'status' and 'duration' in spark history server REST API.
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.TaskData.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.TaskData.<init>$default$10"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.TaskData.<init>$default$11"),
// [SPARK-17161] Removing Python-friendly constructors not needed
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.OneVsRestModel.this"),
// [SPARK-19820] Allow reason to be specified to task kill
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.TaskKilled$"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.productElement"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.productArity"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.canEqual"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.productIterator"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.countTowardsTaskFailures"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.productPrefix"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.TaskKilled.toErrorString"),
ProblemFilters.exclude[FinalMethodProblem]("org.apache.spark.TaskKilled.toString"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.TaskContext.killTaskIfInterrupted"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.TaskContext.getKillReason"),
// [SPARK-19876] Add one time trigger, and improve Trigger APIs
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("org.apache.spark.sql.streaming.Trigger"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.streaming.ProcessingTime"),
// [SPARK-17471][ML] Add compressed method to ML matrices
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.compressed"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.compressedColMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.compressedRowMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.isRowMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.isColMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.getSparseSizeInBytes"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toDense"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toSparse"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toDenseRowMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toSparseRowMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toSparseColMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.getDenseSizeInBytes"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toDenseColMajor"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toDenseMatrix"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.toSparseMatrix"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Matrix.getSizeInBytes")
) ++ Seq(
// [SPARK-17019] Expose on-heap and off-heap memory usage in various places
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerBlockManagerAdded.copy"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerBlockManagerAdded.this"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.scheduler.SparkListenerBlockManagerAdded$"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerBlockManagerAdded.apply"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.storage.StorageStatus.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.StorageStatus.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.RDDDataDistribution.this")
)
// Exclude rules for 2.1.x
lazy val v21excludes = v20excludes ++ {
Seq(
      // [SPARK-17671] Spark 2.0 history server summary page is slow even when spark.history.ui.maxApplications is set
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.deploy.history.HistoryServer.getApplicationList"),
// [SPARK-14743] Improve delegation token handling in secure cluster
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.getTimeFromNowToRenewal"),
// [SPARK-16199][SQL] Add a method to list the referenced columns in data source Filter
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.Filter.references"),
// [SPARK-16853][SQL] Fixes encoder error in DataSet typed select
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.Dataset.select"),
// [SPARK-16967] Move Mesos to Module
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkMasterRegex.MESOS_REGEX"),
// [SPARK-16240] ML persistence backward compatibility for LDA
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.clustering.LDA$"),
// [SPARK-17717] Add Find and Exists method to Catalog.
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.getDatabase"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.getTable"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.getFunction"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.databaseExists"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.tableExists"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.functionExists"),
// [SPARK-17731][SQL][Streaming] Metrics for structured streaming
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.SourceStatus.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.streaming.SourceStatus.offsetDesc"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.status"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.SinkStatus.this"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.StreamingQueryInfo"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryStarted.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryStarted.queryInfo"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryProgress.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryProgress.queryInfo"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryTerminated.queryInfo"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.StreamingQueryListener$QueryStarted"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgress"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.StreamingQueryListener$QueryTerminated"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryListener.onQueryStarted"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener.onQueryStarted"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryListener.onQueryProgress"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener.onQueryProgress"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryListener.onQueryTerminated"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener.onQueryTerminated"),
// [SPARK-18516][SQL] Split state and progress in streaming
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.SourceStatus"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.SinkStatus"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.sinkStatus"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.sourceStatuses"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.streaming.StreamingQuery.id"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.lastProgress"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.recentProgress"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.id"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryManager.get"),
// [SPARK-17338][SQL] add global temp view
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.dropGlobalTempView"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.catalog.Catalog.dropTempView"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.dropTempView"),
// [SPARK-18034] Upgrade to MiMa 0.1.11 to fix flakiness.
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasAggregationDepth.aggregationDepth"),
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasAggregationDepth.getAggregationDepth"),
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasAggregationDepth.org$apache$spark$ml$param$shared$HasAggregationDepth$_setter_$aggregationDepth_="),
// [SPARK-18236] Reduce duplicate objects in Spark UI and HistoryServer
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.scheduler.TaskInfo.accumulables"),
// [SPARK-18657] Add StreamingQuery.runId
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQuery.runId"),
// [SPARK-18694] Add StreamingQuery.explain and exception to Python and fix StreamingQueryException
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.StreamingQueryException$"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryException.startOffset"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryException.endOffset"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.streaming.StreamingQueryException.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryException.query")
)
}
// Exclude rules for 2.0.x
lazy val v20excludes = {
Seq(
excludePackage("org.apache.spark.rpc"),
excludePackage("org.spark-project.jetty"),
excludePackage("org.spark_project.jetty"),
excludePackage("org.apache.spark.internal"),
excludePackage("org.apache.spark.unused"),
excludePackage("org.apache.spark.unsafe"),
excludePackage("org.apache.spark.memory"),
excludePackage("org.apache.spark.util.collection.unsafe"),
excludePackage("org.apache.spark.sql.catalyst"),
excludePackage("org.apache.spark.sql.execution"),
excludePackage("org.apache.spark.sql.internal"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.feature.PCAModel.this"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.status.api.v1.StageData.this"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.status.api.v1.ApplicationAttemptInfo.this"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.status.api.v1.ApplicationAttemptInfo.<init>$default$5"),
// SPARK-14042 Add custom coalescer support
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.rdd.RDD.coalesce"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.rdd.PartitionCoalescer$LocationIterator"),
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("org.apache.spark.rdd.PartitionCoalescer"),
// SPARK-15532 Remove isRootContext flag from SQLContext.
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLContext.isRootContext"),
// SPARK-12600 Remove SQL deprecated methods
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLContext$QueryExecution"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLContext$SparkPlanner"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.applySchema"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.parquetFile"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.jdbc"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.jsonFile"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.jsonRDD"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.load"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.dialectClassName"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.getSQLDialect"),
// SPARK-13664 Replace HadoopFsRelation with FileFormat
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.ml.source.libsvm.LibSVMRelation"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.HadoopFsRelationProvider"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.HadoopFsRelation$FileStatusCache"),
// SPARK-15543 Rename DefaultSources to make them more self-describing
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.ml.source.libsvm.DefaultSource")
) ++ Seq(
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.SparkContext.emptyRDD"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.broadcast.HttpBroadcastFactory"),
// SPARK-14358 SparkListener from trait to abstract class
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.SparkContext.addSparkListener"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.JavaSparkListener"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.SparkFirehoseListener"),
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("org.apache.spark.scheduler.SparkListener"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ui.jobs.JobProgressListener"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ui.exec.ExecutorsListener"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ui.env.EnvironmentListener"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ui.storage.StorageListener"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.storage.StorageStatusListener")
) ++
Seq(
// SPARK-3369 Fix Iterable/Iterator in Java API
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.FlatMapFunction.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.FlatMapFunction.call"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.DoubleFlatMapFunction.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.DoubleFlatMapFunction.call"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.FlatMapFunction2.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.FlatMapFunction2.call"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.PairFlatMapFunction.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.PairFlatMapFunction.call"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.CoGroupFunction.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.CoGroupFunction.call"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.MapPartitionsFunction.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.MapPartitionsFunction.call"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.function.FlatMapGroupsFunction.call"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.function.FlatMapGroupsFunction.call")
) ++
Seq(
// [SPARK-6429] Implement hashCode and equals together
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.Partition.org$apache$spark$Partition$$super=uals")
) ++
Seq(
// SPARK-4819 replace Guava Optional
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.JavaSparkContext.getCheckpointDir"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.api.java.JavaSparkContext.getSparkHome"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.getCheckpointFile"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.partitioner"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.getCheckpointFile"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.partitioner")
) ++
Seq(
// SPARK-12481 Remove Hadoop 1.x
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("org.apache.spark.mapred.SparkHadoopMapRedUtil"),
// SPARK-12615 Remove deprecated APIs in core
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.<init>$default$6"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.numericRDDToDoubleRDDFunctions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.intToIntWritable"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.intWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.writableWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.rddToPairRDDFunctions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.rddToAsyncRDDActions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.boolToBoolWritable"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.longToLongWritable"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.doubleWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.rddToOrderedRDDFunctions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.floatWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.booleanWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.stringToText"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.doubleRDDToDoubleRDDFunctions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.doubleToDoubleWritable"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.bytesWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.rddToSequenceFileRDDFunctions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.bytesToBytesWritable"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.longWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.stringWritableConverter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.floatToFloatWritable"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.rddToPairRDDFunctions$default$4"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.addOnCompleteCallback"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.runningLocally"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.attemptId"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.defaultMinSplits"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.SparkContext.runJob"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.runJob"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.tachyonFolderName"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.initLocalProperties"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.clearJars"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.clearFiles"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.SparkContext.this"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.flatMapWith$default$2"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.toArray"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.mapWith$default$2"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.mapPartitionsWithSplit"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.flatMapWith"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.filterWith"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.foreachWith"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.mapWith"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.mapPartitionsWithSplit$default$2"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.SequenceFileRDDFunctions.this"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.splits"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.toArray"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaSparkContext.defaultMinSplits"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaSparkContext.clearJars"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaSparkContext.clearFiles"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.externalBlockStoreFolderName"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.storage.ExternalBlockStore$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.storage.ExternalBlockManager"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.storage.ExternalBlockStore")
) ++ Seq(
// SPARK-12149 Added new fields to ExecutorSummary
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.status.api.v1.ExecutorSummary.this")
) ++
// SPARK-12665 Remove deprecated and unused classes
Seq(
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.graphx.GraphKryoRegistrator"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.Vector"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.Vector$Multiplier"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.Vector$")
) ++ Seq(
// SPARK-12591 Register OpenHashMapBasedStateMap for Kryo
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.serializer.KryoInputDataInputBridge"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.serializer.KryoOutputDataOutputBridge")
) ++ Seq(
// SPARK-12510 Refactor ActorReceiver to support Java
ProblemFilters.exclude[AbstractClassProblem]("org.apache.spark.streaming.receiver.ActorReceiver")
) ++ Seq(
// SPARK-12895 Implement TaskMetrics using accumulators
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.internalMetricsToAccumulators"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.collectInternalAccumulators"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.collectAccumulators")
) ++ Seq(
// SPARK-12896 Send only accumulator updates to driver, not TaskMetrics
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.Accumulable.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.Accumulator.this"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.Accumulator.initialValue")
) ++ Seq(
// SPARK-12692 Scala style: Fix the style violation (Space before "," or ":")
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.SparkSink.org$apache$spark$streaming$flume$sink$Logging$$log_"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.SparkSink.org$apache$spark$streaming$flume$sink$Logging$$log__="),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.SparkAvroCallbackHandler.org$apache$spark$streaming$flume$sink$Logging$$log_"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.SparkAvroCallbackHandler.org$apache$spark$streaming$flume$sink$Logging$$log__="),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.Logging.org$apache$spark$streaming$flume$sink$Logging$$log__="),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.Logging.org$apache$spark$streaming$flume$sink$Logging$$log_"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.Logging.org$apache$spark$streaming$flume$sink$Logging$$_log"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.Logging.org$apache$spark$streaming$flume$sink$Logging$$_log_="),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.TransactionProcessor.org$apache$spark$streaming$flume$sink$Logging$$log_"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.flume.sink.TransactionProcessor.org$apache$spark$streaming$flume$sink$Logging$$log__=")
) ++ Seq(
// SPARK-12689 Migrate DDL parsing to the newly absorbed parser
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.execution.datasources.DDLParser"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.execution.datasources.DDLException"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.SQLContext.ddlParser")
) ++ Seq(
// SPARK-7799 Add "streaming-akka" project
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.zeromq.ZeroMQUtils.createStream"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.streaming.zeromq.ZeroMQUtils.createStream"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.streaming.zeromq.ZeroMQUtils.createStream$default$6"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.zeromq.ZeroMQUtils.createStream$default$5"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.actorStream$default$4"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.actorStream$default$3"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.actorStream"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.api.java.JavaStreamingContext.actorStream"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.streaming.zeromq.ZeroMQReceiver"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.receiver.ActorReceiver$Supervisor")
) ++ Seq(
// SPARK-12348 Remove deprecated Streaming APIs.
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.streaming.dstream.DStream.foreach"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.toPairDStreamFunctions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.toPairDStreamFunctions$default$4"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.awaitTermination"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.StreamingContext.networkStream"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.api.java.JavaStreamingContextFactory"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.api.java.JavaStreamingContext.awaitTermination"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.api.java.JavaStreamingContext.sc"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.api.java.JavaDStreamLike.reduceByWindow"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.api.java.JavaDStreamLike.foreachRDD"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.api.java.JavaDStreamLike.foreach"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.streaming.api.java.JavaStreamingContext.getOrCreate")
) ++ Seq(
// SPARK-12847 Remove StreamingListenerBus and post all Streaming events to the same thread as Spark events
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.AsynchronousListenerBus$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.AsynchronousListenerBus")
) ++ Seq(
// SPARK-11622 Make LibSVMRelation extends HadoopFsRelation and Add LibSVMOutputWriter
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.source.libsvm.DefaultSource"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.source.libsvm.DefaultSource.createRelation")
) ++ Seq(
// SPARK-6363 Make Scala 2.11 the default Scala version
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.cleanup"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.metadataCleaner"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.scheduler.cluster.YarnSchedulerBackend$YarnDriverEndpoint"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.scheduler.cluster.YarnSchedulerBackend$YarnSchedulerEndpoint")
) ++ Seq(
// SPARK-7889
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.deploy.history.HistoryServer.org$apache$spark$deploy$history$HistoryServer$@tachSparkUI"),
// SPARK-13296
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.UDFRegistration.register"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.UserDefinedPythonFunction$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.UserDefinedPythonFunction"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.UserDefinedFunction"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.UserDefinedFunction$")
) ++ Seq(
// SPARK-12995 Remove deprecated APIs in graphx
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.lib.SVDPlusPlus.runSVDPlusPlus"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.Graph.mapReduceTriplets"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.Graph.mapReduceTriplets$default$3"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.graphx.impl.GraphImpl.mapReduceTriplets")
) ++ Seq(
// SPARK-13426 Remove the support of SIMR
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkMasterRegex.SIMR_REGEX")
) ++ Seq(
// SPARK-13413 Remove SparkContext.metricsSystem/schedulerBackend_ setter
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.metricsSystem"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.SparkContext.schedulerBackend_=")
) ++ Seq(
// SPARK-13220 Deprecate yarn-client and yarn-cluster mode
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.SparkContext.org$apache$spark$SparkContext$$createTaskScheduler")
) ++ Seq(
// SPARK-13465 TaskContext.
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.TaskContext.addTaskFailureListener")
    ) ++ Seq(
// SPARK-7729 Executor which has been killed should also be displayed on Executor Tab
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.status.api.v1.ExecutorSummary.this")
) ++ Seq(
// SPARK-13526 Move SQLContext per-session states to new class
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.sql.UDFRegistration.this")
) ++ Seq(
// [SPARK-13486][SQL] Move SQLConf into an internal package
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf$SQLConfEntry"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf$SQLConfEntry$")
) ++ Seq(
//SPARK-11011 UserDefinedType serialization should be strongly typed
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.linalg.VectorUDT.serialize"),
// SPARK-12073: backpressure rate controller consumes events preferentially from lagging partitions
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.kafka.KafkaTestUtils.createTopic"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.streaming.kafka.DirectKafkaInputDStream.maxMessagesPerPartition")
) ++ Seq(
// [SPARK-13244][SQL] Migrates DataFrame to Dataset
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.tables"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.sql"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.baseRelationToDataFrame"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.table"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrame.apply"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.DataFrame"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.DataFrame$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.LegacyFunctions"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.DataFrameHolder"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.DataFrameHolder$"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLImplicits.localSeqToDataFrameHolder"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLImplicits.stringRddToDataFrameHolder"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLImplicits.rddToDataFrameHolder"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLImplicits.longRddToDataFrameHolder"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLImplicits.intRddToDataFrameHolder"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.GroupedDataset"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.Dataset.subtract"),
// [SPARK-14451][SQL] Move encoder definition into Aggregator interface
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.expressions.Aggregator.toColumn"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.expressions.Aggregator.bufferEncoder"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.expressions.Aggregator.outputEncoder"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.evaluation.MultilabelMetrics.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.predictions"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.classification.LogisticRegressionSummary.predictions")
) ++ Seq(
      // [SPARK-13686][MLLIB][STREAMING] Add a constructor parameter `regParam` to (Streaming)LinearRegressionWithSGD
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.regression.LinearRegressionWithSGD.this")
) ++ Seq(
// SPARK-15250 Remove deprecated json API in DataFrameReader
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.DataFrameReader.json")
) ++ Seq(
// SPARK-13920: MIMA checks should apply to @Experimental and @DeveloperAPI APIs
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.Aggregator.combineCombinersByKey"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.Aggregator.combineValuesByKey"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ComplexFutureAction.run"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ComplexFutureAction.runJob"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ComplexFutureAction.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkEnv.actorSystem"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkEnv.cacheManager"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkEnv.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.getConfigurationFromJobContext"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.getTaskAttemptIDFromTaskAttemptContext"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.newConfiguration"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.bytesReadCallback"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.bytesReadCallback_="),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.canEqual"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.copy"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.productArity"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.productElement"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.productIterator"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.productPrefix"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.setBytesReadCallback"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.updateBytesRead"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.canEqual"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.copy"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.productArity"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.productElement"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.productIterator"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.productPrefix"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleReadMetrics.decFetchWaitTime"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleReadMetrics.decLocalBlocksFetched"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleReadMetrics.decRecordsRead"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleReadMetrics.decRemoteBlocksFetched"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleReadMetrics.decRemoteBytesRead"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.decShuffleBytesWritten"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.decShuffleRecordsWritten"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.decShuffleWriteTime"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.incShuffleBytesWritten"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.incShuffleRecordsWritten"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.incShuffleWriteTime"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.ShuffleWriteMetrics.setShuffleRecordsWritten"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.feature.PCAModel.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.regression.StreamingLinearRegressionWithSGD.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.rdd.RDD.mapPartitionsWithContext"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.AccumulableInfo.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate.taskMetrics"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.scheduler.TaskInfo.attempt"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.ExperimentalMethods.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.callUDF"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.callUdf"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.cumeDist"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.denseRank"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.inputFileName"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.isNaN"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.percentRank"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.rowNumber"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.functions.sparkPartitionId"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.BlockStatus.apply"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.BlockStatus.copy"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.BlockStatus.externalBlockStoreSize"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.BlockStatus.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.StorageStatus.offHeapUsed"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.StorageStatus.offHeapUsedByRdd"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.storage.StorageStatusListener.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.streaming.scheduler.BatchInfo.streamIdToNumRecords"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.storageStatusList"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.storage.StorageListener.storageStatusList"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ExceptionFailure.apply"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ExceptionFailure.copy"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ExceptionFailure.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.executor.InputMetrics.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.executor.OutputMetrics.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Estimator.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Pipeline.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.PipelineModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.PredictionModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.PredictionModel.transformImpl"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Predictor.extractLabeledPoints"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Predictor.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Predictor.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.Transformer.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionTrainingSummary.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.ClassificationModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.GBTClassifier.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.MultilayerPerceptronClassifier.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.NaiveBayes.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.OneVsRest.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.OneVsRestModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.RandomForestClassifier.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.clustering.KMeans.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.clustering.KMeansModel.computeCost"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.clustering.KMeansModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.clustering.LDAModel.logLikelihood"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.clustering.LDAModel.logPerplexity"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.clustering.LDAModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.evaluation.BinaryClassificationEvaluator.evaluate"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.evaluation.Evaluator.evaluate"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator.evaluate"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.evaluation.RegressionEvaluator.evaluate"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.Binarizer.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.Bucketizer.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.ChiSqSelector.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.ChiSqSelectorModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.CountVectorizer.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.CountVectorizerModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.HashingTF.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.IDF.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.IDFModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.IndexToString.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.Interaction.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.MinMaxScaler.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.MinMaxScalerModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.OneHotEncoder.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.PCA.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.PCAModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.QuantileDiscretizer.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.RFormula.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.RFormulaModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.SQLTransformer.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScaler.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScalerModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StopWordsRemover.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StringIndexer.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StringIndexerModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.VectorAssembler.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.VectorIndexer.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.VectorIndexerModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.VectorSlicer.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.Word2Vec.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.Word2VecModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.recommendation.ALS.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.recommendation.ALSModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.recommendation.ALSModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.AFTSurvivalRegression.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.AFTSurvivalRegressionModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.GBTRegressor.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.IsotonicRegression.extractWeightedLabeledPoints"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.IsotonicRegression.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.IsotonicRegressionModel.extractWeightedLabeledPoints"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.IsotonicRegressionModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.LinearRegression.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.LinearRegressionSummary.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.LinearRegressionTrainingSummary.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.RandomForestRegressor.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.CrossValidator.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.CrossValidatorModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.TrainValidationSplit.fit"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.tuning.TrainValidationSplitModel.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.evaluation.BinaryClassificationMetrics.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.evaluation.MulticlassMetrics.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.evaluation.RegressionMetrics.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.DataFrameNaFunctions.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.DataFrameStatFunctions.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.DataFrameWriter.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.functions.broadcast"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.functions.callUDF"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.sources.CreatableRelationProvider.createRelation"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.sources.InsertableRelation.insert"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.fMeasureByThreshold"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.pr"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.precisionByThreshold"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.predictions"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.recallByThreshold"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.BinaryLogisticRegressionSummary.roc"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.clustering.LDAModel.describeTopics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.Word2VecModel.findSynonyms"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.Word2VecModel.getVectors"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.recommendation.ALSModel.itemFactors"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.recommendation.ALSModel.userFactors"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.LinearRegressionSummary.predictions"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.LinearRegressionSummary.residuals"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.scheduler.AccumulableInfo.name"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.scheduler.AccumulableInfo.value"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameNaFunctions.drop"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameNaFunctions.fill"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameNaFunctions.replace"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.jdbc"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.json"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.load"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.orc"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.parquet"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.table"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameReader.text"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameStatFunctions.crosstab"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameStatFunctions.freqItems"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.DataFrameStatFunctions.sampleBy"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.createExternalTable"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.emptyDataFrame"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.range"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.functions.udf"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.scheduler.JobLogger"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.receiver.ActorHelper"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.receiver.ActorSupervisorStrategy"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.receiver.ActorSupervisorStrategy$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.receiver.Statistics"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.streaming.receiver.Statistics$"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.executor.InputMetrics"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.executor.InputMetrics$"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.executor.OutputMetrics"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.executor.OutputMetrics$"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.functions$"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.Estimator.fit"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.Predictor.train"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.Transformer.transform"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.evaluation.Evaluator.evaluate"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.scheduler.SparkListener.onOtherEvent"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.CreatableRelationProvider.createRelation"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.InsertableRelation.insert")
) ++ Seq(
// [SPARK-13926] Automatically use Kryo serializer when shuffling RDDs with simple types
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ShuffleDependency.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ShuffleDependency.serializer"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.serializer.Serializer$")
) ++ Seq(
// SPARK-13927: add row/column iterator to local matrices
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.linalg.Matrix.rowIter"),
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.linalg.Matrix.colIter")
) ++ Seq(
      // SPARK-13948: MiMa check should catch if the visibility changes to `private`
// TODO(josh): Some of these may be legitimate incompatibilities; we should follow up before the 2.0.0 release
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.Dataset.toDS"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.sources.OutputWriterFactory.newInstance"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.util.RpcUtils.askTimeout"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.util.RpcUtils.lookupTimeout"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.UnaryTransformer.transform"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.DecisionTreeClassifier.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegression.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.DecisionTreeRegressor.train"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.Dataset.groupBy"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.Dataset.groupBy"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.Dataset.select"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.Dataset.toDF"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.Logging.initializeLogIfNecessary"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.scheduler.SparkListenerEvent.logEvent"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.OutputWriterFactory.newInstance")
) ++ Seq(
// [SPARK-14014] Replace existing analysis.Catalog with SessionCatalog
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLContext.this")
) ++ Seq(
// [SPARK-13928] Move org.apache.spark.Logging into org.apache.spark.internal.Logging
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.Logging"),
(problem: Problem) => problem match {
case MissingTypesProblem(_, missing)
if missing.map(_.fullName).sameElements(Seq("org.apache.spark.Logging")) => false
case _ => true
}
) ++ Seq(
// [SPARK-13990] Automatically pick serializer when caching RDDs
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.network.netty.NettyBlockTransferService.uploadBlock")
) ++ Seq(
      // [SPARK-14089][CORE][MLLIB] Remove methods that have been deprecated since 1.1, 1.2, 1.3, 1.4, and 1.5
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkEnv.getThreadLocal"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.rdd.RDDFunctions.treeReduce"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.rdd.RDDFunctions.treeAggregate"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.tree.configuration.Strategy.defaultStategy"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.util.MLUtils.loadLibSVMFile"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.mllib.util.MLUtils.loadLibSVMFile"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.util.MLUtils.loadLibSVMFile"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.util.MLUtils.saveLabeledData"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.util.MLUtils.loadLabeledData"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.optimization.LBFGS.setMaxNumIterations"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.evaluation.BinaryClassificationEvaluator.setScoreCol")
) ++ Seq(
// [SPARK-14205][SQL] remove trait Queryable
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.Dataset")
) ++ Seq(
// [SPARK-11262][ML] Unit test for gradient, loss layers, memory management
// for multilayer perceptron.
// This class is marked as `private`.
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.ml.ann.SoftmaxFunction")
) ++ Seq(
// [SPARK-13674][SQL] Add wholestage codegen support to Sample
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.util.random.PoissonSampler.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.util.random.PoissonSampler.this")
) ++ Seq(
// [SPARK-13430][ML] moved featureCol from LinearRegressionModelSummary to LinearRegressionSummary
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.ml.regression.LinearRegressionSummary.this")
) ++ Seq(
      // [SPARK-14437][Core] Use the address that NettyBlockTransferService listens on to create BlockManagerId
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.network.netty.NettyBlockTransferService.this")
) ++ Seq(
// [SPARK-13048][ML][MLLIB] keepLastCheckpoint option for LDA EM optimizer
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.mllib.clustering.DistributedLDAModel.this")
) ++ Seq(
// [SPARK-14475] Propagate user-defined context from driver to executors
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.TaskContext.getLocalProperty"),
// [SPARK-14617] Remove deprecated APIs in TaskMetrics
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.InputMetrics$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.OutputMetrics$"),
// [SPARK-14628] Simplify task metrics by always tracking read/write metrics
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.readMethod"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.writeMethod")
) ++ Seq(
// SPARK-14628: Always track input/output/shuffle metrics
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.totalBlocksFetched"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.inputMetrics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.outputMetrics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.shuffleWriteMetrics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.shuffleReadMetrics"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.inputMetrics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.outputMetrics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.shuffleWriteMetrics"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.shuffleReadMetrics"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.this")
) ++ Seq(
// SPARK-13643: Move functionality from SQLContext to SparkSession
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.SQLContext.getSchema")
) ++ Seq(
// [SPARK-14407] Hides HadoopFsRelation related data source API into execution package
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.OutputWriter"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.OutputWriterFactory")
) ++ Seq(
// SPARK-14734: Add conversions between mllib and ml Vector, Matrix types
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.mllib.linalg.Vector.asML"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.mllib.linalg.Matrix.asML")
) ++ Seq(
// SPARK-14704: Create accumulators in TaskMetrics
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.this")
) ++ Seq(
// SPARK-14861: Replace internal usages of SQLContext with SparkSession
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.ml.clustering.LocalLDAModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.ml.clustering.DistributedLDAModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.ml.clustering.LDAModel.this"),
ProblemFilters.exclude[DirectMissingMethodProblem](
"org.apache.spark.ml.clustering.LDAModel.sqlContext"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.sql.Dataset.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem](
"org.apache.spark.sql.DataFrameReader.this")
) ++ Seq(
// SPARK-14542 configurable buffer size for pipe RDD
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.rdd.RDD.pipe"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.pipe")
) ++ Seq(
      // [SPARK-4452][Core] Shuffle data structures can starve others on the same thread for memory
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("org.apache.spark.util.collection.Spillable")
) ++ Seq(
// [SPARK-14952][Core][ML] Remove methods deprecated in 1.6
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.input.PortableDataStream.close"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.weights"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.regression.LinearRegressionModel.weights")
) ++ Seq(
// [SPARK-10653] [Core] Remove unnecessary things from SparkEnv
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkEnv.sparkFilesDir"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.SparkEnv.blockTransferService")
) ++ Seq(
// SPARK-14654: New accumulator API
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ExceptionFailure$"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ExceptionFailure.apply"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ExceptionFailure.metrics"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ExceptionFailure.copy"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ExceptionFailure.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.executor.ShuffleReadMetrics.remoteBlocksFetched"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.executor.ShuffleReadMetrics.totalBlocksFetched"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.executor.ShuffleReadMetrics.localBlocksFetched"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.remoteBlocksFetched"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.localBlocksFetched")
) ++ Seq(
// [SPARK-14615][ML] Use the new ML Vector and Matrix in the ML pipeline based algorithms
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.clustering.LDAModel.getOldDocConcentration"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.clustering.LDAModel.estimatedDocConcentration"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.clustering.LDAModel.topicsMatrix"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.clustering.KMeansModel.clusterCenters"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LabelConverter.decodeLabel"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LabelConverter.encodeLabeledPoint"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.MultilayerPerceptronClassificationModel.weights"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.MultilayerPerceptronClassificationModel.predict"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.MultilayerPerceptronClassificationModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.NaiveBayesModel.predictRaw"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.NaiveBayesModel.raw2probabilityInPlace"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.NaiveBayesModel.theta"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.NaiveBayesModel.pi"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.NaiveBayesModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.probability2prediction"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.predictRaw"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.raw2prediction"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.raw2probabilityInPlace"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.predict"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.coefficients"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.classification.ClassificationModel.raw2prediction"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.ClassificationModel.predictRaw"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.classification.ClassificationModel.predictRaw"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.ElementwiseProduct.getScalingVec"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.ElementwiseProduct.setScalingVec"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.PCAModel.pc"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.MinMaxScalerModel.originalMax"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.MinMaxScalerModel.originalMin"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.MinMaxScalerModel.this"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.Word2VecModel.findSynonyms"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.IDFModel.idf"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.StandardScalerModel.mean"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.feature.StandardScalerModel.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.feature.StandardScalerModel.std"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.AFTSurvivalRegressionModel.predict"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.AFTSurvivalRegressionModel.coefficients"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.AFTSurvivalRegressionModel.predictQuantiles"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.AFTSurvivalRegressionModel.this"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.IsotonicRegressionModel.predictions"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.IsotonicRegressionModel.boundaries"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.LinearRegressionModel.predict"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.LinearRegressionModel.coefficients"),
ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.ml.regression.LinearRegressionModel.this")
) ++ Seq(
// [SPARK-15290] Move annotations, like @Since / @DeveloperApi, into spark-tags
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.annotation.package$"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.annotation.package"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.annotation.Private"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.annotation.AlphaComponent"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.annotation.Experimental"),
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.annotation.DeveloperApi")
) ++ Seq(
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.mllib.linalg.Vector.asBreeze"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.mllib.linalg.Matrix.asBreeze")
) ++ Seq(
// [SPARK-15914] Binary compatibility is broken since consolidation of Dataset and DataFrame
// in Spark 2.0. However, source level compatibility is still maintained.
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.load"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.jsonRDD"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.jsonFile"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.jdbc"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.parquetFile"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.sql.SQLContext.applySchema")
) ++ Seq(
// SPARK-17096: Improve exception string reported through the StreamingQueryListener
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryTerminated.stackTrace"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.streaming.StreamingQueryListener#QueryTerminated.this")
) ++ Seq(
// SPARK-17406 limit timeline executor events
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorIdToData"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToTasksActive"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToTasksComplete"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToInputRecords"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToShuffleRead"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToTasksFailed"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToShuffleWrite"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToDuration"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToInputBytes"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToLogUrls"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToOutputBytes"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToOutputRecords"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToTotalCores"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToTasksMax"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ui.exec.ExecutorsListener.executorToJvmGCTime")
) ++ Seq(
// [SPARK-17163] Unify logistic regression interface. Private constructor has new signature.
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.this")
) ++ Seq(
// [SPARK-17498] StringIndexer enhancement for handling unseen labels
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.feature.StringIndexer"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.feature.StringIndexerModel")
) ++ Seq(
// [SPARK-17365][Core] Remove/Kill multiple executors together to reduce RPC call time
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.SparkContext")
) ++ Seq(
// [SPARK-12221] Add CPU time to metrics
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.TaskMetrics.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.this")
) ++ Seq(
// [SPARK-18481] ML 2.1 QA: Remove deprecated methods for ML
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.PipelineStage.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.param.JavaParams.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.param.Params.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.GBTClassificationModel.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.LogisticRegression.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.GBTClassifier.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.LogisticRegressionModel.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.classification.RandomForestClassificationModel.numTrees"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.feature.ChiSqSelectorModel.setLabelCol"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.evaluation.Evaluator.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.regression.GBTRegressor.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.regression.GBTRegressionModel.validateParams"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.regression.LinearRegressionSummary.model"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.ml.regression.RandomForestRegressionModel.numTrees"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.classification.RandomForestClassifier"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.classification.RandomForestClassificationModel"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.classification.GBTClassifier"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.classification.GBTClassificationModel"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.regression.RandomForestRegressor"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.regression.RandomForestRegressionModel"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.regression.GBTRegressor"),
ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.ml.regression.GBTRegressionModel"),
ProblemFilters.exclude[FinalMethodProblem]("org.apache.spark.ml.classification.RandomForestClassificationModel.getNumTrees"),
ProblemFilters.exclude[FinalMethodProblem]("org.apache.spark.ml.regression.RandomForestRegressionModel.getNumTrees"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.RandomForestClassificationModel.numTrees"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.classification.RandomForestClassificationModel.setFeatureSubsetStrategy"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.RandomForestRegressionModel.numTrees"),
ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.ml.regression.RandomForestRegressionModel.setFeatureSubsetStrategy")
) ++ Seq(
      // [SPARK-21680][ML][MLLIB] Optimize Vector compress
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.mllib.linalg.Vector.toSparseWithSize"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Vector.toSparseWithSize")
)
}
def excludes(version: String) = version match {
case v if v.startsWith("2.3") => v23excludes
case v if v.startsWith("2.2") => v22excludes
case v if v.startsWith("2.1") => v21excludes
case v if v.startsWith("2.0") => v20excludes
case _ => Seq()
}
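
  // Usage sketch (comment added for clarity; not part of the original build): each entry above is a
  // MiMa `ProblemFilter` (a function from `Problem` to `Boolean`), and the sbt MiMa plugin drops every
  // reported problem for which a filter returns false. A minimal, hypothetical wiring in an sbt build
  // might look like:
  //
  //   mimaBinaryIssueFilters ++= MimaExcludes.excludes(version.value)
  //
  // The exact setting name depends on the MiMa plugin version; Spark's own build wires these filters
  // up in its project definition rather than here.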
}
| SHASHANKB/spark | project/MimaExcludes.scala | Scala | apache-2.0 | 95,078 |
package latis.reader.tsml.agg
import latis.dm.Dataset
import latis.ops.agg.TileAggregation
import latis.reader.tsml.ml.Tsml
/**
* Delegate to the TileAggregation Operation to join Datasets based on their
* bounding domain sets.
*/
class TileUnionAdapter(tsml: Tsml) extends AggregationAdapter(tsml) {
def aggregate(left: Dataset, right: Dataset): Dataset = TileAggregation(left, right)
} | dlindhol/LaTiS | src/main/scala/latis/reader/tsml/agg/TileUnionAdapter.scala | Scala | epl-1.0 | 398 |
package com.twitter.finagle.http.codec
import com.twitter.finagle.http.{Fields, Message, Request, Response, Status}
import com.twitter.util.{Future, Promise}
/**
* The HTTP connection manager implements connection management in
* accordance with RFC 2616 § 8. This is just the state machine; the
* codec implementations are in {Server,Client}ConnectionManager.
*/
private[finagle] class ConnectionManager {
/**
* Indicates whether the connection should be closed when it becomes idle.
* Because the connection is initially idle, we set this to `true` to avoid
* the connection starting in a closed state.
*/
private[this] var isKeepAlive = true
/** When false, the connection is busy servicing a request. */
private[this] var isIdle = true
/**
* Indicates the number of chunked messages currently being
* transmitted on this connection. Practically, on [0, 2].
*/
private[this] var activeStreams = 0
/**
* Indicates the number of requests that have been issued that have
* not yet received a response. Practically, on [0, 1].
*/
private[this] var pendingResponses = 0
/** Satisfied when the connection is ready to be torn down. */
private[this] val closeP = new Promise[Unit]
def observeMessage(message: Message, onFinish: Future[Unit]): Unit = synchronized {
message match {
case req: Request => observeRequest(req, onFinish)
case rep: Response => observeResponse(rep, onFinish)
case _ => isKeepAlive = false // conservative
}
}
def observeRequest(request: Request, onFinish: Future[Unit]): Unit = synchronized {
pendingResponses += 1
isIdle = false
isKeepAlive = request.isKeepAlive
handleIfStream(onFinish)
}
def observeResponse(response: Response, onFinish: Future[Unit]): Unit = synchronized {
pendingResponses -= 1
if (!isKeepAlive || mustCloseOnFinish(response) || !response.isKeepAlive) {
// We are going to close the connection after this response so we ensure that
// the 'Connection' header is set to 'close' in order to give the client notice.
response.headerMap.set(Fields.Connection, "close")
isKeepAlive = false
}
// If a response isn't chunked, then we're done with this request,
// and hence idle.
isIdle = !response.isChunked
handleIfStream(onFinish)
}
// this can be unsynchronized because all callers are synchronized.
private[this] def handleIfStream(onFinish: Future[Unit]): Unit = {
if (!onFinish.isDefined) {
activeStreams += 1
onFinish.ensure {
endStream()
if (shouldClose) closeP.setDone()
}
} else if (shouldClose) closeP.setDone()
}
private[this] def endStream(): Unit = synchronized {
activeStreams -= 1
isIdle = activeStreams == 0 && pendingResponses == 0
}
def shouldClose(): Boolean = synchronized { isIdle && !isKeepAlive }
def onClose: Future[Unit] = closeP
private[this] def mustCloseOnFinish(resp: Response): Boolean = {
    // For an HTTP/1.x response that may have a body, the body length is defined by
// either the `Transfer-Encoding: chunked` mechanism, the Content-Length header,
// or the end of the connection, in that order.
// See https://tools.ietf.org/html/rfc7230#section-3.3.3 for more details.
mayHaveContent(resp.status) && !resp.isChunked && resp.contentLength.isEmpty
}
// Some status codes are not permitted to have a message body.
private[this] def mayHaveContent(status: Status): Boolean = status match {
case Status.Informational(_) => false // all 1xx status codes must not have a body
case Status.NoContent => false // 204 No Content
case Status.NotModified => false // 304 Not Modified
case _ => true
}
}
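
/*
 * Usage sketch (comment added for clarity; not part of the original file). A dispatcher would drive
 * this state machine roughly as follows, where `req`/`rep` are finagle-http messages and the Future
 * arguments signal when any chunked body has finished streaming:
 *
 *   val manager = new ConnectionManager()
 *   manager.observeRequest(req, Future.Done)   // connection becomes busy
 *   manager.observeResponse(rep, Future.Done)  // idle again; may force "Connection: close"
 *   if (manager.shouldClose()) transport.close()
 *   manager.onClose.ensure(cleanup())
 *
 * `transport` and `cleanup()` are hypothetical stand-ins; the real call sites are the
 * {Server,Client} dispatchers mentioned above.
 */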
| koshelev/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/codec/ConnectionManager.scala | Scala | apache-2.0 | 3,759 |
/*
* Copyright 2016 Miel Donkers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.codecentric.coffee.util
import com.zaxxer.hikari.HikariDataSource
/**
* @author Miel Donkers ([email protected])
*/
class DatabaseService(jdbcUrl: String, dbUser: String, dbPassword: String) {
private val ds = new HikariDataSource()
ds.setMaximumPoolSize(20)
ds.setDriverClassName("org.mariadb.jdbc.Driver")
ds.setJdbcUrl(jdbcUrl)
ds.addDataSourceProperty("user", dbUser)
ds.addDataSourceProperty("password", dbPassword)
// ds.setAutoCommit(false)
val driver = slick.driver.MySQLDriver
import driver.api._
val db = Database.forDataSource(ds)
implicit val dbSession = db.createSession()
}
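
// Usage sketch (comment added for clarity; the URL and credentials below are placeholders):
//
//   val dbService = new DatabaseService("jdbc:mariadb://localhost:3306/coffee", "user", "secret")
//   import dbService.driver.api._
//   dbService.db.run(sql"SELECT 1".as[Int])   // returns a Future[Vector[Int]]
//
// Real values would normally come from application configuration rather than being hard-coded.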
| mdonkers/akka-cqrs-es-demo | src/main/scala/nl/codecentric/coffee/util/DatabaseService.scala | Scala | apache-2.0 | 1,235 |
package im.mange.little.classy
import scala.reflect.ClassTag
object Classy {
def name[T : ClassTag]: String = name(runtimeClass[T])
def runtimeClass[T : ClassTag]: Class[_] = implicitly[ClassTag[T]].runtimeClass
private def name(clazz: Class[_]): String = clazz.getSimpleName.stripSuffix("$")
}
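
// Usage sketch (comment added for clarity):
//   Classy.name[Option[Int]]   // "Option" -- type parameters are erased, only the class name is kept
// The stripSuffix("$") removes the trailing '$' that module (object) classes carry at runtime.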
| alltonp/little | src/main/scala/im/mange/little/classy/Classy.scala | Scala | apache-2.0 | 305 |
package com.taxis99.aws
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.{ Finders, MustMatchers, WordSpec }
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.CreateQueueRequest
import com.amazonaws.services.sqs.model.CreateQueueResult
import com.amazonaws.services.sqs.model.Message
import com.amazonaws.services.sqs.model.ReceiveMessageRequest
import com.amazonaws.services.sqs.model.ReceiveMessageResult
class SQSHelperSpec extends WordSpec with MustMatchers {
object SQSHelper extends SQSHelper(accessKey = "x", secretKey = "y", queueName = "queue", endpoint = "localhost:9000") {
override def createClient() = {
val client = mock(classOf[AmazonSQSClient])
val queueUrl = "queueUrl"
when(client.createQueue(any[CreateQueueRequest]()))
.thenReturn(new CreateQueueResult().withQueueUrl(queueUrl))
when(client.receiveMessage(any[ReceiveMessageRequest]()))
.thenReturn(new ReceiveMessageResult().withMessages(new java.util.ArrayList[Message]()))
client
}
}
"SQSHelper" must {
"receive nothing on empty list" in {
SQSHelper.fetchMessage must be(None)
}
}
} | mtrovo/awsscala | src/test/scala/com/taxis99/aws/SQSHelperSpec.scala | Scala | apache-2.0 | 1,208 |
/* https://www.hackerrank.com/challenges/solve-me-first */
package com.negrisoli.algorithms.warmup
import io.Source.stdin
object SolveMeFirst {
def main(args: Array[String]) {
println(stdin.getLines().take(2).map(_.toInt).sum)
}
} | rbatista/algorithms | challenges/hacker-rank/scala/src/main/scala/com/negrisoli/algorithms/warmup/SolveMeFirst.scala | Scala | mit | 240 |
package com.github.lstephen.ootp.ai.report;
import com.google.common.base.Function
import com.google.common.collect.Ordering
import com.github.lstephen.ootp.ai.io.Printable
import com.github.lstephen.ootp.ai.player.Player
import com.github.lstephen.ootp.ai.regression.Predictor
import com.github.lstephen.ootp.ai.roster.Team
import com.github.lstephen.ootp.ai.score.Score
import com.github.lstephen.ootp.ai.site.Financials
import com.github.lstephen.ootp.ai.site.Salary
import com.github.lstephen.ootp.ai.value.NowValue
import com.github.lstephen.ootp.ai.value.SalaryPredictor
import java.io.PrintWriter;
import java.text.NumberFormat;
import org.apache.commons.lang3.StringUtils;
import collection.JavaConversions._
import scala.math._
/**
*
* @author lstephen
*/
class SalaryReport(team: Team, salary: Salary, financials: Financials)(
implicit predictor: Predictor)
extends SalaryPredictor
with Printable {
private def value(p: Player): Score = {
val v = NowValue(p)
v.vsReplacement.orElseZero + v.vsMax.orElseZero
}
val currentTotal: Long = team.map(salary.getCurrentSalary(_).toLong).sum
val nextTotal: Long = team.map(salary.getNextSalary(_).toLong).sum
val replCurrentTotal: Score =
team.map(value(_)).filter(_.isPositive).total
val replNextTotal: Score = team
.filter(salary.getNextSalary(_) > 0)
.map(value(_))
.filter(_.isPositive)
.total
val availableForExtensions = min(
financials.getAvailableForExtensions,
max(financials.getLastYearRevenue - nextTotal, 0))
val maxCurrent = currentTotal + financials.getAvailableForFreeAgents
val maxNext = nextTotal + availableForExtensions
val maxReplCurrent = maxCurrent / replCurrentTotal.toLong
val maxReplNext = maxNext / replNextTotal.toLong
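
  // Note (comment added for clarity): maxReplCurrent / maxReplNext are dollars available per unit of
  // roster surplus value (budget divided by total positive value); predictNow and predictNext below
  // multiply a player's value by these rates to estimate a market salary.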
def format(i: Long): String = NumberFormat.getIntegerInstance().format(i)
def format(i: Int): String = NumberFormat.getIntegerInstance().format(i)
def print(w: PrintWriter): Unit = {
w.println()
team.toSeq
.filter(salary.getCurrentSalary(_) != 0)
.sortBy(salary.getCurrentSalary(_))
.reverse
.foreach { p =>
val s = salary getCurrentSalary p
val nextS = salary getNextSalary p
val position = p.getPosition
val name = StringUtils.abbreviate(p.getShortName, 15)
val age = p.getAge
val current = format(s)
val next = if (nextS == 0) "" else format(nextS)
w println f"$position%2s $name%-15s $age%2d| $current%11s $next%11s"
}
val line = "-" * 45
val totalCurrent = format(currentTotal)
val totalNext = format(nextTotal)
val buffer = " " * 21
val perReplLabel = "$/Value"
val perReplCurrent = format(currentTotal / replCurrentTotal.toLong)
val perReplNext = format(nextTotal / replNextTotal.toLong)
val forLabel = "$ Available"
val forFreeAgents = format(financials.getAvailableForFreeAgents)
val forExtensions = format(availableForExtensions)
val maxLabel = "Max $/Value"
w println line
w println f"$buffer| $totalCurrent%11s $totalNext%11s"
w println f"$perReplLabel%21s| $perReplCurrent%11s $perReplNext%11s"
w println f"$forLabel%21s| $forFreeAgents%11s $forExtensions%11s"
w println f"$maxLabel%21s| ${format(maxReplCurrent)}%11s ${format(maxReplNext)}%11s"
w println line
}
def predictNow(p: Player): Integer =
max(value(p).toLong * maxReplCurrent, 0).toInt
def predictNext(p: Player): Integer =
max(value(p).toLong * maxReplNext, 0).toInt
}
| lstephen/ootp-ai | src/main/scala/com/github/lstephen/ootp/ai/report/SalaryReport.scala | Scala | apache-2.0 | 3,534 |
package dotty.tools
package dotc
package typer
import core._
import ast.{Trees, untpd, tpd, TreeInfo}
import util.Positions._
import util.Stats.{track, record, monitored}
import printing.Showable
import Contexts._
import Types._
import Flags._
import Mode.ImplicitsEnabled
import Denotations._
import NameOps._
import SymDenotations._
import Symbols._
import Types._
import Decorators._
import Names._
import StdNames._
import Constants._
import Applications._
import ProtoTypes._
import ErrorReporting._
import Hashable._
import config.Config
import config.Printers._
import collection.mutable
/** Implicit resolution */
object Implicits {
/** A common base class of contextual implicits and of-type implicits which
* represents a set of implicit references.
*/
abstract class ImplicitRefs(initctx: Context) {
implicit val ctx: Context =
if (initctx == NoContext) initctx else initctx retractMode Mode.ImplicitsEnabled
/** The implicit references */
def refs: List[TermRef]
/** Return those references in `refs` that are compatible with type `pt`. */
protected def filterMatching(pt: Type)(implicit ctx: Context): List[TermRef] = track("filterMatching") {
def refMatches(ref: TermRef)(implicit ctx: Context) = /*ctx.traceIndented(i"refMatches $ref $pt")*/ {
def discardForView(tpw: Type, argType: Type): Boolean = tpw match {
case mt: MethodType =>
mt.isImplicit ||
mt.paramTypes.length != 1 ||
!(argType relaxed_<:< mt.paramTypes.head)(ctx.fresh.setExploreTyperState)
case poly: PolyType =>
// We do not need to call ProtoTypes#constrained on `poly` because
// `refMatches` is always called with mode TypevarsMissContext enabled.
poly.resultType match {
case mt: MethodType =>
mt.isImplicit ||
mt.paramTypes.length != 1 ||
!(argType relaxed_<:< wildApprox(mt.paramTypes.head)(ctx.fresh.setExploreTyperState))
case rtp =>
discardForView(wildApprox(rtp), argType)
}
case tpw: TermRef =>
false // can't discard overloaded refs
case tpw =>
//if (ctx.typer.isApplicable(tp, argType :: Nil, resultType))
// println(i"??? $tp is applicable to $this / typeSymbol = ${tpw.typeSymbol}")
!tpw.derivesFrom(defn.FunctionClass(1)) ||
ref.symbol == defn.Predef_conforms //
// as an implicit conversion, Predef.$conforms is a no-op, so exclude it
}
def discardForValueType(tpw: Type): Boolean = tpw match {
case mt: MethodType => !mt.isImplicit
case mt: PolyType => discardForValueType(tpw.resultType)
case _ => false
}
def discard = pt match {
case pt: ViewProto => discardForView(ref.widen, pt.argType)
case _: ValueTypeOrProto => !defn.isFunctionType(pt) && discardForValueType(ref.widen)
case _ => false
}
(ref.symbol isAccessibleFrom ref.prefix) && {
if (discard) {
record("discarded eligible")
false
}
else NoViewsAllowed.isCompatible(normalize(ref, pt), pt)
}
}
if (refs.isEmpty) refs
else refs filter (refMatches(_)(ctx.fresh.addMode(Mode.TypevarsMissContext).setExploreTyperState)) // create a defensive copy of ctx to avoid constraint pollution
}
}
/** The implicit references coming from the implicit scope of a type.
* @param tp the type determining the implicit scope
* @param companionRefs the companion objects in the implicit scope.
*/
class OfTypeImplicits(tp: Type, val companionRefs: TermRefSet)(initctx: Context) extends ImplicitRefs(initctx) {
assert(initctx.typer != null)
val refs: List[TermRef] = {
val buf = new mutable.ListBuffer[TermRef]
for (companion <- companionRefs) buf ++= companion.implicitMembers
buf.toList
}
/** The implicit references that are eligible for expected type `tp` */
lazy val eligible: List[TermRef] =
/*>|>*/ track("eligible in tpe") /*<|<*/ {
/*>|>*/ ctx.traceIndented(i"eligible($tp), companions = ${companionRefs.toList}%, %", implicitsDetailed, show = true) /*<|<*/ {
if (refs.nonEmpty && monitored) record(s"check eligible refs in tpe", refs.length)
filterMatching(tp)
}
}
override def toString =
i"OfTypeImplicits($tp), companions = ${companionRefs.toList}%, %; refs = $refs%, %."
}
/** The implicit references coming from the context.
* @param refs the implicit references made visible by the current context.
* Note: The name of the reference might be different from the name of its symbol.
* In the case of a renaming import a => b, the name of the reference is the renamed
* name, b, whereas the name of the symbol is the original name, a.
* @param outerCtx the next outer context that makes visible further implicits
*/
class ContextualImplicits(val refs: List[TermRef], val outerImplicits: ContextualImplicits)(initctx: Context) extends ImplicitRefs(initctx) {
private val eligibleCache = new mutable.AnyRefMap[Type, List[TermRef]]
/** The implicit references that are eligible for type `tp`. */
def eligible(tp: Type): List[TermRef] = /*>|>*/ track(s"eligible in ctx") /*<|<*/ {
if (tp.hash == NotCached) computeEligible(tp)
else eligibleCache get tp match {
case Some(eligibles) =>
def elided(ci: ContextualImplicits): Int = {
val n = ci.refs.length
if (ci.outerImplicits == NoContext.implicits) n
else n + elided(ci.outerImplicits)
}
if (monitored) record(s"elided eligible refs", elided(this))
eligibles
case None =>
val savedEphemeral = ctx.typerState.ephemeral
ctx.typerState.ephemeral = false
try {
val result = computeEligible(tp)
if (ctx.typerState.ephemeral) record("ephemeral cache miss: eligible")
else eligibleCache(tp) = result
result
}
finally ctx.typerState.ephemeral |= savedEphemeral
}
}
private def computeEligible(tp: Type): List[TermRef] = /*>|>*/ ctx.traceIndented(i"computeEligible $tp in $refs%, %", implicitsDetailed) /*<|<*/ {
if (monitored) record(s"check eligible refs in ctx", refs.length)
val ownEligible = filterMatching(tp)
if (outerImplicits == NoContext.implicits) ownEligible
else ownEligible ::: {
val shadowed = (ownEligible map (_.name)).toSet
outerImplicits.eligible(tp) filterNot (ref => shadowed contains ref.name)
}
}
override def toString = {
val own = s"(implicits: ${refs mkString ","})"
if (outerImplicits == NoContext.implicits) own else own + "\\n " + outerImplicits
}
/** This context, or a copy, ensuring root import from symbol `root`
* is not present in outer implicits.
*/
def exclude(root: Symbol): ContextualImplicits =
if (this == NoContext.implicits) this
else {
val outerExcluded = outerImplicits exclude root
if (ctx.importInfo.site.termSymbol == root) outerExcluded
else if (outerExcluded eq outerImplicits) this
else new ContextualImplicits(refs, outerExcluded)(ctx)
}
}
/** The result of an implicit search */
abstract class SearchResult
/** A successful search
* @param ref The implicit reference that succeeded
   * @param tree The typed tree that needs to be inserted
   * @param tstate The typer state after the implicit search
*/
case class SearchSuccess(tree: tpd.Tree, ref: TermRef, tstate: TyperState) extends SearchResult {
override def toString = s"SearchSuccess($tree, $ref)"
}
/** A failed search */
abstract class SearchFailure extends SearchResult {
/** A note describing the failure in more detail - this
* is either empty or starts with a '\\n'
*/
def postscript(implicit ctx: Context): String = ""
}
/** A "no matching implicit found" failure */
case object NoImplicitMatches extends SearchFailure
/** A search failure that can show information about the cause */
abstract class ExplainedSearchFailure extends SearchFailure {
protected def pt: Type
protected def argument: tpd.Tree
protected def qualify(implicit ctx: Context) =
if (argument.isEmpty) d"match type $pt"
else d"convert from ${argument.tpe} to $pt"
/** An explanation of the cause of the failure as a string */
def explanation(implicit ctx: Context): String
}
/** An ambiguous implicits failure */
class AmbiguousImplicits(alt1: TermRef, alt2: TermRef, val pt: Type, val argument: tpd.Tree) extends ExplainedSearchFailure {
def explanation(implicit ctx: Context): String =
d"both ${err.refStr(alt1)} and ${err.refStr(alt2)} $qualify"
override def postscript(implicit ctx: Context) =
"\\nNote that implicit conversions cannot be applied because they are ambiguous;" +
"\\n " + explanation
}
class NonMatchingImplicit(ref: TermRef, val pt: Type, val argument: tpd.Tree) extends ExplainedSearchFailure {
def explanation(implicit ctx: Context): String =
d"${err.refStr(ref)} does not $qualify"
}
class ShadowedImplicit(ref: TermRef, shadowing: Type, val pt: Type, val argument: tpd.Tree) extends ExplainedSearchFailure {
def explanation(implicit ctx: Context): String =
d"${err.refStr(ref)} does $qualify but is shadowed by ${err.refStr(shadowing)}"
}
class DivergingImplicit(ref: TermRef, val pt: Type, val argument: tpd.Tree) extends ExplainedSearchFailure {
def explanation(implicit ctx: Context): String =
d"${err.refStr(ref)} produces a diverging implicit search when trying to $qualify"
}
class FailedImplicit(failures: List[ExplainedSearchFailure], val pt: Type, val argument: tpd.Tree) extends ExplainedSearchFailure {
def explanation(implicit ctx: Context): String =
if (failures.isEmpty) s" No implicit candidates were found that $qualify"
else " " + (failures map (_.explanation) mkString "\\n ")
override def postscript(implicit ctx: Context): String =
"\\nImplicit search failure summary:\\n" + explanation
}
}
import Implicits._
/** Info relating to implicits that is kept for one run */
trait ImplicitRunInfo { self: RunInfo =>
private val implicitScopeCache = mutable.AnyRefMap[Type, OfTypeImplicits]()
/** The implicit scope of a type `tp`
* @param liftingCtx A context to be used when computing the class symbols of
* a type. Types may contain type variables with their instances
* recorded in the current context. To find out the instance of
   * a type variable, we need the current context; the
   * runinfo context alone is not enough.
*/
def implicitScope(tp: Type, liftingCtx: Context): OfTypeImplicits = {
val seen: mutable.Set[Type] = mutable.Set()
/** Replace every typeref that does not refer to a class by a conjunction of class types
* that has the same implicit scope as the original typeref. The motivation for applying
* this map is that it reduces the total number of types for which we need to
* compute and cache the implicit scope; all variations wrt type parameters or
* abstract types are eliminated.
*/
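    // For example (illustrative): a reference `p.T` to an abstract type bounded by
    // `List[Int]` is mapped to a conjunction built from the class symbol `List`,
    // which has the same implicit scope but far fewer distinct forms to cache.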
object liftToClasses extends TypeMap {
override implicit protected val ctx: Context = liftingCtx
override def stopAtStatic = true
def apply(tp: Type) = tp match {
case tp: TypeRef if tp.symbol.isAbstractOrAliasType =>
val pre = tp.prefix
def joinClass(tp: Type, cls: ClassSymbol) =
AndType(tp, cls.typeRef.asSeenFrom(pre, cls.owner))
val lead = if (tp.prefix eq NoPrefix) defn.AnyType else apply(tp.prefix)
(lead /: tp.classSymbols)(joinClass)
case tp: TypeVar =>
apply(tp.underlying)
case _ =>
mapOver(tp)
}
}
def iscopeRefs(tp: Type): TermRefSet =
if (seen contains tp) EmptyTermRefSet
else {
seen += tp
iscope(tp).companionRefs
}
// todo: compute implicits directly, without going via companionRefs?
def collectCompanions(tp: Type): TermRefSet = track("computeImplicitScope") {
ctx.traceIndented(i"collectCompanions($tp)", implicits) {
val comps = new TermRefSet
tp match {
case tp: NamedType =>
val pre = tp.prefix
comps ++= iscopeRefs(pre)
def addClassScope(cls: ClassSymbol): Unit = {
def addRef(companion: TermRef): Unit = {
val compSym = companion.symbol
if (compSym is Package)
addRef(TermRef.withSig(companion, nme.PACKAGE, Signature.NotAMethod))
else if (compSym.exists)
comps += companion.asSeenFrom(pre, compSym.owner).asInstanceOf[TermRef]
}
def addParentScope(parent: TypeRef): Unit = {
iscopeRefs(parent) foreach addRef
for (param <- parent.typeParams)
comps ++= iscopeRefs(pre.member(param.name).info)
}
val companion = cls.companionModule
if (companion.exists) addRef(companion.valRef)
cls.classParents foreach addParentScope
}
tp.classSymbols(liftingCtx) foreach addClassScope
case _ =>
for (part <- tp.namedPartsWith(_.isType))
comps ++= iscopeRefs(part)
}
comps
}
}
def ofTypeImplicits(comps: TermRefSet) = new OfTypeImplicits(tp, comps)(ctx)
/** The implicit scope of type `tp`
* @param isLifted Type `tp` is the result of a `liftToClasses` application
*/
def iscope(tp: Type, isLifted: Boolean = false): OfTypeImplicits = {
def computeIScope(cacheResult: Boolean) = {
val savedEphemeral = ctx.typerState.ephemeral
ctx.typerState.ephemeral = false
try {
val liftedTp = if (isLifted) tp else liftToClasses(tp)
val result =
if (liftedTp ne tp) iscope(liftedTp, isLifted = true)
else ofTypeImplicits(collectCompanions(tp))
if (ctx.typerState.ephemeral) record("ephemeral cache miss: implicitScope")
else if (cacheResult) implicitScopeCache(tp) = result
result
}
finally ctx.typerState.ephemeral |= savedEphemeral
}
if (tp.hash == NotCached || !Config.cacheImplicitScopes)
computeIScope(cacheResult = false)
else implicitScopeCache get tp match {
case Some(is) => is
case None => computeIScope(cacheResult = true)
}
}
iscope(tp)
}
/** A map that counts the number of times an implicit ref was picked */
val useCount = new mutable.HashMap[TermRef, Int] {
override def default(key: TermRef) = 0
}
def clear() = implicitScopeCache.clear()
}
/** The implicit resolution part of type checking */
trait Implicits { self: Typer =>
import tpd._
override def viewExists(from: Type, to: Type)(implicit ctx: Context): Boolean = (
!from.isError
&& !to.isError
&& !ctx.isAfterTyper
&& (ctx.mode is Mode.ImplicitsEnabled)
&& from.isInstanceOf[ValueType]
&& ( from.isValueSubType(to)
|| inferView(dummyTreeOfType(from), to)
(ctx.fresh.addMode(Mode.ImplicitExploration).setExploreTyperState)
.isInstanceOf[SearchSuccess]
)
)
/** Find an implicit conversion to apply to given tree `from` so that the
* result is compatible with type `to`.
*/
def inferView(from: Tree, to: Type)(implicit ctx: Context): SearchResult = track("inferView") {
if ( (to isRef defn.AnyClass)
|| (to isRef defn.ObjectClass)
|| (to isRef defn.UnitClass)
|| (from.tpe isRef defn.NothingClass)
|| (from.tpe isRef defn.NullClass)
|| (from.tpe eq NoPrefix)) NoImplicitMatches
else
try inferImplicit(to.stripTypeVar.widenExpr, from, from.pos)
catch {
case ex: AssertionError =>
implicits.println(s"view $from ==> $to")
implicits.println(ctx.typerState.constraint.show)
implicits.println(TypeComparer.explained(implicit ctx => from.tpe <:< to))
throw ex
}
}
/** Find an implicit parameter or conversion.
* @param pt The expected type of the parameter or conversion.
* @param argument If an implicit conversion is searched, the argument to which
* it should be applied, EmptyTree otherwise.
* @param pos The position where errors should be reported.
* !!! todo: catch potential cycles
*/
def inferImplicit(pt: Type, argument: Tree, pos: Position)(implicit ctx: Context): SearchResult = track("inferImplicit") {
assert(!ctx.isAfterTyper,
if (argument.isEmpty) i"missing implicit parameter of type $pt after typer"
else i"type error: ${argument.tpe} does not conform to $pt${err.whyNoMatchStr(argument.tpe, pt)}")
val prevConstr = ctx.typerState.constraint
ctx.traceIndented(s"search implicit ${pt.show}, arg = ${argument.show}: ${argument.tpe.show}", implicits, show = true) {
assert(!pt.isInstanceOf[ExprType])
val isearch =
if (ctx.settings.explaintypes.value) new ExplainedImplicitSearch(pt, argument, pos)
else new ImplicitSearch(pt, argument, pos)
val result = isearch.bestImplicit
result match {
case result: SearchSuccess =>
result.tstate.commit()
result
case result: AmbiguousImplicits =>
val deepPt = pt.deepenProto
if (deepPt ne pt) inferImplicit(deepPt, argument, pos) else result
case _ =>
assert(prevConstr eq ctx.typerState.constraint)
result
}
}
}
/** An implicit search; parameters as in `inferImplicit` */
class ImplicitSearch(protected val pt: Type, protected val argument: Tree, pos: Position)(implicit ctx: Context) {
private def nestedContext = ctx.fresh.setMode(ctx.mode &~ Mode.ImplicitsEnabled)
private def implicitProto(resultType: Type, f: Type => Type) =
if (argument.isEmpty) f(resultType) else ViewProto(f(argument.tpe.widen), f(resultType))
// Not clear whether we need to drop the `.widen` here. All tests pass with it in place, though.
assert(argument.isEmpty || argument.tpe.isValueType || argument.tpe.isInstanceOf[ExprType],
d"found: $argument: ${argument.tpe}, expected: $pt")
/** The expected type for the searched implicit */
lazy val fullProto = implicitProto(pt, identity)
lazy val funProto = fullProto match {
case proto: ViewProto =>
FunProto(untpd.TypedSplice(dummyTreeOfType(proto.argType)) :: Nil, proto.resultType, self)
case proto => proto
}
/** The expected type where parameters and uninstantiated typevars are replaced by wildcard types */
val wildProto = implicitProto(pt, wildApprox(_))
/** Search failures; overridden in ExplainedImplicitSearch */
protected def nonMatchingImplicit(ref: TermRef): SearchFailure = NoImplicitMatches
protected def divergingImplicit(ref: TermRef): SearchFailure = NoImplicitMatches
protected def shadowedImplicit(ref: TermRef, shadowing: Type): SearchFailure = NoImplicitMatches
protected def failedSearch: SearchFailure = NoImplicitMatches
/** Search a list of eligible implicit references */
def searchImplicits(eligible: List[TermRef], contextual: Boolean): SearchResult = {
val constr = ctx.typerState.constraint
/** Try to typecheck an implicit reference */
def typedImplicit(ref: TermRef)(implicit ctx: Context): SearchResult = track("typedImplicit") { ctx.traceIndented(i"typed implicit $ref, pt = $pt, implicitsEnabled == ${ctx.mode is ImplicitsEnabled}", implicits, show = true) {
assert(constr eq ctx.typerState.constraint)
var generated: Tree = tpd.ref(ref).withPos(pos)
if (!argument.isEmpty)
generated = typedUnadapted(
untpd.Apply(untpd.TypedSplice(generated), untpd.TypedSplice(argument) :: Nil),
pt)
val generated1 = adapt(generated, pt)
lazy val shadowing =
typed(untpd.Ident(ref.name) withPos pos.toSynthetic, funProto)
(nestedContext.addMode(Mode.ImplicitShadowing).setExploreTyperState)
def refMatches(shadowing: Tree): Boolean =
ref.symbol == closureBody(shadowing).symbol || {
shadowing match {
case Trees.Select(qual, nme.apply) => refMatches(qual)
case _ => false
}
}
if (ctx.reporter.hasErrors)
nonMatchingImplicit(ref)
else if (contextual && !ctx.mode.is(Mode.ImplicitShadowing) &&
!shadowing.tpe.isError && !refMatches(shadowing)) {
implicits.println(i"SHADOWING $ref in ${ref.termSymbol.owner} is shadowed by $shadowing in ${shadowing.symbol.owner}")
shadowedImplicit(ref, methPart(shadowing).tpe)
}
else
SearchSuccess(generated1, ref, ctx.typerState)
}}
/** Given a list of implicit references, produce a list of all implicit search successes,
* where the first is supposed to be the best one.
* @param pending The list of implicit references that remain to be investigated
* @param acc An accumulator of successful matches found so far.
*/
def rankImplicits(pending: List[TermRef], acc: List[SearchSuccess]): List[SearchSuccess] = pending match {
case ref :: pending1 =>
val history = ctx.searchHistory nest wildProto
val result =
if (history eq ctx.searchHistory) divergingImplicit(ref)
else typedImplicit(ref)(nestedContext.setNewTyperState.setSearchHistory(history))
result match {
case fail: SearchFailure =>
rankImplicits(pending1, acc)
case best: SearchSuccess =>
if (ctx.mode.is(Mode.ImplicitExploration)) best :: Nil
else {
val newPending = pending1 filter (isAsGood(_, best.ref)(nestedContext.setExploreTyperState))
rankImplicits(newPending, best :: acc)
}
}
case nil => acc
}
/** If the (result types of) the expected type, and both alternatives
* are all numeric value types, return the alternative which has
* the smaller numeric subtype as result type, if it exists.
* (This alternative is then discarded).
*/
def numericValueTieBreak(alt1: SearchSuccess, alt2: SearchSuccess): SearchResult = {
def isNumeric(tp: Type) = tp.typeSymbol.isNumericValueClass
def isProperSubType(tp1: Type, tp2: Type) =
tp1.isValueSubType(tp2) && !tp2.isValueSubType(tp1)
val rpt = pt.resultType
val rt1 = alt1.ref.widen.resultType
val rt2 = alt2.ref.widen.resultType
if (isNumeric(rpt) && isNumeric(rt1) && isNumeric(rt2))
if (isProperSubType(rt1, rt2)) alt1
else if (isProperSubType(rt2, rt1)) alt2
else NoImplicitMatches
else NoImplicitMatches
}
/** Convert a (possibly empty) list of search successes into a single search result */
def condense(hits: List[SearchSuccess]): SearchResult = hits match {
case best :: alts =>
alts find (alt => isAsGood(alt.ref, best.ref)(ctx.fresh.setExploreTyperState)) match {
case Some(alt) =>
/* !!! DEBUG
println(i"ambiguous refs: ${hits map (_.ref) map (_.show) mkString ", "}")
isAsGood(best.ref, alt.ref, explain = true)(ctx.fresh.withExploreTyperState)
*/
numericValueTieBreak(best, alt) match {
case eliminated: SearchSuccess => condense(hits.filter(_ ne eliminated))
case _ => new AmbiguousImplicits(best.ref, alt.ref, pt, argument)
}
case None =>
ctx.runInfo.useCount(best.ref) += 1
best
}
case Nil =>
failedSearch
}
/** Sort list of implicit references according to their popularity
* (# of times each was picked in current run).
*/
def sort(eligible: List[TermRef]) = eligible match {
case Nil => eligible
case e1 :: Nil => eligible
case e1 :: e2 :: Nil =>
if (ctx.runInfo.useCount(e1) < ctx.runInfo.useCount(e2)) e2 :: e1 :: Nil
else eligible
case _ => eligible.sortBy(-ctx.runInfo.useCount(_))
}
condense(rankImplicits(sort(eligible), Nil))
}
/** Find a unique best implicit reference */
def bestImplicit: SearchResult = {
searchImplicits(ctx.implicits.eligible(wildProto), contextual = true) match {
case result: SearchSuccess => result
case result: AmbiguousImplicits => result
case result: SearchFailure =>
searchImplicits(implicitScope(wildProto).eligible, contextual = false)
}
}
def implicitScope(tp: Type): OfTypeImplicits = ctx.runInfo.implicitScope(tp, ctx)
}
final class ExplainedImplicitSearch(pt: Type, argument: Tree, pos: Position)(implicit ctx: Context)
extends ImplicitSearch(pt, argument, pos) {
private var myFailures = new mutable.ListBuffer[ExplainedSearchFailure]
private def record(fail: ExplainedSearchFailure) = {
myFailures += fail
fail
}
def failures = myFailures.toList
override def nonMatchingImplicit(ref: TermRef) =
record(new NonMatchingImplicit(ref, pt, argument))
override def divergingImplicit(ref: TermRef) =
record(new DivergingImplicit(ref, pt, argument))
override def shadowedImplicit(ref: TermRef, shadowing: Type): SearchFailure =
record(new ShadowedImplicit(ref, shadowing, pt, argument))
override def failedSearch: SearchFailure = {
//println(s"wildProto = $wildProto")
//println(s"implicit scope = ${implicitScope(wildProto).companionRefs}")
new FailedImplicit(failures, pt, argument)
}
}
}
/** Records the history of currently open implicit searches
* @param searchDepth The number of open searches.
 * @param seen A map that records, for each class symbol of a type
 * that is currently being searched for, the complexity of that
 * type (wrt `typeSize`). The map
* is populated only once `searchDepth` is greater than
* the threshold given in the `XminImplicitSearchDepth` setting.
*/
class SearchHistory(val searchDepth: Int, val seen: Map[ClassSymbol, Int]) {
/** The number of RefinementTypes in this type, after all aliases are expanded */
private def typeSize(tp: Type)(implicit ctx: Context): Int = {
val accu = new TypeAccumulator[Int] {
def apply(n: Int, tp: Type): Int = tp match {
case tp: RefinedType =>
foldOver(n + 1, tp)
case tp: TypeRef if tp.info.isAlias =>
apply(n, tp.info.bounds.hi)
case _ =>
foldOver(n, tp)
}
}
accu.apply(0, tp)
}
/** Check for possible divergence. If one is detected return the current search history
* (this will be used as a criterion to abandon the implicit search in rankImplicits).
* If no divergence is detected, produce a new search history nested in the current one
* which records that we are now also looking for type `proto`.
*
* As long as `searchDepth` is lower than the `XminImplicitSearchDepth` value
* in settings, a new history is always produced, so the implicit search is always
* undertaken. If `searchDepth` matches or exceeds the `XminImplicitSearchDepth` value,
* we test that the new search is for a class that is either not yet in the set of
* `seen` classes, or the complexity of the type `proto` being searched for is strictly
* lower than the complexity of the type that was previously encountered and that had
* the same class symbol as `proto`. A possible divergence is detected if that test fails.
*/
def nest(proto: Type)(implicit ctx: Context): SearchHistory = {
if (searchDepth < ctx.settings.XminImplicitSearchDepth.value)
new SearchHistory(searchDepth + 1, seen)
else {
val size = typeSize(proto)
def updateMap(csyms: List[ClassSymbol], seen: Map[ClassSymbol, Int]): SearchHistory = csyms match {
case csym :: csyms1 =>
seen get csym match {
case Some(prevSize) if size >= prevSize => this
case _ => updateMap(csyms1, seen.updated(csym, size))
}
case nil =>
if (csyms.isEmpty) this
else new SearchHistory(searchDepth + 1, seen)
}
updateMap(proto.classSymbols, seen)
}
}
}
/** A set of term references where equality is =:= */
class TermRefSet(implicit ctx: Context) extends mutable.Traversable[TermRef] {
import collection.JavaConverters._
private val elems = (new java.util.LinkedHashMap[TermSymbol, List[Type]]).asScala
def += (ref: TermRef): Unit = {
val pre = ref.prefix
val sym = ref.symbol.asTerm
elems get sym match {
case Some(prefixes) =>
if (!(prefixes exists (_ =:= pre))) elems(sym) = pre :: prefixes
case None =>
elems(sym) = pre :: Nil
}
}
def ++= (refs: TraversableOnce[TermRef]): Unit =
refs foreach +=
override def foreach[U](f: TermRef => U): Unit =
for (sym <- elems.keysIterator)
for (pre <- elems(sym))
f(TermRef(pre, sym))
}
@sharable object EmptyTermRefSet extends TermRefSet()(NoContext)
| densh/dotty | src/dotty/tools/dotc/typer/Implicits.scala | Scala | bsd-3-clause | 30,063 |
package shapeless {
sealed trait HList extends Product with Serializable {
def :: (x: Any): HList = new ::(x, this)
}
final case class ::[+H, +T <: HList](head : H, tail : T) extends HList {
override def toString = head match {
case _: ::[_, _] => s"($head) :: $tail"
case _ => s"$head :: $tail"
}
}
sealed trait HNil extends HList {
override def toString = "HNil"
}
case object HNil extends HNil
}
import shapeless._
package test {
object Test {
val xs = 1 :: 2 :: Nil
val ys = (3, 4)
(xs: Any) match {
case x :: xs => ???
}
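    // With shapeless._ imported, `::` in patterns refers to the shapeless HList cons,
    // which can never match a List[Int] scrutinee, so the case below is flagged unreachable.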
xs match {
case x :: xs => ??? // error: unreachable case
}
}
} | som-snytt/dotty | tests/neg/shapeless-hcons.scala | Scala | apache-2.0 | 637 |
package code.model
import _root_.net.liftweb.mapper._
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
import _root_.net.liftweb.sitemap.Loc._
import _root_.net.liftweb.http._
import _root_.java.math.MathContext
import _root_.scala.xml.transform._
import _root_.net.liftweb.util.Helpers._
import java.text.DateFormat
/**
* The singleton that has methods for accessing the database
*/
object User extends User with MetaMegaProtoUser[User] with LongKeyedMetaMapper[User] {
override def dbTableName = "Users" // define the DB table name
override def screenWrap = Full(<lift:surround with="frame" at="content">
<lift:bind/>
</lift:surround>)
override def signupFields = List(firstName, lastName, email, password);
override def editFields = List(firstName, lastName, email, password, sex, currentCity, hometown, birthday, aboutMe);
protected def profileFields = List(currentCity, hometown)
// define the order fields will appear in forms and output
override def fieldOrder = List(id, firstName, lastName, email, password);
// comment this line out to require email validations
override def skipEmailValidation = true;
override lazy val testLogginIn = If(loggedIn_? _, () => RedirectResponse("login"));
/**
* The LocParams for the menu item for login.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def loginMenuLocParams: List[LocParam[Unit]] =
If(notLoggedIn_? _, S.??("already.logged.in")) ::
Template(() => wrapIt(login)) ::
Hidden ::
LocGroup("account") ::
Nil
/**
* The LocParams for the menu item for logout.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def logoutMenuLocParams: List[LocParam[Unit]] =
Template(() => wrapIt(logout)) ::
testLogginIn ::
Hidden ::
LocGroup("account") ::
Nil
/**
* The LocParams for the menu item for creating the user/sign up.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def createUserMenuLocParams: List[LocParam[Unit]] =
Template(() => wrapIt(signupFunc.map(_()) openOr signup)) ::
If(notLoggedIn_? _, S.??("logout.first")) ::
Hidden ::
LocGroup("account") ::
Nil
/**
* The LocParams for the menu item for lost password.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def lostPasswordMenuLocParams: List[LocParam[Unit]] =
Template(() => wrapIt(lostPassword)) ::
If(notLoggedIn_? _, S.??("logout.first")) ::
Hidden ::
Nil
/**
* The LocParams for the menu item for resetting the password.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def resetPasswordMenuLocParams: List[LocParam[Unit]] =
Template(() => wrapIt(passwordReset(snarfLastItem))) ::
If(notLoggedIn_? _, S.??("logout.first")) ::
Hidden ::
Nil
/**
* The LocParams for the menu item for editing the user.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def editUserMenuLocParams: List[LocParam[Unit]] =
Template(() => wrapIt(editFunc.map(_()) openOr edit)) ::
testLogginIn ::
Hidden ::
LocGroup("account") ::
Nil
/**
* The LocParams for the menu item for changing password.
* Overwrite in order to add custom LocParams.
* Attention: Not calling super will change the default behavior!
*/
override def changePasswordMenuLocParams: List[LocParam[Unit]] =
Template(() => wrapIt(changePassword)) ::
testLogginIn ::
Hidden ::
LocGroup("account") ::
Nil
/**
* The LocParams for the menu item for validating a user.
* Overwrite in order to add custom LocParams. Attention: Not calling super will change the default behavior!
*/
override def validateUserMenuLocParams: List[LocParam[Unit]] =
Hidden ::
Template(() => wrapIt(validateUser(snarfLastItem))) ::
If(notLoggedIn_? _, S.??("logout.first")) ::
Nil
/**
* Override this method to do something else after the user signs up
*/
override def actionsAfterSignup(theUser: TheUserType, func: () => Nothing): Nothing = {
theUser.setValidated(skipEmailValidation).resetUniqueId()
theUser.save
if (!skipEmailValidation) {
sendValidationEmail(theUser)
S.notice(S.??("sign.up.message"))
func()
} else {
logUserIn(theUser, () => {
S.notice(S.??("welcome"))
func()
})
}
}
def editProfilePage = "editprofile"
override def signup = {
val theUser: TheUserType = mutateUserOnSignup(createNewUserInstance())
val theName = signUpPath.mkString("")
def testSignup() {
validateSignup(theUser) match {
case Nil =>
actionsAfterSignup(theUser, () => S.redirectTo(homePage))
case xs => S.error(xs) ; signupFunc(Full(innerSignup _))
}
}
def innerSignup = bind("user",
signupXhtml(theUser),
"submit" -> SHtml.submit(S.??("sign.up"), testSignup _))
innerSignup
}
object loginReferer extends SessionVar("/")
override def homePage = {
var ret = loginReferer.is
loginReferer.remove()
ret
}
override def login = {
for (r <- S.referer if loginReferer.is == "/") loginReferer.set(r)
super.login
}
override val basePath: List[String] = "account" :: Nil
override def signUpSuffix = "signup"
override def lostPasswordSuffix = "lostpassword"
override def passwordResetSuffix = "resetpassword"
override def changePasswordSuffix = "changepassword"
override def validateUserSuffix = "validateuser"
def findUsersLike(s: String): List[User] = {
val foundFirstNames = User.findAll(
Cmp(User.firstName, OprEnum.Like,
Full(s.toLowerCase + "%"), None, Full("LOWER")))
val foundLastNames = User.findAll(
Cmp(User.lastName, OprEnum.Like,
Full(s.toLowerCase + "%"), None, Full("LOWER")))
val foundAboutMes = User.findAll(
Cmp(User.aboutMe, OprEnum.Like,
Full(s.toLowerCase + "%"), None, Full("LOWER")))
foundFirstNames union foundLastNames union foundAboutMes distinct
}
}
/**
* An O-R mapped "User" class that includes first name, last name,
* password and we add a "Personal Essay" to it
*/
class User extends MegaProtoUser[User] with LongKeyedMapper[User] with OneToMany[Long, User] with ManyToMany {
def getSingleton = User // what's the "meta" server
object currentCity extends MappedString(this, 160) {
override def displayName = "Current City";
}
object hometown extends MappedString(this, 160) {
override def displayName = "Hometown";
}
object sex extends MappedGender(this) {
override def displayName = "I am";
}
object birthday extends MappedDate(this) {
final val dateFormat =
DateFormat.getDateInstance(DateFormat.SHORT)
override def displayName = "Birthday";
}
// define an additional field for a personal essay
object aboutMe extends MappedTextarea(this, 1024) {
override def textareaRows = 8
override def textareaCols = 70
override def displayName = "Summary"
}
object profilePicture extends LongMappedMapper(this, File) {
override def defaultValue = -1
}
/**
   * Adds a contact to this user.
   * Returns 0 if the contact was created, or 1 if it already existed.
*/
def addContact(contact: User): Int = {
val existing = Contact.findAll(By(Contact.user, this), By(Contact.contact, contact))
if (existing.length == 0) {
Contact.join(this, contact)
0
} else {
1
}
}
/**
* Removes a contact of a user.
   * Returns 0 if successful, else 1
*/
def removeContact(contact: User) : Int = {
var found = Contact.findAll(By(Contact.user, this),
By(Contact.contact, contact))
found.map(_.delete_!)
if (found.length > 0) 0 else 1
}
// gets the list of all contacts
def getContacts(): List[User] = {
var found: List[Contact] = Contact.findAll(By(Contact.user, this))
for {contact <- found
user <- contact.contact.obj}
yield user
}
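  // Suggests up to n users to connect with, ranked by number of mutual contacts,
  // excluding this user and anyone already in the contact list.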
def getRecommendedContacts(n: Int): List[User] = {
var contacts = User.findAll()
contacts = contacts.filterNot(_ == this)
contacts = contacts.filterNot(this.getContacts().contains(_))
contacts = contacts.sortWith(this.getMutualContacts(_).length >
this.getMutualContacts(_).length)
contacts.take(n)
}
def getMutualContacts(user: User): List[User] = {
val firstContacts: List[User] = this.getContacts()
val secContacts:List[User] = user.getContacts()
firstContacts.intersect(secContacts)
}
def isContactToUser(user: User) : Boolean = {
Contact.findAll(By(Contact.user, user),
By(Contact.contact, this)).length > 0
}
def isUsersContact(user: User) : Boolean = {
Contact.findAll(By(Contact.user, this),
By(Contact.contact, user)).length > 0
}
def getActivities() = {
Activity.findAll(By(Activity.user, this), OrderBy(Activity.time, Descending))
}
def getActivities(n: Int) = {
Activity.findAll(By(Activity.user, this),
OrderBy(Activity.time, Descending)) take n
}
def addActivity(a: Activity) = {
a.user(this)
a.save
}
def removeActivity(a: Activity) = {
a.delete_!
}
def getObservableActivities(n: Int): List[Activity] = {
val contacts = getContacts
val ret = contacts flatMap(_.getActivities) union getActivities
ret sortWith ((a, b) => a.time.is.compareTo(b.time.is) > 0) take n
}
def getIncomingPokes() = {
Poke.findAll(By(Poke.toUser, this))
}
def isPokedBy(user: User) = {
Poke.findAll(By(Poke.toUser, this), By(Poke.fromUser, user)).length > 0
}
}
| Cerovec/LiftSocial | src/main/scala/code/model/User.scala | Scala | apache-2.0 | 10,264 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.javadsl.api.broker
import javax.inject.Inject
import javax.inject.Singleton
import com.lightbend.lagom.javadsl.api.Descriptor.TopicCall
import com.lightbend.lagom.javadsl.api.broker.Topic
import play.api.inject.Injector
import scala.util.control.NonFatal
/**
* Factory for creating topics.
*
* Note: This class is useful only to create new message broker module implementations,
* and should not leak into the user api.
*/
trait TopicFactory {
def create[Message](topicCall: TopicCall[Message]): Topic[Message]
}
/**
* Provider for a topic factory.
*
* This layer of indirection is provided so that the ServiceClientImplementor doesn't have to directly depend on a
* TopicFactory, it can be optional.
*/
trait TopicFactoryProvider {
def get: Option[TopicFactory]
}
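/**
 * A TopicFactoryProvider that looks up a TopicFactory from the Play injector,
 * yielding None when no message broker implementation is bound.
 */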
@Singleton
class InjectorTopicFactoryProvider @Inject()(injector: Injector) extends TopicFactoryProvider {
override lazy val get: Option[TopicFactory] = try {
Some(injector.instanceOf[TopicFactory])
} catch {
case NonFatal(e) => None
}
}
object NoTopicFactoryProvider extends TopicFactoryProvider {
override val get = None
}
| rcavalcanti/lagom | service/javadsl/api/src/main/scala/com/lightbend/lagom/internal/javadsl/api/broker/TopicFactory.scala | Scala | apache-2.0 | 1,242 |
/**
* Copyright (c) 2013-2016 Extended Mind Technologies Oy
*
* This file is part of Extended Mind.
*
* Extended Mind is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.extendedmind.api.test
import java.io.PrintWriter
import java.util.UUID
import org.extendedmind._
import org.extendedmind.bl._
import org.extendedmind.db._
import org.extendedmind.domain._
import org.extendedmind.security._
import org.extendedmind.email._
import org.extendedmind.test._
import org.extendedmind.test.TestGraphDatabase._
import org.mockito.Mockito._
import org.mockito.Matchers._
import org.mockito.Matchers.{ eq => mockEq }
import scaldi.Module
import spray.http.BasicHttpCredentials
import spray.http.HttpHeaders.Authorization
import org.zeroturnaround.zip.ZipUtil
import java.io.File
import org.zeroturnaround.zip.FileUtil
import org.apache.commons.io.FileUtils
import org.extendedmind.api.JsonImplicits._
import spray.httpx.SprayJsonSupport._
import spray.httpx.marshalling._
import spray.json.DefaultJsonProtocol._
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import java.util.concurrent.TimeUnit
abstract class ServiceSpecBase extends ImpermanentGraphDatabaseSpecBase {
implicit val timeout = RouteTestTimeout(Duration(10, TimeUnit.SECONDS))
def emailPasswordAuthenticate(email: String, password: String): SecurityContext = {
Post("/v2/users/authenticate") ~> addHeader(Authorization(BasicHttpCredentials(email, password))) ~> route ~> check {
responseAs[SecurityContext]
}
}
def emailPasswordAuthenticateRememberMe(email: String, password: String): SecurityContext = {
Post("/v2/users/authenticate", marshal(AuthenticatePayload(true, None)).right.get) ~> addHeader(Authorization(BasicHttpCredentials(email, password))) ~> route ~> check {
responseAs[SecurityContext]
}
}
def putNewItem(newItem: Item, authenticateResponse: SecurityContext): SetResult = {
Put("/v2/owners/" + authenticateResponse.userUUID + "/data/items",
marshal(newItem).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def putNewNote(newNote: Note, authenticateResponse: SecurityContext, foreignOwnerUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (foreignOwnerUUID.isDefined) foreignOwnerUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/notes",
marshal(newNote).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def putExistingNote(existingNote: Note, noteUUID: UUID, authenticateResponse: SecurityContext, foreignOwnerUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (foreignOwnerUUID.isDefined) foreignOwnerUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/notes/" + noteUUID.toString(),
marshal(existingNote).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def publishNoteCC(noteUUID: UUID, path: String, index: Boolean, authenticateResponse: SecurityContext, foreignOwnerUUID: Option[UUID] = None): PublishNoteResult = {
val ownerUUID = if (foreignOwnerUUID.isDefined) foreignOwnerUUID.get else authenticateResponse.userUUID
Post("/v2/owners/" + ownerUUID + "/data/notes/" + noteUUID + "/publish",
      marshal(PublishPayload("md", path, Some(LicenceType.CC_BY_SA_4_0.toString), if (index) Some(true) else None, None, None)).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[PublishNoteResult]
}
}
def deleteNote(noteUUID: UUID, authenticateResponse: SecurityContext, foreignUUID: Option[UUID] = None): DeleteItemResult = {
val ownerUUID = if (foreignUUID.isDefined) foreignUUID.get else authenticateResponse.userUUID
Delete("/v2/owners/" + ownerUUID + "/data/notes/" + noteUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[DeleteItemResult]
}
}
def undeleteNote(noteUUID: UUID, authenticateResponse: SecurityContext, foreignUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (foreignUUID.isDefined) foreignUUID.get else authenticateResponse.userUUID
Post("/v2/owners/" + ownerUUID + "/data/notes/" + noteUUID + "/undelete") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def putNewTask(newTask: Task, authenticateResponse: SecurityContext, foreignOwnerUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (foreignOwnerUUID.isDefined) foreignOwnerUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/tasks",
marshal(newTask).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def putExistingTask(existingTask: Task, taskUUID: UUID, authenticateResponse: SecurityContext,
foreignOwnerUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (foreignOwnerUUID.isDefined) foreignOwnerUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/tasks/" + taskUUID.toString(),
marshal(existingTask).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def deleteTask(taskUUID: UUID, authenticateResponse: SecurityContext, foreignUUID: Option[UUID] = None): DeleteItemResult = {
val ownerUUID = if (foreignUUID.isDefined) foreignUUID.get else authenticateResponse.userUUID
Delete("/v2/owners/" + ownerUUID + "/data/tasks/" + taskUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[DeleteItemResult]
}
}
def undeleteTask(taskUUID: UUID, authenticateResponse: SecurityContext, foreignUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (foreignUUID.isDefined) foreignUUID.get else authenticateResponse.userUUID
Post("/v2/owners/" + ownerUUID + "/data/tasks/" + taskUUID + "/undelete") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def putNewList(newList: List, authenticateResponse: SecurityContext, collectiveUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (collectiveUUID.isDefined) collectiveUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/lists",
marshal(newList).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def getList(listUUID: UUID, authenticateResponse: SecurityContext): List = {
Get("/v2/owners/" + authenticateResponse.userUUID + "/data/lists/" + listUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[List]
}
}
def putExistingList(existingList: List, listUUID: UUID, authenticateResponse: SecurityContext,
collectiveUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (collectiveUUID.isDefined) collectiveUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/lists/" + listUUID.toString(),
marshal(existingList).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def putNewTag(newTag: Tag, authenticateResponse: SecurityContext, collectiveUUID: Option[UUID] = None): SetResult = {
val ownerUUID = if (collectiveUUID.isDefined) collectiveUUID.get else authenticateResponse.userUUID
Put("/v2/owners/" + ownerUUID + "/data/tags",
marshal(newTag).right.get) ~> addHeader("Content-Type", "application/json") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[SetResult]
}
}
def getItem(itemUUID: UUID, authenticateResponse: SecurityContext): Item = {
Get("/v2/owners/" + authenticateResponse.userUUID + "/data/items/" + itemUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[Item]
}
}
def getTask(taskUUID: UUID, authenticateResponse: SecurityContext, collectiveUUID: Option[UUID] = None): Task = {
val ownerUUID = if (collectiveUUID.isDefined) collectiveUUID.get else authenticateResponse.userUUID
Get("/v2/owners/" + ownerUUID + "/data/tasks/" + taskUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[Task]
}
}
def getNote(noteUUID: UUID, authenticateResponse: SecurityContext): Note = {
Get("/v2/owners/" + authenticateResponse.userUUID + "/data/notes/" + noteUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[Note]
}
}
def getTag(tagUUID: UUID, authenticateResponse: SecurityContext): Tag = {
Get("/v2/owners/" + authenticateResponse.userUUID + "/data/tags/" + tagUUID) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[Tag]
}
}
def getUserUUID(email: String, authenticateResponse: SecurityContext): UUID = {
Get("/v2/users?email=" + email) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
responseAs[PublicUser].uuid
}
}
def getItemRevisionList(itemUUID: UUID, authenticateResponse: SecurityContext, jsonOutputName: Option[String] = None): ItemRevisions = {
Get("/v2/owners/" + authenticateResponse.userUUID + "/data/" + itemUUID + "/revisions") ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
if (jsonOutputName.isDefined) writeJsonOutput(jsonOutputName.get, responseAs[String])
responseAs[ItemRevisions]
}
}
def getItemRevision(itemUUID: UUID, revisionNumber: Long, authenticateResponse: SecurityContext, jsonOutputName: Option[String] = None): ExtendedItemChoice = {
Get("/v2/owners/" + authenticateResponse.userUUID + "/data/" + itemUUID + "/revision/" + revisionNumber) ~> addCredentials(BasicHttpCredentials("token", authenticateResponse.token.get)) ~> route ~> check {
if (jsonOutputName.isDefined) writeJsonOutput(jsonOutputName.get, responseAs[String])
responseAs[ExtendedItemChoice]
}
}
def isEmptyItems(items: Items): Boolean = {
return items.items.isEmpty && items.tasks.isEmpty && items.notes.isEmpty && items.lists.isEmpty && items.tags.isEmpty
}
// Helper file writer
def writeJsonOutput(filename: String, contents: String): Unit = {
Some(new PrintWriter(db.TEST_DATA_DESTINATION + "/" + filename + ".json")).foreach { p => p.write(contents); p.close }
}
}
| ttiurani/extendedmind | backend/src/test/scala/org/extendedmind/api/test/ServiceSpecBase.scala | Scala | agpl-3.0 | 12,238 |
package scorex.account
import scorex.crypto.EllipticCurveImpl
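/**
 * An account that, unlike a plain PublicKeyAccount, also carries the private key.
 * The auxiliary constructors derive the key pair from the seed via EllipticCurveImpl.
 */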
case class PrivateKeyAccount(seed: Array[Byte], privateKey: Array[Byte], override val publicKey: Array[Byte])
extends PublicKeyAccount(publicKey) {
override val address = Account.fromPublicKey(publicKey)
def this(seed: Array[Byte], keyPair: (Array[Byte], Array[Byte])) = this(seed, keyPair._1, keyPair._2)
def this(seed: Array[Byte]) = this(seed, EllipticCurveImpl.createKeyPair(seed))
}
| ScorexProject/Scorex-Lagonaki | scorex-basics/src/main/scala/scorex/account/PrivateKeyAccount.scala | Scala | cc0-1.0 | 463 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box.formats
import play.api.libs.json._
import uk.gov.hmrc.ct.box.CtBigDecimal
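/**
 * Generic play-json Format for CT box types that wrap a BigDecimal: reads a JSON
 * number into the box via `builder` and writes the box back out as its value.
 * Illustrative usage (hypothetical box type, for illustration only):
 *   case class B123(value: BigDecimal) extends CtBigDecimal
 *   implicit val b123Format: Format[B123] = new BigDecimalFormat(B123.apply)
 */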
class BigDecimalFormat[T <: CtBigDecimal](builder: (BigDecimal => T)) extends Format[T] {
override def reads(json: JsValue): JsResult[T] = {
JsSuccess(builder(json.as[BigDecimal]))
}
override def writes(out: T): JsValue = {
Json.toJson[BigDecimal](out.value)
}
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/box/formats/BigDecimalFormat.scala | Scala | apache-2.0 | 996 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.content
import org.scalatest.{FunSpec, Matchers}
import play.api.data.validation.ValidationError
import play.api.libs.json._
import com.ibm.spark.kernel.protocol.v5.Data
class CommMsgSpec extends FunSpec with Matchers {
val commMsgJson: JsValue = Json.parse("""
{
"comm_id": "<UUID>",
"data": {}
}
""")
val commMsg = CommMsg(
"<UUID>", Data()
)
describe("CommMsg") {
describe("implicit conversions") {
it("should implicitly convert from valid json to a CommMsg instance") {
// This is the least safe way to convert as an error is thrown if it fails
commMsgJson.as[CommMsg] should be (commMsg)
}
it("should also work with asOpt") {
// This is safer, but we lose the error information as it returns
// None if the conversion fails
val newCompleteRequest = commMsgJson.asOpt[CommMsg]
newCompleteRequest.get should be (commMsg)
}
it("should also work with validate") {
// This is the safest as it collects all error information (not just first error) and reports it
val CompleteRequestResults = commMsgJson.validate[CommMsg]
CompleteRequestResults.fold(
(invalid: Seq[(JsPath, Seq[ValidationError])]) => println("Failed!"),
(valid: CommMsg) => valid
) should be (commMsg)
}
it("should implicitly convert from a CommMsg instance to valid json") {
Json.toJson(commMsg) should be (commMsgJson)
}
}
}
}
| bpburns/spark-kernel | protocol/src/test/scala/com/ibm/spark/kernel/protocol/v5/content/CommMsgSpec.scala | Scala | apache-2.0 | 2,132 |
/**
Copyright (C) 2011-2014 beamly Ltd. http://beamly.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
**/
package beamly.core.lang.future
import scala.concurrent.Future
import org.specs2.mutable.Specification
class FutureOptionWTest extends Specification {
"future option" should {
"map some values" in {
Future(Some(2)).mapOption(_ + 1).get must beSome(3)
}
"map none values" in {
val opt: Option[Int] = None
Future(opt).mapOption(_ + 1).get must beNone
}
"flatMap some values" in {
Future(Some(2)).flatMapOption(i => Future(Some(i + 1))).get must beSome(3)
}
"flatMap none values" in {
val opt: Option[Int] = None
Future(opt).flatMapOption(i => Future(Some(i + 1))).get must beNone
}
"return the result from the first future if this future returns something" in {
val first = Future successful Some(1)
val second = Future successful Some(2)
(first orElse second).get() must beSome(1)
}
"return the result from another future if this future returns None" in {
val first = Future successful None
val second = Future successful Some(2)
(first orElse second).get() must beSome(2)
}
"return None from another future if this future returns None" in {
val first = Future successful None
val second = Future successful None
(first orElse second).get() must beNone
}
}
}
| beamly/beamly.core.lang | src/test/scala/beamly/core/lang/future/FutureOptionWTest.scala | Scala | apache-2.0 | 1,935 |
package com.bfm.topnotch.tnengine
import java.io.{PrintWriter, StringWriter}
import org.json4s._
import org.json4s.native.Serialization
import org.json4s.native.Serialization.writePretty
/**
* A command for TnEngine to run
*/
abstract class TnCmd {
/** The key to use to store the resulting dataframe in the lookup table */
val outputKey: String
/** Whether to cache the resulting dataframe in memory. This should be a boolean defaulting to false,
* but json4s has a problem with default values other than None for option. Change it to a default value if json4s
* solves the bug. */
val cache: Option[Boolean]
/** If writing the output to disk, the path to write to on hdfs, otherwise none */
val outputPath: Option[String]
/** If writing the output in hdfs, the name of the table to mount, otherwise none. Note: this will be ignored if
* outputPath is not specified. */
val tableName: Option[String]
implicit val formats = Serialization.formats(NoTypeHints)
/**
* Overriding toString to making output of unit tests that have cmds in error logs easier to understand
*/
override def toString = writePretty(this)
}
/**
* The input to a command
* @param ref The reference to the data set, either the path on hdfs or the name in the lookup table
* @param onDisk Whether the input data set is stored on disk
* @param delimiter The delimiter for plain text, delimited files. Leave to empty string for parquet.
*/
case class Input(ref: String, onDisk: Boolean, delimiter: Option[String] = None)
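// For example (illustrative values): Input("hdfs:///data/input.psv", onDisk = true, Some("|"))
// reads a pipe-delimited file from HDFS, while Input("someOutputKey", onDisk = false)
// reuses a dataframe stored in the lookup table by an earlier command.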
/**
* The strings used for converting a config file into a TnCmd
*/
object TnCmdStrings {
val ioNamespace = "io"
val commandListStr = "commands"
val writerStr = "writer"
val commandStr = "command"
val paramsStr = "params"
val externalParamsStr = "externalParamsFile"
val outputKeyStr = "outputKey"
val writeToDiskStr = "writeToDisk"
val outputPathStr = "outputPath"
}
/**
* The class indicating that there was at least one error in the configuration for this command
* @param cmdString The JSON string for the command.
* @param errorStr The errors encountered in creating this command.
* @param cmdIdx The index of the command in the plan that failed
 * @param outputKey This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 * @param cache This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 * @param writeToDisk This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 * @param outputPath This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 * @param tableName This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
*/
case class TnErrorCmd (
cmdString: String,
errorStr: String,
cmdIdx: Int,
outputKey: String = "",
cache: Option[Boolean] = None,
writeToDisk: Boolean = false,
outputPath: Option[String] = None,
tableName: Option[String] = None
) extends TnCmd {
override def toString: String = {
s"There was an error with the command in position ${cmdIdx} in its plan. The command was: \n ${cmdString} \n " +
s"The message was: \n ${errorStr} \n\n END OF ERROR MESSAGE FOR COMMAND IN POSITION ${cmdIdx} \n\n"
}
}
object TnErrorCmd {
/**
* Helper method for easily getting the stack trace of an exception as a string
* @param e The exception
* @return The exception's stack trace
*/
def getExceptionStackTrace(e: Exception): String = {
val sw = new StringWriter
e.printStackTrace(new PrintWriter(sw))
sw.toString
}
}
| blackrock/TopNotch | src/main/scala/com/bfm/topnotch/tnengine/TnCmd.scala | Scala | apache-2.0 | 3,742 |
package scalaz.stream
import java.nio.BufferOverflowException
import org.scalacheck._
import Prop._
import scalaz.concurrent.Task
import scalaz.stream.Process._
import scalaz.stream.text.{LengthExceeded, lines}
class LinesSpec extends Properties("text") {
val samples = 0 until 5 flatMap { i => List("\\r\\n", "\\n").map { s =>
"Hello&World.&Foo&Bar&".replace("&", s*i)
}
}
// behavior should be identical to that of scala.io.Source
def checkLine(s: String): Boolean = {
val source = scala.io.Source.fromString(s).getLines().toList
emitAll(s.toCharArray.map(_.toString)).pipe(lines()).toList == source &&
emit(s).pipe(lines()).toList == source
}
property("lines()") = secure {
samples.forall(checkLine)
}
property("lines(n) should fail for lines with length greater than n") = secure {
val error = classOf[LengthExceeded]
emit("foo\\nbar").pipe(lines(3)).toList == List("foo", "bar") && // OK input
Process("foo\\n", "bar").pipe(lines(3)).toList == List("foo", "bar") && // OK input
Process("foo", "\\nbar").pipe(lines(3)).toList == List("foo", "bar") && // OK input
throws(error){ emit("foo").pipe(lines(2)).run[Task].run } &&
throws(error){ emit("foo\\nbarr").pipe(lines(3)).run[Task].run } &&
throws(error){ emit("fooo\\nbar").pipe(lines(3)).run[Task].run }
}
property("lines(n) can recover from lines longer than n") = {
import Gen._
val stringWithNewlinesGen: Gen[String] =
listOf(frequency((5, alphaChar), (1, oneOf('\\n', '\\r')))).map(_.mkString)
def rmWhitespace(s: String): String = s.replaceAll("\\\\s", "")
forAll(listOf(stringWithNewlinesGen)) { xs: List[String] =>
val stripped = rmWhitespace(xs.mkString)
val maxLength = Gen.choose(1, stripped.length).sample.getOrElse(1)
val nonFailingLines = lines(maxLength).onFailure {
case LengthExceeded(_, s) => emitAll(s.grouped(maxLength).toList)
}.repeat
val allLines = emitAll(xs).pipe(nonFailingLines).toList
allLines.forall(_.length <= maxLength) &&
rmWhitespace(allLines.mkString) == stripped
}
}
}
| refried/scalaz-stream | src/test/scala/scalaz/stream/LinesSpec.scala | Scala | mit | 2,132 |
package tap.ast
import scala.util.parsing.input.Positional
/**
* An extension of the Positional trait to include a filename along side source position information.
*/
trait FilePositional extends Positional {
/**
* The file this object came from.
*/
var file: String = null
/**
* Sets the source file this object came from.
*/
def setFile(newFile: String): this.type = {
file = newFile
this
}
/**
* Sets the source file and position within the file this object came from.
*/
def setFilePosFrom(fp: FilePositional): this.type = {
file = fp.file
pos = fp.pos
this
}
}
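/**
 * Illustrative sketch only, not part of the original tap source: a minimal node type mixing in
 * FilePositional and a helper that copies file/position information from another node. The
 * `Node` class and `copyPos` helper are made up for this example.
 */
object FilePositionalSketch {
  final class Node extends FilePositional
  def copyPos(from: FilePositional): Node = (new Node).setFilePosFrom(from)
}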
object NullFilePosition extends FilePositional
| garyb/tap | src/main/scala/tap/ast/FilePositional.scala | Scala | mit | 722 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka.utils
import org.opengis.feature.simple.SimpleFeature
sealed trait GeoMessage
object GeoMessage {
/**
* Creates a `Clear` message with the current time
*
* @return
*/
def clear(): Clear = Clear
/**
* Creates a `Delete` message with the current time
*
* @param id feature id being deleted
* @return
*/
def delete(id: String): GeoMessage = Delete(id)
/**
* Creates a `Change` message with the current time
*
* @param sf simple feature being added/updated
* @return
*/
def change(sf: SimpleFeature): GeoMessage = Change(sf)
/**
* Message indicating a feature has been added/updated
*
* @param feature feature being added/updated
*/
case class Change(feature: SimpleFeature) extends GeoMessage
/**
* Message indicating a feature has been deleted
*
* @param id feature id of the feature being deleted
*/
case class Delete(id: String) extends GeoMessage
/**
* Message indicating all features have been deleted
*
*/
trait Clear extends GeoMessage
case object Clear extends Clear
}
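/**
 * Illustrative sketch only, not part of the original GeoMesa source: how a consumer of the
 * message feed might pattern match on the three message shapes. The `describe` helper and its
 * wording are assumptions made for this example.
 */
object GeoMessageUsageSketch {
  def describe(msg: GeoMessage): String = msg match {
    case GeoMessage.Change(feature) => s"change feature ${feature.getID}"
    case GeoMessage.Delete(id) => s"delete feature $id"
    case _: GeoMessage.Clear => "clear all features"
  }
}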
| ddseapy/geomesa | geomesa-kafka/geomesa-kafka-datastore/src/main/scala/org/locationtech/geomesa/kafka/utils/GeoMessage.scala | Scala | apache-2.0 | 1,614 |
package spinoco.protocol.mail.header
import org.scalacheck.Prop.protect
import org.scalacheck.Properties
import spinoco.protocol.mail.EmailAddress
/**
* Created by pach on 23/10/17.
*/
object ResentFromSpec extends Properties("ResentFrom") {
import spinoco.protocol.mail.SpecUtil._
implicit val HeaderCodec = `Resent-From`.codec
property("single-email") = protect {
verify(
"John Doe <[email protected]>"
, `Resent-From`(EmailAddress("john.doe", "spinoco.com", Some("John Doe")), Nil)
, "\\"John Doe\\" <[email protected]>"
)
}
property("multiple-email") = protect {
verify(
"\\"John Doe\\" <[email protected]>, [email protected]"
, `Resent-From`(EmailAddress("john.doe", "spinoco.com", Some("John Doe")), List(EmailAddress("jannet.doe", "spinoco.com", None)))
, "\\"John Doe\\" <[email protected]>,\\r\\n [email protected]"
)
}
}
| Spinoco/protocol | mail/src/test/scala/spinoco/protocol/mail/header/ResentFromSpec.scala | Scala | mit | 922 |
/*
val input = List(1,2,3,0,4)
val output = List(0,3,2,1,4)
*/
def garage(input: List[Int], output: List[Int]): List[List[Int]] = {
  var result = List[List[Int]]()
  // Work on a copy of the starting configuration held in a var; 0 marks the empty slot.
  var step = for (i <- input) yield i
  while (step != output) {
    // Target position of the car currently parked at each slot.
    val ideal = step.map(output.indexOf)
    val zeroPos = step.indexOf(0)
    // For each slot: how much closer its car gets to its target if it is moved into the empty slot.
    val benefit = List.range(0, input.length).map(i => math.abs(i - ideal(i)) - math.abs(zeroPos - ideal(i)))
    // Greedily pick the slot (other than the empty one) whose move helps the most.
    val bestPos = benefit.zipWithIndex.filter(_._2 != zeroPos).max._2
    // Swap the chosen car into the empty slot and record the intermediate configuration.
    step = step.map(x => x match {
      case _ if x == 0 => step(bestPos)
      case _ if x == step(bestPos) => 0
      case _ => x
    })
    result = result :+ step
  }
  result
}
| marcosfede/algorithms | array/garage/garage.scala | Scala | gpl-3.0 | 727 |
package doobie.enum
import doobie.util.invariant._
import doobie.enum.{ nullability => N }
import java.sql.ParameterMetaData._
import scalaz.Equal
import scalaz.std.anyVal.intInstance
object parameternullable {
/** @group Implementation */
sealed abstract class ParameterNullable(val toInt: Int) {
def toNullability: N.Nullability =
N.Nullability.fromParameterNullable(this)
}
/** @group Values */ case object NoNulls extends ParameterNullable(parameterNoNulls)
/** @group Values */ case object Nullable extends ParameterNullable(parameterNullable)
/** @group Values */ case object NullableUnknown extends ParameterNullable(parameterNullableUnknown)
/** @group Implementation */
object ParameterNullable {
def fromInt(n:Int): Option[ParameterNullable] =
Some(n) collect {
case NoNulls.toInt => NoNulls
case Nullable.toInt => Nullable
case NullableUnknown.toInt => NullableUnknown
}
def fromNullability(n: N.Nullability): ParameterNullable =
n match {
case N.NoNulls => NoNulls
case N.Nullable => Nullable
case N.NullableUnknown => NullableUnknown
}
def unsafeFromInt(n: Int): ParameterNullable =
fromInt(n).getOrElse(throw InvalidOrdinal[ParameterNullable](n))
implicit val EqualParameterNullable: Equal[ParameterNullable] =
Equal.equalBy(_.toInt)
}
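  /** Illustrative only, not part of the original doobie source: mapping a raw JDBC constant
    * back to the enum and on to a generic `Nullability` value. */
  private val exampleNullability: Option[N.Nullability] =
    ParameterNullable.fromInt(parameterNullable).map(_.toNullability)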
}
| rperry/doobie | core/src/main/scala/doobie/enum/parameternullable.scala | Scala | mit | 1,438 |
package com.twitter.finagle.redis
import com.twitter.finagle.partitioning.ConsistentHashPartitioningService
import com.twitter.finagle.partitioning.ConsistentHashPartitioningService.NoPartitioningKeys
import com.twitter.finagle.partitioning.PartitioningService.PartitionedResults
import com.twitter.finagle.partitioning.param.NumReps
import com.twitter.finagle.redis.param.RedisKeyHasher
import com.twitter.finagle.redis.protocol.{Command, Reply, StatusReply}
import com.twitter.finagle.redis.util.{BufToString, ReplyFormat}
import com.twitter.finagle.util.DefaultLogger
import com.twitter.finagle.{ServiceFactory, Stack, Stackable}
import com.twitter.hashing
import com.twitter.io.Buf
import com.twitter.logging.Level
import com.twitter.util.Future
import scala.collection.{Set => SSet}
private[finagle] object RedisPartitioningService {
private[finagle] class UnsupportedCommand(msg: String) extends Exception(msg)
private[finagle] class UnsupportedBatchCommand(msg: String) extends Exception(msg)
private[finagle] class UnsupportedReply(msg: String) extends Exception(msg)
private[finagle] class FailedPartitionedCommand(msg: String = null, t: Throwable = null)
extends Exception(msg, t)
private[finagle] val role = Stack.Role("RedisPartitioning")
private[finagle] val description =
"Partitioning Service based on a consistent hash ring for the redis protocol"
def module: Stackable[ServiceFactory[Command, Reply]] =
new ConsistentHashPartitioningService.Module[Command, Reply, Buf] {
override val role: Stack.Role = RedisPartitioningService.role
override val description: String = RedisPartitioningService.description
def newConsistentHashPartitioningService(
underlying: Stack[ServiceFactory[Command, Reply]],
params: Stack.Params
): ConsistentHashPartitioningService[Command, Reply, Buf] = {
val RedisKeyHasher(hasher) = params[RedisKeyHasher]
val NumReps(numReps) = params[NumReps]
new RedisPartitioningService(
underlying,
params,
hasher,
numReps
)
}
}
private val StatusOK = StatusReply("OK")
}
private[finagle] class RedisPartitioningService(
underlying: Stack[ServiceFactory[Command, Reply]],
params: Stack.Params,
keyHasher: hashing.KeyHasher = hashing.KeyHasher.MURMUR3,
numReps: Int = NumReps.Default)
extends ConsistentHashPartitioningService[Command, Reply, Buf](
underlying,
params,
keyHasher,
numReps
) {
import RedisPartitioningService._
import com.twitter.finagle.redis.protocol._
private[this] val logger = DefaultLogger
final override protected def getKeyBytes(key: Buf): Array[Byte] =
Buf.ByteArray.Owned.extract(key)
private[this] def unsupportedCommand(cmd: Command): Nothing = {
val msg = s"Unsupported command: $cmd"
if (logger.isLoggable(Level.DEBUG))
logger.log(Level.DEBUG, msg)
throw new UnsupportedCommand(msg)
}
private[this] def unsupportedReply(reply: Reply): Nothing = {
val msg = s"UnsupportedReply: $reply"
if (logger.isLoggable(Level.DEBUG))
logger.log(Level.DEBUG, msg)
throw new UnsupportedReply(msg)
}
protected def getPartitionKeys(command: Command): Seq[Buf] =
command match {
// the following commands assume talking to a single redis server,
// this is incompatible with key based routing.
case keys: Keys => unsupportedCommand(keys)
case migrate: Migrate => unsupportedCommand(migrate)
case select: Select => unsupportedCommand(select)
case scan: Scan => unsupportedCommand(scan)
case randomkey: Randomkey.type => unsupportedCommand(randomkey)
// eval operations could possibly be supported, however the fact that the
    // reply must be cast to a particular type makes it non-obvious how we should
// handle the results in mergeResponse. Leaving it as a TODO.
case eval: Eval => unsupportedCommand(eval)
case evalSha: EvalSha => unsupportedCommand(evalSha)
// mSetNx is unsupported because we cannot guarantee the atomicity of this operation across
// multiple servers
case mSetNx: MSetNx => unsupportedCommand(mSetNx)
case _: Ping.type => Seq(Buf.Empty)
case kc: KeyCommand => Seq(kc.key)
case kcs: KeysCommand => kcs.keys
case _ => unsupportedCommand(command)
}
protected def createPartitionRequestForKeys(command: Command, pKeys: Seq[Buf]): Command =
command match {
case c: PFCount => c.copy(keys = pKeys)
case d: Del => d.copy(keys = pKeys)
case s: SInter => s.copy(keys = pKeys)
case m: MGet => m.copy(keys = pKeys)
case m: MSet => m.copy(kv = m.kv.filterKeys(pKeys.toSet).toMap)
case _ => unsupportedCommand(command)
}
// this is only called for partitioned commands, i.e. the commands referred to in
// createPartitionRequestForKeys
protected override def mergeResponses(
originalReq: Command,
pr: PartitionedResults[Command, Reply]
): Reply = {
if (pr.failures.nonEmpty) {
if (logger.isLoggable(Level.DEBUG)) {
logger.log(Level.DEBUG, "failures in bulk reply")
for {
(cmd, t) <- pr.failures
} {
logger.log(Level.DEBUG, s"Command: $cmd", t)
}
}
pr.failures.head match {
case (_, t) =>
throw new FailedPartitionedCommand("Partitioned command failed, first error is", t)
}
}
// if we get here, there are no failures in results
originalReq match {
case _: Ping.type => NoReply
case _: PFCount | _: Del =>
IntegerReply(
pr.successes.map {
case (_, IntegerReply(n)) => n
case (_, reply) => unsupportedReply(reply)
}.sum
)
case MGet(keys) =>
val resultsMap: Map[Buf, Reply] =
pr.successes.flatMap {
case (MGet(pkeys), MBulkReply(messages)) => pkeys.zip(messages)
case (_, rep) => unsupportedReply(rep)
}.toMap
// we map the keys over the results because the order of results should match
// the order of the supplied keys
MBulkReply(keys.map(resultsMap).toList)
case _: SInter => aggregateSetIntersection(pr.successes.map(_._2))
// from the redis documentation:
// > Simple string reply: always OK since MSET can't fail.
case _: MSet => StatusOK
case wat => unsupportedCommand(wat)
}
}
// Since the set intersection command (SInter) may be split across multiple partitions
// it's necessary to perform some post-processing in this step. This function takes the
// results from the various partitions and reduces them using the Set.intersect method
// and returns the result
private[this] def aggregateSetIntersection(reps: Seq[Reply]): Reply = {
val sets =
reps.map {
case MBulkReply(messages) => ReplyFormat.toBuf(messages).toSet
case EmptyMBulkReply => SSet.empty[Buf]
case rep => unsupportedReply(rep)
}
if (sets.isEmpty) {
EmptyMBulkReply
} else {
val reduced = sets.reduce(_ intersect _)
if (reduced.isEmpty) EmptyMBulkReply
else MBulkReply(reduced.toList.map(BulkReply))
}
}
final protected def noPartitionInformationHandler(req: Command): Future[Nothing] = {
val ex = new NoPartitioningKeys(
s"NoPartitioningKeys in for the thrift method: ${BufToString(req.name)}")
if (logger.isLoggable(Level.DEBUG))
logger.log(Level.DEBUG, "partitionRequest failed: ", ex)
Future.exception(ex)
}
}
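/**
 * Illustrative sketch only, not part of the original Finagle source: the same reduce-by-intersection
 * idea used in `aggregateSetIntersection` above, shown on plain Scala sets so the merging rule is
 * easy to see in isolation. The object and method names are made up for this example.
 */
private[redis] object SInterMergeSketch {
  def merge(perPartition: Seq[Set[String]]): Set[String] =
    if (perPartition.isEmpty) Set.empty
    else perPartition.reduce(_ intersect _)
}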
| twitter/finagle | finagle-redis/src/main/scala/com/twitter/finagle/redis/RedisPartitioningService.scala | Scala | apache-2.0 | 7,622 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.samples
class Greeter {
def greet(person: Person) =
println("Hello ${person.name}");
}
}
| HenryHarper/Acquire-Reboot | gradle/samples/jvmComponents/scala/src/main/scala/org/gradle/samples/Greeter.scala | Scala | mit | 730 |
package edu.gemini.qv.plugin.util
import java.awt.Desktop
import java.awt.print.{PageFormat, PrinterException, PrinterJob}
import java.io.{File, PrintWriter}
import javax.swing.JTable
import javax.swing.JTable.PrintMode
import javax.swing.table.TableColumn
import edu.gemini.qv.plugin.table.ObservationTableModel.{DecValue, RaValue, TimeValue}
import edu.gemini.qv.plugin.table.renderer.EncodedObservationsRenderer.TextPane
import edu.gemini.qv.plugin.ui.QvGui
import edu.gemini.qv.plugin.ui.QvGui.ActionButton
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.swing.{Button, Label, TextArea}
/**
 * Helpers for printing QV observation tables and exporting them to spreadsheet (xls) and browser (html) applications.
 */
object Exporter {
def print(table: JTable, hiddenHeaders: Option[Seq[TableColumn]] = None): Button = ActionButton(
"Print...",
"Prints this table scaling its size to fit the width of a page.",
() => doWithHiddenHeaders(table, hiddenHeaders) {
Exporter.print(table, PageFormat.PORTRAIT)
}
)
def printLandscape(table: JTable, hiddenHeaders: Option[Seq[TableColumn]] = None): Button = ActionButton(
"Print (Landscape)...",
"Prints this table using paper landscape orientation, scaling its size to fit the width of a page.",
() => doWithHiddenHeaders(table, hiddenHeaders) {
Exporter.print(table, PageFormat.LANDSCAPE)
}
)
def exportXls(table: JTable, hiddenHeaders: Option[Seq[TableColumn]] = None): Button = ActionButton(
"Open in Spreadsheet...",
"Opens this table with the application that is configured for xls files.",
() => doWithHiddenHeaders(table, hiddenHeaders) {
Exporter.openAsXls(table)
}
)
def exportHtml(table: JTable, hiddenHeaders: Option[Seq[TableColumn]] = None): Button = ActionButton(
"Open in Browser...",
"Opens this table with the application that is configured for html files.",
() => doWithHiddenHeaders(table, hiddenHeaders) {
Exporter.openAsHtml(table)
}
)
// === Deal with hidden headers that need to be printed..
/**
   * If there are hidden header columns that need to be printed, we must add them before printing the
   * table and then remove them again. That is a bit cumbersome, but it allows us to use the Swing table
   * printing even for the setup where we have the table row headers (Observation ID) in a separate table.
   * @param table the table to print or export
   * @param hiddenHeaders the normally hidden header columns to show temporarily, if any
   * @param fn the print or export action to run while the hidden headers are visible
   * @tparam T the result type of the action (the action is run only for its side effect)
*/
private def doWithHiddenHeaders[T](table: JTable, hiddenHeaders: Option[Seq[TableColumn]])(fn: => T) {
hiddenHeaders.foreach(showHeaders(table, _))
fn
hiddenHeaders.foreach(hideHeaders(table, _))
}
private def showHeaders(table: JTable, hidden: Seq[TableColumn]) =
hidden.zipWithIndex.foreach { case (c, ix) =>
table.addColumn(c)
table.moveColumn(table.getColumnCount - 1, ix)
}
private def hideHeaders(table: JTable, hidden: Seq[TableColumn]) =
hidden.foreach(c => table.removeColumn(c))
// ===========
private def print(table: JTable, orientation: Int): Unit = {
val job = PrinterJob.getPrinterJob
    // on MacOS X the native print dialog does not seem to allow setting the paper orientation (??)
val pageFormat = job.defaultPage()
pageFormat.setOrientation(orientation)
job.setPrintable(table.getPrintable(PrintMode.FIT_WIDTH, null, null), pageFormat)
    // use the native print dialog; this allows users to print to PDF, which is often useful
val ok = job.printDialog()
if (ok) {
try {
job.print()
} catch {
case e: PrinterException => QvGui.showError("Printing Failed", "Could not print data.", e)
}
}
}
private def openAsXls(table: JTable) = openAs(table, "xls")
private def openAsHtml(table: JTable) = openAs(table, "html")
private def openAs(table: JTable, format: String): Unit = {
val busy = QvGui.showBusy("Opening Data", "Opening table in external application...")
val header = headers(table)
val data = values(table)
val file = toTable(header, data, format)
Future {
Desktop.getDesktop.open(file)
} andThen {
case _ => busy.done()
} onFailure {
case t: Throwable => QvGui.showError("Open Data Failed", s"Could not open data as $format.", t)
}
}
private def toTable(header: Vector[String], data: Vector[Vector[AnyRef]], format: String): File = {
val file = File.createTempFile("openAsFile", s".${format}")
val out = new PrintWriter(file)
out.append("""
|<html>
| <head>
| <style type="text/css">
| .text{
| mso-number-format:"\\@"; <!-- Excel: keep excel from being "clever" and interpret values -->
| mso-data-placement:same-cell; <!-- Excel: keep values with line breaks in single cell -->
| }
| </style>
| </head>
| <body>
| <table>
| <thead>
| <tr>
""".stripMargin)
for (h <- 0 to header.size - 1) {
out.append(" <td>")
out.append(header(h))
out.append("</td>")
}
out.append(
"""
| </tr>
| </thead>
| <tbody>
""".stripMargin)
for (r <- 0 to data.size - 1) {
out.append(" <tr>\\n")
for (c <- 0 to data(r).size - 1) {
val string = toString(data(r)(c))
out.append(" <td class=\\"text\\">")
out.append(string.replaceAllLiterally("\\n", "<br>"))
out.append(" </td>\\n")
}
out.append(" </tr>\\n")
}
out.append(
"""
| </tbody>
| </table>
| </body>
|</html>
""".stripMargin)
out.close
file
}
private def headers(table: JTable): Vector[String] =
(for (h <- 0 to table.getColumnCount - 1) yield table.getColumnName(h)).toVector
private def values(table: JTable): Vector[Vector[AnyRef]] =
(for (r <- 0 to table.getRowCount - 1) yield colValues(table, r)).toVector
private def colValues(table: JTable, r: Int): Vector[AnyRef] =
(for (c <- 0 to table.getColumnCount - 1) yield table.getValueAt(r, c)).toVector
private def toString(v: AnyRef): String = v match {
case s: String => s
case d: java.lang.Double => f"$d%.2f"
case l: Label => l.text
case t: TextArea => t.text
case t: TextPane => t.styledDocument.getText(0, t.styledDocument.getLength)
case r: RaValue => r.prettyString
case d: DecValue => d.prettyString
case t: TimeValue => t.prettyString
case x => x.toString
}
}
| arturog8m/ocs | bundle/edu.gemini.qv.plugin/src/main/scala/edu/gemini/qv/plugin/util/Exporter.scala | Scala | bsd-3-clause | 6,648 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.queryapitests.simple
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import nl.ebpi.yaidom.parse.DocumentParserUsingDom
import nl.ebpi.yaidom.queryapitests.AbstractXbrlInstanceQueryTest
import nl.ebpi.yaidom.resolved
import nl.ebpi.yaidom.simple.Elem
/**
* XBRL instance query test case for simple elements.
*
* @author Chris de Vreeze
*/
@RunWith(classOf[JUnitRunner])
class XbrlInstanceQueryTest extends AbstractXbrlInstanceQueryTest {
final type E = Elem
protected final val xbrlInstance: Elem = {
val docParser = DocumentParserUsingDom.newInstance()
val is = classOf[XbrlInstanceQueryTest].getResourceAsStream("/nl/ebpi/yaidom/queryapitests/sample-xbrl-instance.xml")
val doc = docParser.parse(is)
doc.documentElement
}
protected final def toResolvedElem(elem: E): resolved.Elem =
resolved.Elem(elem)
}
}
| EBPI/yaidom | src/test/scala/nl/ebpi/yaidom/queryapitests/simple/XbrlInstanceQueryTest.scala | Scala | apache-2.0 | 1,494 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.plugins.transformer
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.FlatSpec
import de.fuberlin.wiwiss.silk.plugins.Plugins
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import de.fuberlin.wiwiss.silk.plugins.transformer.normalize.LowerCaseTransformer
@RunWith(classOf[JUnitRunner])
class LowerCaseTransformerTest extends FlatSpec with ShouldMatchers {
Plugins.register()
val transformer = new LowerCaseTransformer()
"LowerCaseTransformer" should "return '123'" in {
transformer.evaluate("123") should equal("123")
}
val transformer1 = new LowerCaseTransformer()
"LowerCaseTransformer" should "return 'abc'" in {
transformer1.evaluate("ABc") should equal("abc")
}
}
| fusepoolP3/p3-silk | silk-core/src/test/scala/de/fuberlin/wiwiss/silk/plugins/transformer/LowerCaseTransformerTest.scala | Scala | apache-2.0 | 1,333 |
package org.jetbrains.plugins.scala
package lang
package formatting
import com.intellij.lang.ASTNode
import psi.api.expr.ScIfStmt
import psi.api.ScalaFile
/**
* User: Alexander Podkhalyuzin
* Date: 05.10.2008
*/
object FormatterUtil {
def calcIndent(node: ASTNode): Int = {
node.getTreeParent.getPsi match {
case ifStmt: ScIfStmt => {
ifStmt.getParent match {
case parent: ScIfStmt if parent.getLastChild == ifStmt && parent.elseBranch != None => calcIndent(node.getTreeParent)
case parent => calcAbsolutePosition(node) - calcAbsolutePosition(parent.getNode) match {
case i if i >= 0 => i + calcIndent(parent.getNode)
case _ => calcIndent(parent.getNode)
}
}
}
case _: ScalaFile => 0
case _ => calcIndent(node.getTreeParent)
}
}
def calcAbsolutePosition(node: ASTNode): Int = {
val text = node.getPsi.getContainingFile.getText
var offset = node.getTextRange.getStartOffset - 1
var result = 0
while (offset >= 0 && text(offset) != '\n') {offset += -1; result += 1}
result
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/formatting/FormatterUtil.scala | Scala | apache-2.0 | 1,110 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.workflow
import org.apache.predictionio.data.storage.EngineInstance
import org.json4s._
trait EngineServerPlugin {
val pluginName: String
val pluginDescription: String
val pluginType: String
def start(context: EngineServerPluginContext): Unit
def process(
engineInstance: EngineInstance,
query: JValue,
prediction: JValue,
context: EngineServerPluginContext): JValue
def handleREST(arguments: Seq[String]): String
}
object EngineServerPlugin {
val outputBlocker = "outputblocker"
val outputSniffer = "outputsniffer"
}
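/**
 * Illustrative sketch only, not part of the original PredictionIO source: a minimal plugin
 * implementation that registers as an output sniffer and passes predictions through unchanged.
 * The class name and returned values are made up for this example.
 */
class PassThroughSnifferSketch extends EngineServerPlugin {
  val pluginName = "pass-through-sniffer-sketch"
  val pluginDescription = "Example plugin that returns the prediction untouched"
  val pluginType = EngineServerPlugin.outputSniffer
  def start(context: EngineServerPluginContext): Unit = ()
  def process(
    engineInstance: EngineInstance,
    query: JValue,
    prediction: JValue,
    context: EngineServerPluginContext): JValue = prediction
  def handleREST(arguments: Seq[String]): String = "{}"
}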
| alex9311/PredictionIO | core/src/main/scala/org/apache/predictionio/workflow/EngineServerPlugin.scala | Scala | apache-2.0 | 1,202 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import org.scalacheck.Arbitrary
import org.scalacheck.Prop._
class BaseMetricProperties extends CheckProperties with MetricProperties {
property("double metric") {
metricLaws[Double](defaultEqFn)
}
property("int metric") {
metricLaws[Int](defaultEqFn)
}
property("float metric") {
metricLaws[Float](defaultEqFn)
}
property("long metric") {
metricLaws[Long](defaultEqFn)
}
property("short metric") {
metricLaws[Short](defaultEqFn)
}
implicit val iterMetric = Metric.L1Iterable[Double]
// TODO: we won't need this when we have an Equatable trait
def listEqfn(a: List[Double], b: List[Double]) = {
val maxSize = scala.math.max(a.size, b.size)
val diffA = maxSize - a.size
val diffB = maxSize - b.size
val newA = if (diffA > 0) a ++ Iterator.fill(diffA)(0.0) else a
val newB = if (diffB > 0) b ++ Iterator.fill(diffB)(0.0) else b
newA == newB
}
property("double iterable metric") {
metricLaws[List[Double]](listEqfn)
}
implicit val mapMetric = Metric.L1Map[Int, Double]
// TODO: we won't need this when we have an Equatable trait
def mapEqFn(a: Map[Int, Double], b: Map[Int, Double]) = {
(a.keySet ++ b.keySet).forall { key =>
(a.get(key), b.get(key)) match {
case (Some(aVal), Some(bVal)) => aVal == bVal
case (Some(aVal), None) => aVal == 0.0
case (None, Some(bVal)) => bVal == 0.0
case _ => true
}
}
}
property("int double map metric") {
metricLaws[Map[Int, Double]](mapEqFn)
}
}
trait MetricProperties {
def isNonNegative[T: Metric: Arbitrary] = forAll { (a: T, b: T) =>
val m = Metric(a, b)
beGreaterThan(m, 0.0) || beCloseTo(m, 0.0)
}
def isEqualIffZero[T: Metric: Arbitrary](eqfn: (T, T) => Boolean) = forAll { (a: T, b: T) =>
if (eqfn(a, b)) beCloseTo(Metric(a, b), 0.0) else !beCloseTo(Metric(a, b), 0.0)
}
def isSymmetric[T: Metric: Arbitrary] = forAll { (a: T, b: T) =>
beCloseTo(Metric(a, b), Metric(b, a))
}
def satisfiesTriangleInequality[T: Metric: Arbitrary] = forAll { (a: T, b: T, c: T) =>
val m1 = Metric(a, b) + Metric(b, c)
val m2 = Metric(a, c)
beGreaterThan(m1, m2) || beCloseTo(m1, m2)
}
def metricLaws[T: Metric: Arbitrary](eqfn: (T, T) => Boolean) =
isNonNegative[T] && isEqualIffZero[T](eqfn) && isSymmetric[T] && satisfiesTriangleInequality[T]
// TODO: these are copied elsewhere in the tests. Move them to a common place
def beCloseTo(a: Double, b: Double, eps: Double = 1e-10) = a == b || (math.abs(a - b) / math.abs(a)) < eps || (a.isInfinite && b.isInfinite)
def beGreaterThan(a: Double, b: Double, eps: Double = 1e-10) = a > b - eps || (a.isInfinite && b.isInfinite)
def defaultEqFn[T](a: T, b: T): Boolean = a == b
}
| erikerlandson/algebird | algebird-test/src/test/scala/com/twitter/algebird/MetricProperties.scala | Scala | apache-2.0 | 3,355 |
package org.jetbrains.plugins.scala
package lang
package transformation
import com.intellij.openapi.util.TextRange
import com.intellij.psi.impl.DebugUtil
import com.intellij.psi.{PsiElement, PsiFile, PsiFileFactory}
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.extensions._
import org.junit.Assert.assertEquals
/**
* @author Pavel Fatin
*/
abstract class TransformationTest extends base.ScalaLightCodeInsightFixtureTestAdapter with util.Markers {
@Language("Scala")
protected val header: String = ""
import TransformationTest._
protected def transform(element: PsiElement, file: PsiFile, reformat: Transformer.ReformatAction): Unit
protected final def check(@Language("Scala") before: String,
@Language("Scala") after: String)
(@Language("Scala") header: String = "",
@Language("Scala") footer: String = ""): Unit = {
doCheck(
before.withNormalizedSeparator,
after.withNormalizedSeparator
)(
header.withNormalizedSeparator,
footer.withNormalizedSeparator
)
}
private def doCheck(@Language("Scala") before: String,
@Language("Scala") after: String)
(@Language("Scala") header: String,
@Language("Scala") footer: String): Unit = {
implicit val headerAndFooter: (String, String) = (createHeader(header), footer)
val actualFile = configureByText(before)
// collect all ranges that should be formatted
var actualRewriteTextRanges = List.empty[TextRange]
val reformat: Transformer.ReformatAction = (textRanges, _, _) => actualRewriteTextRanges :::= textRanges
actualFile.depthFirst()
.foreach(transform(_, actualFile, reformat))
val (afterCode, expectedReformatRanges) = extractNumberedMarkers(after)
val expectedReformatRangesWithHeader = expectedReformatRanges.map(adjustMarkerRanges)
assertEquals(afterCode.trim, slice(actualFile).trim)
val expectedFile = configureByText(afterCode)
assertEquals(psiToString(expectedFile), psiToString(actualFile))
assertEquals(
sortRanges(expectedReformatRangesWithHeader),
sortRanges(actualRewriteTextRanges)
)
}
private def createHeader(header: String) =
s"""$PredefinedHeader
|${this.header}
|$header""".stripMargin.withNormalizedSeparator
private def configureByText(text: String)
(implicit headerAndFooter: (String, String)): PsiFile = {
val (header, footer) = headerAndFooter
val fileText =
s"""$header
|$text
|$footer""".stripMargin.withNormalizedSeparator
PsiFileFactory.getInstance(getProject).createFileFromText(
"foo.scala",
ScalaFileType.INSTANCE,
fileText
)
}
private def sortRanges(ranges: Seq[TextRange]) =
ranges.sorted(Ordering.by((range: TextRange) => (range.getStartOffset, range.getEndOffset))).toList
}
object TransformationTest {
val ScalaSourceHeader = "import scala.io.Source"
private val PredefinedHeader: String =
s"""class A { def a(): Unit = _ }
|class B { def b(): Unit = _ }
|class C { def c(): Unit = _ }
|object A extends A
|object B extends B
|object C extends C""".stripMargin.withNormalizedSeparator
private def psiToString(file: PsiFile): String =
DebugUtil.psiToString(file, true)
private def slice(file: PsiFile)
(implicit headerAndFooter: (String, String)): String = {
val (header, footer) = headerAndFooter
val text = file.getText
text.substring(header.length + 1, text.length - (footer.length + 1))
}
private def adjustMarkerRanges(range: TextRange)
(implicit headerAndFooter: (String, String)): TextRange = {
val (header, _) = headerAndFooter
range.shiftRight(header.length + 1)
}
}
abstract class TransformerTest(private val transformer: Transformer) extends TransformationTest {
override protected final def transform(element: PsiElement, file: PsiFile, reformat: Transformer.ReformatAction): Unit =
Transformer.applyTransformerAndReformat(element, file, transformer, reformat)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/transformation/TransformationTest.scala | Scala | apache-2.0 | 4,233 |
/*
* Copyright 2012-2014 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.ui.fonts
/** Handy placeholder to act as a root for resources. */
class Fonts {
}
}
| vyadh/viper | ui/src/main/scala/viper/ui/fonts/Fonts.scala | Scala | apache-2.0 | 704 |