| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package org.scalajs.testsuite.javalib.time.chrono
import java.time.LocalDate
import org.scalajs.jasminetest.JasmineTest
import org.scalajs.testsuite.utils.ExpectExceptions
import java.time.chrono.ChronoPeriod
object ChronoPeriodTest extends JasmineTest with ExpectExceptions {
describe("java.time.chrono.ChronoPeriod") {
it("should respond to `between`") {
val ds = Seq(LocalDate.MIN, LocalDate.of(2011, 2, 28), LocalDate.MAX)
for {
d1 <- ds
d2 <- ds
} {
expect(ChronoPeriod.between(d1, d2) == d1.until(d2)).toBeTruthy
}
}
}
}
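// Illustrative note (not part of the original test): for concrete dates the two sides of the
// assertion above are interchangeable, e.g. ChronoPeriod.between(LocalDate.of(2011, 2, 28),
// LocalDate.of(2011, 3, 1)) and LocalDate.of(2011, 2, 28).until(LocalDate.of(2011, 3, 1))
// both yield a period of one day (P1D), since 2011 is not a leap year.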
| jasonchaffee/scala-js | test-suite/js/src/test/require-jdk8/org/scalajs/testsuite/javalib/time/chrono/ChronoPeriodTest.scala | Scala | bsd-3-clause | 590 |
package dao
import com.novus.salat.dao.SalatDAO
import se.radley.plugin.salat._
import model.{SortMode, Article}
import play.api.Play.current
import org.bson.types.ObjectId
import com.mongodb.casbah.commons.MongoDBObject
import org.joda.time.DateTime
/**
* The Class ArticleDao.
*
* @author Nguyen Duc Dung
* @since 1/31/14 4:03 AM
*
*/
object ArticleDao extends BaseDao[Article, String] {
override def dao = new SalatDAO[Article, String](collection = mongoCollection("article")) {}
def findByCatId(catId: ObjectId, sortMode: String, page: Int = 1, itemDisplay: Int = 10) = {
val skip = (page - 1) * itemDisplay
val blogIds = BlogDao.find(
MongoDBObject(
"categoryId" -> catId,
"homePage" -> true
)
).sort(MongoDBObject("read" -> -1)).toList.map(_._id)
val query = MongoDBObject("blogId" -> MongoDBObject("$in" -> blogIds))
val totalRow = count(query)
var totalPage = totalRow / itemDisplay
if (totalRow - totalPage * itemDisplay > 0) {
totalPage += 1
}
val sort = if (sortMode == SortMode.newest) {
MongoDBObject("publishedDate" -> -1)
} else {
query.put("publishedDate", MongoDBObject("$gt" -> DateTime.now().minusDays(30)))
MongoDBObject("commentTotal" -> -1, "clicked" -> -1, "publishedDate" -> -1)
}
val articles = find(query)
.skip(skip)
.limit(itemDisplay)
.sort(sort)
.toList
(articles, totalPage.toInt)
}
def findByBlogId(blogId: ObjectId, sortMode: String, page: Int = 1, itemDisplay: Int = 10) = {
val skip = (page - 1) * itemDisplay
val query = MongoDBObject("blogId" -> blogId)
val totalRow = count(query)
var totalPage = totalRow / itemDisplay
if (totalRow - totalPage * itemDisplay > 0) {
totalPage += 1
}
val sort = if (sortMode == SortMode.newest) {
MongoDBObject("publishedDate" -> -1)
} else {
query.put("publishedDate", MongoDBObject("$gt" -> DateTime.now().minusDays(30)))
MongoDBObject("commentTotal" -> -1, "clicked" -> -1, "publishedDate" -> -1)
}
val articles = find(query)
.skip(skip)
.limit(itemDisplay)
.sort(sort)
.toList
(articles, totalPage.toInt)
}
def findByTag(tag: String, sortMode: String, page: Int = 1, itemDisplay: Int = 10) = {
val skip = (page - 1) * itemDisplay
val query = MongoDBObject("tags" -> MongoDBObject("$regex" -> tag))
val totalRow = count(query)
var totalPage = totalRow / itemDisplay
if (totalRow - totalPage * itemDisplay > 0) {
totalPage += 1
}
val sort = if (sortMode == SortMode.newest) {
MongoDBObject("publishedDate" -> -1)
} else {
query.put("publishedDate", MongoDBObject("$gt" -> DateTime.now().minusDays(30)))
MongoDBObject("commentTotal" -> -1, "clicked" -> -1, "publishedDate" -> -1)
}
val articles = find(query)
.skip(skip)
.limit(itemDisplay)
.sort(sort)
.toList
(articles, totalPage.toInt)
}
def latest(take: Int = 10) = {
val blogIds = BlogDao.canShowInHomePage.map(_._id)
find(MongoDBObject("blogId" -> MongoDBObject("$in" -> blogIds)))
.sort(MongoDBObject("publishedDate" -> -1))
.take(take)
.toList
}
def mostRead(take: Int = 6) = find(MongoDBObject.empty)
.sort(MongoDBObject("clicked" -> -1))
.take(take)
.toList
def mostReadByCatId(catId: ObjectId, take: Int) = {
val blogIds = BlogDao.findByCatId(catId).map(_._id)
val query = MongoDBObject("blogId" -> MongoDBObject("$in" -> blogIds))
find(query).sort(MongoDBObject("clicked" -> -1)).take(take).toList
}
def mostReadByBlogId(blogId: ObjectId, take: Int) = {
val query = MongoDBObject("blogId" -> blogId)
find(query).sort(MongoDBObject("clicked" -> -1)).take(take).toList
}
def findByUrl(url: String) = findOne(MongoDBObject("url" -> url))
def findByUniqueTitleAndBlogName(blogName: String, uniqueTitle: String) = findOne(
MongoDBObject("uniqueTitle" -> uniqueTitle, "blogName" -> blogName)
)
def removeByBlogId(blogId: ObjectId) = remove(MongoDBObject("blogId" -> blogId))
def needToUpdateComment = find(
MongoDBObject(
"publishedDate" -> MongoDBObject("$gt" -> DateTime.now().minusDays(30))
)
).toList
}
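/**
 * Illustrative sketch only, not part of the original DAO: the totalPage computation repeated in
 * the finders above is ceiling division of the row count by the page size. The hypothetical
 * helper below shows the equivalent one-liner, e.g. totalPages(25, 10) == 3 and
 * totalPages(20, 10) == 2.
 */
private[dao] object ArticleDaoPaginationSketch {
  def totalPages(totalRow: Long, itemDisplay: Int): Long =
    (totalRow + itemDisplay - 1) / itemDisplay
}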
| SunriseSoftVN/hayhayblog | core/app/dao/ArticleDao.scala | Scala | gpl-2.0 | 4,303 |
package api.hue.dao.attribute
import play.api.libs.json._
/**
* Represents on/off state
*
* @author ddexter
*/
case class On(on: Boolean) extends Attribute {
import On._
override def name: String = NAME
override def toJs: JsObject = Json.obj("on" -> on)
}
object On {
val NAME: String = "on"
implicit val reads: Reads[On] = (__ \ "on").read[Boolean].map(On(_))
}
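/**
 * Illustrative sketch only, not part of the original file: with the implicit Reads defined in
 * the companion object above, Play JSON can parse a Hue payload such as {"on": true} back into
 * an On attribute, and toJs produces the matching JSON for writes.
 */
private object OnUsageSketch {
  val decoded: On = Json.parse("""{"on": true}""").as[On] // == On(true)
  val encoded: JsObject = On(false).toJs                  // == Json.obj("on" -> false)
}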
| ddexter/HomeBackend | src/main/scala/api/hue/dao/attribute/On.scala | Scala | apache-2.0 | 384 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.sql.{Date, Timestamp}
import java.util.Locale
import javax.xml.bind.DatatypeConverter
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.antlr.v4.runtime.{ParserRuleContext, Token}
import org.antlr.v4.runtime.tree.{ParseTree, RuleNode, TerminalNode}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{First, Last}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.random.RandomSampler
/**
* The AstBuilder converts an ANTLR4 ParseTree into a catalyst Expression, LogicalPlan or
* TableIdentifier.
*/
class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging {
import ParserUtils._
protected def typedVisit[T](ctx: ParseTree): T = {
ctx.accept(this).asInstanceOf[T]
}
/**
* Override the default behavior for all visit methods. This will only return a non-null result
* when the context has only one child. This is done because there is no generic method to
* combine the results of the context children. In all other cases null is returned.
*/
override def visitChildren(node: RuleNode): AnyRef = {
if (node.getChildCount == 1) {
node.getChild(0).accept(this)
} else {
null
}
}
override def visitSingleStatement(ctx: SingleStatementContext): LogicalPlan = withOrigin(ctx) {
visit(ctx.statement).asInstanceOf[LogicalPlan]
}
override def visitSingleExpression(ctx: SingleExpressionContext): Expression = withOrigin(ctx) {
visitNamedExpression(ctx.namedExpression)
}
override def visitSingleTableIdentifier(
ctx: SingleTableIdentifierContext): TableIdentifier = withOrigin(ctx) {
visitTableIdentifier(ctx.tableIdentifier)
}
override def visitSingleFunctionIdentifier(
ctx: SingleFunctionIdentifierContext): FunctionIdentifier = withOrigin(ctx) {
visitFunctionIdentifier(ctx.functionIdentifier)
}
override def visitSingleDataType(ctx: SingleDataTypeContext): DataType = withOrigin(ctx) {
visitSparkDataType(ctx.dataType)
}
/* ********************************************************************************************
* Plan parsing
* ******************************************************************************************** */
protected def plan(tree: ParserRuleContext): LogicalPlan = typedVisit(tree)
/**
* Create a top-level plan with Common Table Expressions.
*/
override def visitQuery(ctx: QueryContext): LogicalPlan = withOrigin(ctx) {
val query = plan(ctx.queryNoWith)
// Apply CTEs
query.optional(ctx.ctes) {
val ctes = ctx.ctes.namedQuery.asScala.map { nCtx =>
val namedQuery = visitNamedQuery(nCtx)
(namedQuery.alias, namedQuery)
}
// Check for duplicate names.
checkDuplicateKeys(ctes, ctx)
With(query, ctes)
}
}
/**
* Create a named logical plan.
*
* This is only used for Common Table Expressions.
*/
override def visitNamedQuery(ctx: NamedQueryContext): SubqueryAlias = withOrigin(ctx) {
SubqueryAlias(ctx.name.getText, plan(ctx.query))
}
/**
* Create a logical plan which allows for multiple inserts using one 'from' statement. These
* queries have the following SQL form:
* {{{
* [WITH cte...]?
* FROM src
* [INSERT INTO tbl1 SELECT *]+
* }}}
* For example:
* {{{
* FROM db.tbl1 A
* INSERT INTO dbo.tbl1 SELECT * WHERE A.value = 10 LIMIT 5
* INSERT INTO dbo.tbl2 SELECT * WHERE A.value = 12
* }}}
* This (Hive) feature cannot be combined with set-operators.
*/
override def visitMultiInsertQuery(ctx: MultiInsertQueryContext): LogicalPlan = withOrigin(ctx) {
val from = visitFromClause(ctx.fromClause)
// Build the insert clauses.
val inserts = ctx.multiInsertQueryBody.asScala.map {
body =>
validate(body.querySpecification.fromClause == null,
"Multi-Insert queries cannot have a FROM clause in their individual SELECT statements",
body)
withQuerySpecification(body.querySpecification, from).
// Add organization statements.
optionalMap(body.queryOrganization)(withQueryResultClauses).
// Add insert.
optionalMap(body.insertInto())(withInsertInto)
}
// If there are multiple INSERTS just UNION them together into one query.
inserts match {
case Seq(query) => query
case queries => Union(queries)
}
}
/**
* Create a logical plan for a regular (single-insert) query.
*/
override def visitSingleInsertQuery(
ctx: SingleInsertQueryContext): LogicalPlan = withOrigin(ctx) {
plan(ctx.queryTerm).
// Add organization statements.
optionalMap(ctx.queryOrganization)(withQueryResultClauses).
// Add insert.
optionalMap(ctx.insertInto())(withInsertInto)
}
/**
* Add an INSERT INTO [TABLE]/INSERT OVERWRITE TABLE operation to the logical plan.
*/
private def withInsertInto(
ctx: InsertIntoContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val tableIdent = visitTableIdentifier(ctx.tableIdentifier)
val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)
val dynamicPartitionKeys: Map[String, Option[String]] = partitionKeys.filter(_._2.isEmpty)
if (ctx.EXISTS != null && dynamicPartitionKeys.nonEmpty) {
throw new ParseException(s"Dynamic partitions do not support IF NOT EXISTS. Specified " +
"partitions with value: " + dynamicPartitionKeys.keys.mkString("[", ",", "]"), ctx)
}
InsertIntoTable(
UnresolvedRelation(tableIdent),
partitionKeys,
query,
ctx.OVERWRITE != null,
ctx.EXISTS != null)
}
/**
* Create a partition specification map.
*/
override def visitPartitionSpec(
ctx: PartitionSpecContext): Map[String, Option[String]] = withOrigin(ctx) {
val parts = ctx.partitionVal.asScala.map { pVal =>
val name = pVal.identifier.getText
val value = Option(pVal.constant).map(visitStringConstant)
name -> value
}
// Before calling `toMap`, we check for duplicated keys to avoid silently ignoring partition
// values in a partition spec like PARTITION(a='1', b='2', a='3'). The real semantic check for
// partition columns will be done in the analyzer.
checkDuplicateKeys(parts, ctx)
parts.toMap
}
/**
* Create a partition specification map without optional values.
*/
protected def visitNonOptionalPartitionSpec(
ctx: PartitionSpecContext): Map[String, String] = withOrigin(ctx) {
visitPartitionSpec(ctx).map {
case (key, None) => throw new ParseException(s"Found an empty partition key '$key'.", ctx)
case (key, Some(value)) => key -> value
}
}
/**
* Convert a constant of any type into a string. This is typically used in DDL commands, and its
* main purpose is to prevent slight differences due to back-to-back conversions, i.e.:
* String -> Literal -> String.
*/
protected def visitStringConstant(ctx: ConstantContext): String = withOrigin(ctx) {
ctx match {
case s: StringLiteralContext => createString(s)
case o => o.getText
}
}
/**
* Add ORDER BY/SORT BY/CLUSTER BY/DISTRIBUTE BY/LIMIT/WINDOWS clauses to the logical plan. These
* clauses determine the shape (ordering/partitioning/rows) of the query result.
*/
private def withQueryResultClauses(
ctx: QueryOrganizationContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
import ctx._
// Handle ORDER BY, SORT BY, DISTRIBUTE BY, and CLUSTER BY clause.
val withOrder = if (
!order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
// ORDER BY ...
Sort(order.asScala.map(visitSortItem), global = true, query)
} else if (order.isEmpty && !sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
// SORT BY ...
Sort(sort.asScala.map(visitSortItem), global = false, query)
} else if (order.isEmpty && sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
// DISTRIBUTE BY ...
withRepartitionByExpression(ctx, expressionList(distributeBy), query)
} else if (order.isEmpty && !sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
// SORT BY ... DISTRIBUTE BY ...
Sort(
sort.asScala.map(visitSortItem),
global = false,
withRepartitionByExpression(ctx, expressionList(distributeBy), query))
} else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && !clusterBy.isEmpty) {
// CLUSTER BY ...
val expressions = expressionList(clusterBy)
Sort(
expressions.map(SortOrder(_, Ascending)),
global = false,
withRepartitionByExpression(ctx, expressions, query))
} else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
// [EMPTY]
query
} else {
throw new ParseException(
"Combination of ORDER BY/SORT BY/DISTRIBUTE BY/CLUSTER BY is not supported", ctx)
}
// WINDOWS
val withWindow = withOrder.optionalMap(windows)(withWindows)
// LIMIT
withWindow.optional(limit) {
Limit(typedVisit(limit), withWindow)
}
}
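// Illustrative example (not in the original source): for "SELECT * FROM t ORDER BY a LIMIT 10"
// this method first wraps the query in Sort(Seq(SortOrder('a', Ascending, ...)), global = true, ...)
// and then wraps that in Limit(Literal(10), ...); the mutually exclusive ORDER BY / SORT BY /
// DISTRIBUTE BY / CLUSTER BY branches above ensure only one ordering/partitioning shape is produced.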
/**
* Create a clause for DISTRIBUTE BY.
*/
protected def withRepartitionByExpression(
ctx: QueryOrganizationContext,
expressions: Seq[Expression],
query: LogicalPlan): LogicalPlan = {
throw new ParseException("DISTRIBUTE BY is not supported", ctx)
}
/**
* Create a logical plan using a query specification.
*/
override def visitQuerySpecification(
ctx: QuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
val from = OneRowRelation.optional(ctx.fromClause) {
visitFromClause(ctx.fromClause)
}
withQuerySpecification(ctx, from)
}
/**
* Add a query specification to a logical plan. The query specification is the core of the logical
* plan; this is where sourcing (FROM clause), transforming (SELECT TRANSFORM/MAP/REDUCE),
* projection (SELECT), aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) take place.
*
* Note that query hints are ignored (both by the parser and the builder).
*/
private def withQuerySpecification(
ctx: QuerySpecificationContext,
relation: LogicalPlan): LogicalPlan = withOrigin(ctx) {
import ctx._
// WHERE
def filter(ctx: BooleanExpressionContext, plan: LogicalPlan): LogicalPlan = {
Filter(expression(ctx), plan)
}
// Expressions.
val expressions = Option(namedExpressionSeq).toSeq
.flatMap(_.namedExpression.asScala)
.map(typedVisit[Expression])
// Create either a transform or a regular query.
val specType = Option(kind).map(_.getType).getOrElse(SqlBaseParser.SELECT)
specType match {
case SqlBaseParser.MAP | SqlBaseParser.REDUCE | SqlBaseParser.TRANSFORM =>
// Transform
// Add where.
val withFilter = relation.optionalMap(where)(filter)
// Create the attributes.
val (attributes, schemaLess) = if (colTypeList != null) {
// Typed return columns.
(createSchema(colTypeList).toAttributes, false)
} else if (identifierSeq != null) {
// Untyped return columns.
val attrs = visitIdentifierSeq(identifierSeq).map { name =>
AttributeReference(name, StringType, nullable = true)()
}
(attrs, false)
} else {
(Seq(AttributeReference("key", StringType)(),
AttributeReference("value", StringType)()), true)
}
// Create the transform.
ScriptTransformation(
expressions,
string(script),
attributes,
withFilter,
withScriptIOSchema(
ctx, inRowFormat, recordWriter, outRowFormat, recordReader, schemaLess))
case SqlBaseParser.SELECT =>
// Regular select
// Add lateral views.
val withLateralView = ctx.lateralView.asScala.foldLeft(relation)(withGenerate)
// Add where.
val withFilter = withLateralView.optionalMap(where)(filter)
// Add aggregation or a project.
val namedExpressions = expressions.map {
case e: NamedExpression => e
case e: Expression => UnresolvedAlias(e)
}
val withProject = if (aggregation != null) {
withAggregation(aggregation, namedExpressions, withFilter)
} else if (namedExpressions.nonEmpty) {
Project(namedExpressions, withFilter)
} else {
withFilter
}
// Having
val withHaving = withProject.optional(having) {
// Note that we add a cast to non-predicate expressions. If the expression itself is
// already boolean, the optimizer will get rid of the unnecessary cast.
val predicate = expression(having) match {
case p: Predicate => p
case e => Cast(e, BooleanType)
}
Filter(predicate, withProject)
}
// Distinct
val withDistinct = if (setQuantifier() != null && setQuantifier().DISTINCT() != null) {
Distinct(withHaving)
} else {
withHaving
}
// Window
val withWindow = withDistinct.optionalMap(windows)(withWindows)
// Hint
withWindow.optionalMap(hint)(withHints)
}
}
/**
* Create a (Hive based) [[ScriptInputOutputSchema]].
*/
protected def withScriptIOSchema(
ctx: QuerySpecificationContext,
inRowFormat: RowFormatContext,
recordWriter: Token,
outRowFormat: RowFormatContext,
recordReader: Token,
schemaLess: Boolean): ScriptInputOutputSchema = {
throw new ParseException("Script Transform is not supported", ctx)
}
/**
* Create a logical plan for a given 'FROM' clause. Note that we support multiple (comma
* separated) relations here; these get converted into a single plan by a condition-less inner join.
*/
override def visitFromClause(ctx: FromClauseContext): LogicalPlan = withOrigin(ctx) {
val from = ctx.relation.asScala.foldLeft(null: LogicalPlan) { (left, relation) =>
val right = plan(relation.relationPrimary)
val join = right.optionalMap(left)(Join(_, _, Inner, None))
withJoinRelations(join, relation)
}
ctx.lateralView.asScala.foldLeft(from)(withGenerate)
}
/**
* Connect two queries by a Set operator.
*
* Supported Set operators are:
* - UNION [DISTINCT]
* - UNION ALL
* - EXCEPT [DISTINCT]
* - MINUS [DISTINCT]
* - INTERSECT [DISTINCT]
*/
override def visitSetOperation(ctx: SetOperationContext): LogicalPlan = withOrigin(ctx) {
val left = plan(ctx.left)
val right = plan(ctx.right)
val all = Option(ctx.setQuantifier()).exists(_.ALL != null)
ctx.operator.getType match {
case SqlBaseParser.UNION if all =>
Union(left, right)
case SqlBaseParser.UNION =>
Distinct(Union(left, right))
case SqlBaseParser.INTERSECT if all =>
throw new ParseException("INTERSECT ALL is not supported.", ctx)
case SqlBaseParser.INTERSECT =>
Intersect(left, right)
case SqlBaseParser.EXCEPT if all =>
throw new ParseException("EXCEPT ALL is not supported.", ctx)
case SqlBaseParser.EXCEPT =>
Except(left, right)
case SqlBaseParser.SETMINUS if all =>
throw new ParseException("MINUS ALL is not supported.", ctx)
case SqlBaseParser.SETMINUS =>
Except(left, right)
}
}
/**
* Add a [[WithWindowDefinition]] operator to a logical plan.
*/
private def withWindows(
ctx: WindowsContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// Collect all window specifications defined in the WINDOW clause.
val baseWindowMap = ctx.namedWindow.asScala.map {
wCtx =>
(wCtx.identifier.getText, typedVisit[WindowSpec](wCtx.windowSpec))
}.toMap
// Handle cases like
// window w1 as (partition by p_mfgr order by p_name
// range between 2 preceding and 2 following),
// w2 as w1
val windowMapView = baseWindowMap.mapValues {
case WindowSpecReference(name) =>
baseWindowMap.get(name) match {
case Some(spec: WindowSpecDefinition) =>
spec
case Some(ref) =>
throw new ParseException(s"Window reference '$name' is not a window specification", ctx)
case None =>
throw new ParseException(s"Cannot resolve window reference '$name'", ctx)
}
case spec: WindowSpecDefinition => spec
}
// Note that mapValues creates a view instead of materialized map. We force materialization by
// mapping over identity.
WithWindowDefinition(windowMapView.map(identity), query)
}
/**
* Add an [[Aggregate]] or [[GroupingSets]] to a logical plan.
*/
private def withAggregation(
ctx: AggregationContext,
selectExpressions: Seq[NamedExpression],
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val groupByExpressions = expressionList(ctx.groupingExpressions)
if (ctx.GROUPING != null) {
// GROUP BY .... GROUPING SETS (...)
val selectedGroupByExprs =
ctx.groupingSet.asScala.map(_.expression.asScala.map(e => expression(e)))
GroupingSets(selectedGroupByExprs, groupByExpressions, query, selectExpressions)
} else {
// GROUP BY .... (WITH CUBE | WITH ROLLUP)?
val mappedGroupByExpressions = if (ctx.CUBE != null) {
Seq(Cube(groupByExpressions))
} else if (ctx.ROLLUP != null) {
Seq(Rollup(groupByExpressions))
} else {
groupByExpressions
}
Aggregate(mappedGroupByExpressions, selectExpressions, query)
}
}
/**
* Add a [[Hint]] to a logical plan.
*/
private def withHints(
ctx: HintContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val stmt = ctx.hintStatement
Hint(stmt.hintName.getText, stmt.parameters.asScala.map(_.getText), query)
}
/**
* Add a [[Generate]] (Lateral View) to a logical plan.
*/
private def withGenerate(
query: LogicalPlan,
ctx: LateralViewContext): LogicalPlan = withOrigin(ctx) {
val expressions = expressionList(ctx.expression)
Generate(
UnresolvedGenerator(visitFunctionName(ctx.qualifiedName), expressions),
join = true,
outer = ctx.OUTER != null,
Some(ctx.tblName.getText.toLowerCase),
ctx.colName.asScala.map(_.getText).map(UnresolvedAttribute.apply),
query)
}
/**
* Create a single relation referenced in a FROM clause. This method is used when a part of the
* join condition is nested, for example:
* {{{
* select * from t1 join (t2 cross join t3) on col1 = col2
* }}}
*/
override def visitRelation(ctx: RelationContext): LogicalPlan = withOrigin(ctx) {
withJoinRelations(plan(ctx.relationPrimary), ctx)
}
/**
* Join one or more [[LogicalPlan]]s to the current logical plan.
*/
private def withJoinRelations(base: LogicalPlan, ctx: RelationContext): LogicalPlan = {
ctx.joinRelation.asScala.foldLeft(base) { (left, join) =>
withOrigin(join) {
val baseJoinType = join.joinType match {
case null => Inner
case jt if jt.CROSS != null => Cross
case jt if jt.FULL != null => FullOuter
case jt if jt.SEMI != null => LeftSemi
case jt if jt.ANTI != null => LeftAnti
case jt if jt.LEFT != null => LeftOuter
case jt if jt.RIGHT != null => RightOuter
case _ => Inner
}
// Resolve the join type and join condition
val (joinType, condition) = Option(join.joinCriteria) match {
case Some(c) if c.USING != null =>
(UsingJoin(baseJoinType, c.identifier.asScala.map(_.getText)), None)
case Some(c) if c.booleanExpression != null =>
(baseJoinType, Option(expression(c.booleanExpression)))
case None if join.NATURAL != null =>
if (baseJoinType == Cross) {
throw new ParseException("NATURAL CROSS JOIN is not supported", ctx)
}
(NaturalJoin(baseJoinType), None)
case None =>
(baseJoinType, None)
}
Join(left, plan(join.right), joinType, condition)
}
}
}
/**
* Add a [[Sample]] to a logical plan.
*
* This currently supports the following sampling methods:
* - TABLESAMPLE(x ROWS): Sample the table down to the given number of rows.
* - TABLESAMPLE(x PERCENT): Sample the table down to the given percentage. Note that percentages
* are defined as a number between 0 and 100.
* - TABLESAMPLE(BUCKET x OUT OF y): Sample the table down to an 'x' divided by 'y' fraction.
*/
private def withSample(ctx: SampleContext, query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// Create a sampled plan if we need one.
def sample(fraction: Double): Sample = {
// The range of fraction accepted by Sample is [0, 1]. Because Hive's block sampling
// function takes X PERCENT as the input and the range of X is [0, 100], we need to
// adjust the fraction.
val eps = RandomSampler.roundingEpsilon
validate(fraction >= 0.0 - eps && fraction <= 1.0 + eps,
s"Sampling fraction ($fraction) must be on interval [0, 1]",
ctx)
Sample(0.0, fraction, withReplacement = false, (math.random * 1000).toInt, query)(true)
}
ctx.sampleType.getType match {
case SqlBaseParser.ROWS =>
Limit(expression(ctx.expression), query)
case SqlBaseParser.PERCENTLIT =>
val fraction = ctx.percentage.getText.toDouble
sample(fraction / 100.0d)
case SqlBaseParser.BYTELENGTH_LITERAL =>
throw new ParseException(
"TABLESAMPLE(byteLengthLiteral) is not supported", ctx)
case SqlBaseParser.BUCKET if ctx.ON != null =>
if (ctx.identifier != null) {
throw new ParseException(
"TABLESAMPLE(BUCKET x OUT OF y ON colname) is not supported", ctx)
} else {
throw new ParseException(
"TABLESAMPLE(BUCKET x OUT OF y ON function) is not supported", ctx)
}
case SqlBaseParser.BUCKET =>
sample(ctx.numerator.getText.toDouble / ctx.denominator.getText.toDouble)
}
}
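// Illustrative example (not in the original source): "TABLESAMPLE(30 PERCENT)" reaches the
// PERCENTLIT branch above and becomes sample(30.0 / 100.0d), i.e. a Sample plan with fraction
// 0.3 and withReplacement = false, while "TABLESAMPLE(5 ROWS)" is simply a Limit(5, query).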
/**
* Create a logical plan for a sub-query.
*/
override def visitSubquery(ctx: SubqueryContext): LogicalPlan = withOrigin(ctx) {
plan(ctx.queryNoWith)
}
/**
* Create an un-aliased table reference. This is typically used for top-level table references,
* for example:
* {{{
* INSERT INTO db.tbl2
* TABLE db.tbl1
* }}}
*/
override def visitTable(ctx: TableContext): LogicalPlan = withOrigin(ctx) {
UnresolvedRelation(visitTableIdentifier(ctx.tableIdentifier))
}
/**
* Create an aliased table reference. This is typically used in FROM clauses.
*/
override def visitTableName(ctx: TableNameContext): LogicalPlan = withOrigin(ctx) {
val table = UnresolvedRelation(visitTableIdentifier(ctx.tableIdentifier))
val tableWithAlias = Option(ctx.strictIdentifier).map(_.getText) match {
case Some(strictIdentifier) =>
SubqueryAlias(strictIdentifier, table)
case _ => table
}
tableWithAlias.optionalMap(ctx.sample)(withSample)
}
/**
* Create a table-valued function call with arguments, e.g. range(1000)
*/
override def visitTableValuedFunction(ctx: TableValuedFunctionContext)
: LogicalPlan = withOrigin(ctx) {
val func = ctx.functionTable
val aliases = if (func.tableAlias.identifierList != null) {
visitIdentifierList(func.tableAlias.identifierList)
} else {
Seq.empty
}
val tvf = UnresolvedTableValuedFunction(
func.identifier.getText, func.expression.asScala.map(expression), aliases)
tvf.optionalMap(func.tableAlias.identifier)(aliasPlan)
}
/**
* Create an inline table (a virtual table in Hive parlance).
*/
override def visitInlineTable(ctx: InlineTableContext): LogicalPlan = withOrigin(ctx) {
// Get the backing expressions.
val rows = ctx.expression.asScala.map { e =>
expression(e) match {
// inline table comes in two styles:
// style 1: values (1), (2), (3) -- multiple columns are supported
// style 2: values 1, 2, 3 -- only a single column is supported here
case struct: CreateNamedStruct => struct.valExprs // style 1
case child => Seq(child) // style 2
}
}
val aliases = if (ctx.tableAlias.identifierList != null) {
visitIdentifierList(ctx.tableAlias.identifierList)
} else {
Seq.tabulate(rows.head.size)(i => s"col${i + 1}")
}
val table = UnresolvedInlineTable(aliases, rows)
table.optionalMap(ctx.tableAlias.identifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a join relation. This is practically the same as
* visitAliasedQuery and visitNamedExpression; ANTLR4, however, requires us to use 3 different
* hooks.
*/
override def visitAliasedRelation(ctx: AliasedRelationContext): LogicalPlan = withOrigin(ctx) {
plan(ctx.relation)
.optionalMap(ctx.sample)(withSample)
.optionalMap(ctx.strictIdentifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a sub-query. This is practically the same as
* visitAliasedRelation and visitNamedExpression; ANTLR4, however, requires us to use 3 different
* hooks.
*/
override def visitAliasedQuery(ctx: AliasedQueryContext): LogicalPlan = withOrigin(ctx) {
plan(ctx.queryNoWith)
.optionalMap(ctx.sample)(withSample)
.optionalMap(ctx.strictIdentifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a LogicalPlan.
*/
private def aliasPlan(alias: ParserRuleContext, plan: LogicalPlan): LogicalPlan = {
SubqueryAlias(alias.getText, plan)
}
/**
* Create a Sequence of Strings for a parenthesis enclosed alias list.
*/
override def visitIdentifierList(ctx: IdentifierListContext): Seq[String] = withOrigin(ctx) {
visitIdentifierSeq(ctx.identifierSeq)
}
/**
* Create a Sequence of Strings for an identifier list.
*/
override def visitIdentifierSeq(ctx: IdentifierSeqContext): Seq[String] = withOrigin(ctx) {
ctx.identifier.asScala.map(_.getText)
}
/* ********************************************************************************************
* Table Identifier parsing
* ******************************************************************************************** */
/**
* Create a [[TableIdentifier]] from a 'tableName' or 'databaseName'.'tableName' pattern.
*/
override def visitTableIdentifier(
ctx: TableIdentifierContext): TableIdentifier = withOrigin(ctx) {
TableIdentifier(ctx.table.getText, Option(ctx.db).map(_.getText))
}
/**
* Create a [[FunctionIdentifier]] from a 'functionName' or 'databaseName'.'functionName' pattern.
*/
override def visitFunctionIdentifier(
ctx: FunctionIdentifierContext): FunctionIdentifier = withOrigin(ctx) {
FunctionIdentifier(ctx.function.getText, Option(ctx.db).map(_.getText))
}
/* ********************************************************************************************
* Expression parsing
* ******************************************************************************************** */
/**
* Create an expression from the given context. This method just passes the context on to the
* visitor and only takes care of typing (We assume that the visitor returns an Expression here).
*/
protected def expression(ctx: ParserRuleContext): Expression = typedVisit(ctx)
/**
* Create sequence of expressions from the given sequence of contexts.
*/
private def expressionList(trees: java.util.List[ExpressionContext]): Seq[Expression] = {
trees.asScala.map(expression)
}
/**
* Create a star (i.e. all) expression; this selects all elements (in the specified object).
* Both un-targeted (global) and targeted aliases are supported.
*/
override def visitStar(ctx: StarContext): Expression = withOrigin(ctx) {
UnresolvedStar(Option(ctx.qualifiedName()).map(_.identifier.asScala.map(_.getText)))
}
/**
* Create an aliased expression if an alias is specified. Both single and multi-aliases are
* supported.
*/
override def visitNamedExpression(ctx: NamedExpressionContext): Expression = withOrigin(ctx) {
val e = expression(ctx.expression)
if (ctx.identifier != null) {
Alias(e, ctx.identifier.getText)()
} else if (ctx.identifierList != null) {
MultiAlias(e, visitIdentifierList(ctx.identifierList))
} else {
e
}
}
/**
* Combine a number of boolean expressions into a balanced expression tree. These expressions are
* either combined by a logical [[And]] or a logical [[Or]].
*
* A balanced binary tree is created because regular left recursive trees cause considerable
* performance degradations and can cause stack overflows.
*/
override def visitLogicalBinary(ctx: LogicalBinaryContext): Expression = withOrigin(ctx) {
val expressionType = ctx.operator.getType
val expressionCombiner = expressionType match {
case SqlBaseParser.AND => And.apply _
case SqlBaseParser.OR => Or.apply _
}
// Collect all similar left hand contexts.
val contexts = ArrayBuffer(ctx.right)
var current = ctx.left
def collectContexts: Boolean = current match {
case lbc: LogicalBinaryContext if lbc.operator.getType == expressionType =>
contexts += lbc.right
current = lbc.left
true
case _ =>
contexts += current
false
}
while (collectContexts) {
// No body - all updates take place in the collectContexts.
}
// Reverse the contexts to have them in the same sequence as in the SQL statement & turn them
// into expressions.
val expressions = contexts.reverse.map(expression)
// Create a balanced tree.
def reduceToExpressionTree(low: Int, high: Int): Expression = high - low match {
case 0 =>
expressions(low)
case 1 =>
expressionCombiner(expressions(low), expressions(high))
case x =>
val mid = low + x / 2
expressionCombiner(
reduceToExpressionTree(low, mid),
reduceToExpressionTree(mid + 1, high))
}
reduceToExpressionTree(0, expressions.size - 1)
}
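// Illustrative example (not in the original source): parsing "a AND b AND c AND d" collects the
// four operand contexts and reduceToExpressionTree builds And(And(a, b), And(c, d)) rather than
// the left-deep And(And(And(a, b), c), d), keeping the tree depth logarithmic in the operand count.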
/**
* Invert a boolean expression.
*/
override def visitLogicalNot(ctx: LogicalNotContext): Expression = withOrigin(ctx) {
Not(expression(ctx.booleanExpression()))
}
/**
* Create a filtering correlated sub-query (EXISTS).
*/
override def visitExists(ctx: ExistsContext): Expression = {
Exists(plan(ctx.query))
}
/**
* Create a comparison expression. This compares two expressions. The following comparison
* operators are supported:
* - Equal: '=' or '=='
* - Null-safe Equal: '<=>'
* - Not Equal: '<>' or '!='
* - Less than: '<'
* - Less than or Equal: '<='
* - Greater than: '>'
* - Greater than or Equal: '>='
*/
override def visitComparison(ctx: ComparisonContext): Expression = withOrigin(ctx) {
val left = expression(ctx.left)
val right = expression(ctx.right)
val operator = ctx.comparisonOperator().getChild(0).asInstanceOf[TerminalNode]
operator.getSymbol.getType match {
case SqlBaseParser.EQ =>
EqualTo(left, right)
case SqlBaseParser.NSEQ =>
EqualNullSafe(left, right)
case SqlBaseParser.NEQ | SqlBaseParser.NEQJ =>
Not(EqualTo(left, right))
case SqlBaseParser.LT =>
LessThan(left, right)
case SqlBaseParser.LTE =>
LessThanOrEqual(left, right)
case SqlBaseParser.GT =>
GreaterThan(left, right)
case SqlBaseParser.GTE =>
GreaterThanOrEqual(left, right)
}
}
/**
* Create a predicated expression. A predicated expression is a normal expression with a
* predicate attached to it, for example:
* {{{
* a + 1 IS NULL
* }}}
*/
override def visitPredicated(ctx: PredicatedContext): Expression = withOrigin(ctx) {
val e = expression(ctx.valueExpression)
if (ctx.predicate != null) {
withPredicate(e, ctx.predicate)
} else {
e
}
}
/**
* Add a predicate to the given expression. Supported expressions are:
* - (NOT) BETWEEN
* - (NOT) IN
* - (NOT) LIKE
* - (NOT) RLIKE
* - IS (NOT) NULL.
* - IS (NOT) DISTINCT FROM
*/
private def withPredicate(e: Expression, ctx: PredicateContext): Expression = withOrigin(ctx) {
// Invert a predicate if it has a valid NOT clause.
def invertIfNotDefined(e: Expression): Expression = ctx.NOT match {
case null => e
case not => Not(e)
}
// Create the predicate.
ctx.kind.getType match {
case SqlBaseParser.BETWEEN =>
// BETWEEN is translated to lower <= e && e <= upper
invertIfNotDefined(And(
GreaterThanOrEqual(e, expression(ctx.lower)),
LessThanOrEqual(e, expression(ctx.upper))))
case SqlBaseParser.IN if ctx.query != null =>
invertIfNotDefined(In(e, Seq(ListQuery(plan(ctx.query)))))
case SqlBaseParser.IN =>
invertIfNotDefined(In(e, ctx.expression.asScala.map(expression)))
case SqlBaseParser.LIKE =>
invertIfNotDefined(Like(e, expression(ctx.pattern)))
case SqlBaseParser.RLIKE =>
invertIfNotDefined(RLike(e, expression(ctx.pattern)))
case SqlBaseParser.NULL if ctx.NOT != null =>
IsNotNull(e)
case SqlBaseParser.NULL =>
IsNull(e)
case SqlBaseParser.DISTINCT if ctx.NOT != null =>
EqualNullSafe(e, expression(ctx.right))
case SqlBaseParser.DISTINCT =>
Not(EqualNullSafe(e, expression(ctx.right)))
}
}
/**
* Create a binary arithmetic expression. The following arithmetic operators are supported:
* - Multiplication: '*'
* - Division: '/'
* - Hive Long Division: 'DIV'
* - Modulo: '%'
* - Addition: '+'
* - Subtraction: '-'
* - Binary AND: '&'
* - Binary XOR
* - Binary OR: '|'
*/
override def visitArithmeticBinary(ctx: ArithmeticBinaryContext): Expression = withOrigin(ctx) {
val left = expression(ctx.left)
val right = expression(ctx.right)
ctx.operator.getType match {
case SqlBaseParser.ASTERISK =>
Multiply(left, right)
case SqlBaseParser.SLASH =>
Divide(left, right)
case SqlBaseParser.PERCENT =>
Remainder(left, right)
case SqlBaseParser.DIV =>
Cast(Divide(left, right), LongType)
case SqlBaseParser.PLUS =>
Add(left, right)
case SqlBaseParser.MINUS =>
Subtract(left, right)
case SqlBaseParser.AMPERSAND =>
BitwiseAnd(left, right)
case SqlBaseParser.HAT =>
BitwiseXor(left, right)
case SqlBaseParser.PIPE =>
BitwiseOr(left, right)
}
}
/**
* Create a unary arithmetic expression. The following arithmetic operators are supported:
* - Plus: '+'
* - Minus: '-'
* - Bitwise Not: '~'
*/
override def visitArithmeticUnary(ctx: ArithmeticUnaryContext): Expression = withOrigin(ctx) {
val value = expression(ctx.valueExpression)
ctx.operator.getType match {
case SqlBaseParser.PLUS =>
value
case SqlBaseParser.MINUS =>
UnaryMinus(value)
case SqlBaseParser.TILDE =>
BitwiseNot(value)
}
}
/**
* Create a [[Cast]] expression.
*/
override def visitCast(ctx: CastContext): Expression = withOrigin(ctx) {
Cast(expression(ctx.expression), visitSparkDataType(ctx.dataType))
}
/**
* Create a [[First]] expression.
*/
override def visitFirst(ctx: FirstContext): Expression = withOrigin(ctx) {
val ignoreNullsExpr = ctx.IGNORE != null
First(expression(ctx.expression), Literal(ignoreNullsExpr)).toAggregateExpression()
}
/**
* Create a [[Last]] expression.
*/
override def visitLast(ctx: LastContext): Expression = withOrigin(ctx) {
val ignoreNullsExpr = ctx.IGNORE != null
Last(expression(ctx.expression), Literal(ignoreNullsExpr)).toAggregateExpression()
}
/**
* Create a (windowed) Function expression.
*/
override def visitFunctionCall(ctx: FunctionCallContext): Expression = withOrigin(ctx) {
// Create the function call.
val name = ctx.qualifiedName.getText
val isDistinct = Option(ctx.setQuantifier()).exists(_.DISTINCT != null)
val arguments = ctx.namedExpression().asScala.map(expression) match {
case Seq(UnresolvedStar(None))
if name.toLowerCase(Locale.ROOT) == "count" && !isDistinct =>
// Transform COUNT(*) into COUNT(1).
Seq(Literal(1))
case expressions =>
expressions
}
val function = UnresolvedFunction(visitFunctionName(ctx.qualifiedName), arguments, isDistinct)
// Check if the function is evaluated in a windowed context.
ctx.windowSpec match {
case spec: WindowRefContext =>
UnresolvedWindowExpression(function, visitWindowRef(spec))
case spec: WindowDefContext =>
WindowExpression(function, visitWindowDef(spec))
case _ => function
}
}
/**
* Create a current timestamp/date expression. These are different from regular functions because
* they do not require the user to specify parentheses when calling them.
*/
override def visitTimeFunctionCall(ctx: TimeFunctionCallContext): Expression = withOrigin(ctx) {
ctx.name.getType match {
case SqlBaseParser.CURRENT_DATE =>
CurrentDate()
case SqlBaseParser.CURRENT_TIMESTAMP =>
CurrentTimestamp()
}
}
/**
* Create a function database (optional) and name pair.
*/
protected def visitFunctionName(ctx: QualifiedNameContext): FunctionIdentifier = {
ctx.identifier().asScala.map(_.getText) match {
case Seq(db, fn) => FunctionIdentifier(fn, Option(db))
case Seq(fn) => FunctionIdentifier(fn, None)
case other => throw new ParseException(s"Unsupported function name '${ctx.getText}'", ctx)
}
}
/**
* Create a reference to a window frame, i.e. [[WindowSpecReference]].
*/
override def visitWindowRef(ctx: WindowRefContext): WindowSpecReference = withOrigin(ctx) {
WindowSpecReference(ctx.identifier.getText)
}
/**
* Create a window definition, i.e. [[WindowSpecDefinition]].
*/
override def visitWindowDef(ctx: WindowDefContext): WindowSpecDefinition = withOrigin(ctx) {
// CLUSTER BY ... | PARTITION BY ... ORDER BY ...
val partition = ctx.partition.asScala.map(expression)
val order = ctx.sortItem.asScala.map(visitSortItem)
// RANGE/ROWS BETWEEN ...
val frameSpecOption = Option(ctx.windowFrame).map { frame =>
val frameType = frame.frameType.getType match {
case SqlBaseParser.RANGE => RangeFrame
case SqlBaseParser.ROWS => RowFrame
}
SpecifiedWindowFrame(
frameType,
visitFrameBound(frame.start),
Option(frame.end).map(visitFrameBound).getOrElse(CurrentRow))
}
WindowSpecDefinition(
partition,
order,
frameSpecOption.getOrElse(UnspecifiedFrame))
}
/**
* Create or resolve a [[FrameBoundary]]. Simple math expressions are allowed for Value
* Preceding/Following boundaries. These expressions must be constant (foldable) and return an
* integer value.
*/
override def visitFrameBound(ctx: FrameBoundContext): FrameBoundary = withOrigin(ctx) {
// We currently only allow foldable integers.
def value: Int = {
val e = expression(ctx.expression)
validate(e.resolved && e.foldable && e.dataType == IntegerType,
"Frame bound value must be a constant integer.",
ctx)
e.eval().asInstanceOf[Int]
}
// Create the FrameBoundary
ctx.boundType.getType match {
case SqlBaseParser.PRECEDING if ctx.UNBOUNDED != null =>
UnboundedPreceding
case SqlBaseParser.PRECEDING =>
ValuePreceding(value)
case SqlBaseParser.CURRENT =>
CurrentRow
case SqlBaseParser.FOLLOWING if ctx.UNBOUNDED != null =>
UnboundedFollowing
case SqlBaseParser.FOLLOWING =>
ValueFollowing(value)
}
}
/**
* Create a [[CreateStruct]] expression.
*/
override def visitRowConstructor(ctx: RowConstructorContext): Expression = withOrigin(ctx) {
CreateStruct(ctx.namedExpression().asScala.map(expression))
}
/**
* Create a [[ScalarSubquery]] expression.
*/
override def visitSubqueryExpression(
ctx: SubqueryExpressionContext): Expression = withOrigin(ctx) {
ScalarSubquery(plan(ctx.query))
}
/**
* Create a value based [[CaseWhen]] expression. This has the following SQL form:
* {{{
* CASE [expression]
* WHEN [value] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*/
override def visitSimpleCase(ctx: SimpleCaseContext): Expression = withOrigin(ctx) {
val e = expression(ctx.value)
val branches = ctx.whenClause.asScala.map { wCtx =>
(EqualTo(e, expression(wCtx.condition)), expression(wCtx.result))
}
CaseWhen(branches, Option(ctx.elseExpression).map(expression))
}
/**
* Create a condition based [[CaseWhen]] expression. This has the following SQL syntax:
* {{{
* CASE
* WHEN [predicate] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*
* @param ctx the parse tree
* */
override def visitSearchedCase(ctx: SearchedCaseContext): Expression = withOrigin(ctx) {
val branches = ctx.whenClause.asScala.map { wCtx =>
(expression(wCtx.condition), expression(wCtx.result))
}
CaseWhen(branches, Option(ctx.elseExpression).map(expression))
}
/**
* Create a dereference expression. The return type depends on the type of the parent: this can
* either be an [[UnresolvedAttribute]] (if the parent is an [[UnresolvedAttribute]]), or an
* [[UnresolvedExtractValue]] if the parent is some other expression.
*/
override def visitDereference(ctx: DereferenceContext): Expression = withOrigin(ctx) {
val attr = ctx.fieldName.getText
expression(ctx.base) match {
case UnresolvedAttribute(nameParts) =>
UnresolvedAttribute(nameParts :+ attr)
case e =>
UnresolvedExtractValue(e, Literal(attr))
}
}
/**
* Create an [[UnresolvedAttribute]] expression.
*/
override def visitColumnReference(ctx: ColumnReferenceContext): Expression = withOrigin(ctx) {
UnresolvedAttribute.quoted(ctx.getText)
}
/**
* Create an [[UnresolvedExtractValue]] expression, this is used for subscript access to an array.
*/
override def visitSubscript(ctx: SubscriptContext): Expression = withOrigin(ctx) {
UnresolvedExtractValue(expression(ctx.value), expression(ctx.index))
}
/**
* Create an expression for an expression between parentheses. This is needed because the ANTLR
* visitor cannot automatically convert the nested context into an expression.
*/
override def visitParenthesizedExpression(
ctx: ParenthesizedExpressionContext): Expression = withOrigin(ctx) {
expression(ctx.expression)
}
/**
* Create a [[SortOrder]] expression.
*/
override def visitSortItem(ctx: SortItemContext): SortOrder = withOrigin(ctx) {
val direction = if (ctx.DESC != null) {
Descending
} else {
Ascending
}
val nullOrdering = if (ctx.FIRST != null) {
NullsFirst
} else if (ctx.LAST != null) {
NullsLast
} else {
direction.defaultNullOrdering
}
SortOrder(expression(ctx.expression), direction, nullOrdering, Set.empty)
}
/**
* Create a typed Literal expression. A typed literal has the following SQL syntax:
* {{{
* [TYPE] '[VALUE]'
* }}}
* Currently Date, Timestamp and Binary typed literals are supported.
*/
override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) {
val value = string(ctx.STRING)
val valueType = ctx.identifier.getText.toUpperCase(Locale.ROOT)
try {
valueType match {
case "DATE" =>
Literal(Date.valueOf(value))
case "TIMESTAMP" =>
Literal(Timestamp.valueOf(value))
case "X" =>
val padding = if (value.length % 2 == 1) "0" else ""
Literal(DatatypeConverter.parseHexBinary(padding + value))
case other =>
throw new ParseException(s"Literals of type '$other' are currently not supported.", ctx)
}
} catch {
case e: IllegalArgumentException =>
val message = Option(e.getMessage).getOrElse(s"Exception parsing $valueType")
throw new ParseException(message, ctx)
}
}
/**
* Create a NULL literal expression.
*/
override def visitNullLiteral(ctx: NullLiteralContext): Literal = withOrigin(ctx) {
Literal(null)
}
/**
* Create a Boolean literal expression.
*/
override def visitBooleanLiteral(ctx: BooleanLiteralContext): Literal = withOrigin(ctx) {
if (ctx.getText.toBoolean) {
Literal.TrueLiteral
} else {
Literal.FalseLiteral
}
}
/**
* Create an integral literal expression. The code selects the narrowest integral type
* possible; either a BigDecimal, a Long or an Integer is returned.
*/
override def visitIntegerLiteral(ctx: IntegerLiteralContext): Literal = withOrigin(ctx) {
BigDecimal(ctx.getText) match {
case v if v.isValidInt =>
Literal(v.intValue())
case v if v.isValidLong =>
Literal(v.longValue())
case v => Literal(v.underlying())
}
}
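// Illustrative example (not in the original source): "7" fits in an Int and becomes Literal(7),
// "3000000000" no longer fits in an Int but is a valid Long and becomes Literal(3000000000L),
// and anything beyond the Long range falls through to a decimal Literal via v.underlying().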
/**
* Create a decimal literal for a regular decimal number.
*/
override def visitDecimalLiteral(ctx: DecimalLiteralContext): Literal = withOrigin(ctx) {
Literal(BigDecimal(ctx.getText).underlying())
}
/** Create a numeric literal expression. */
private def numericLiteral
(ctx: NumberContext, minValue: BigDecimal, maxValue: BigDecimal, typeName: String)
(converter: String => Any): Literal = withOrigin(ctx) {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
try {
val rawBigDecimal = BigDecimal(rawStrippedQualifier)
if (rawBigDecimal < minValue || rawBigDecimal > maxValue) {
throw new ParseException(s"Numeric literal ${rawStrippedQualifier} does not " +
s"fit in range [${minValue}, ${maxValue}] for type ${typeName}", ctx)
}
Literal(converter(rawStrippedQualifier))
} catch {
case e: NumberFormatException =>
throw new ParseException(e.getMessage, ctx)
}
}
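// Illustrative example (not in the original source), assuming the usual one-character type
// qualifier suffixes from the SQL grammar (e.g. Y for TINYINT, S for SMALLINT, L for BIGINT):
// for the literal "42Y", rawStrippedQualifier is "42" and the converter passed by
// visitTinyIntLiteral below turns it into Literal(42.toByte), after the range check against
// Byte.MinValue/Byte.MaxValue.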
/**
* Create a Byte Literal expression.
*/
override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = {
numericLiteral(ctx, Byte.MinValue, Byte.MaxValue, ByteType.simpleString)(_.toByte)
}
/**
* Create a Short Literal expression.
*/
override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = {
numericLiteral(ctx, Short.MinValue, Short.MaxValue, ShortType.simpleString)(_.toShort)
}
/**
* Create a Long Literal expression.
*/
override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = {
numericLiteral(ctx, Long.MinValue, Long.MaxValue, LongType.simpleString)(_.toLong)
}
/**
* Create a Double Literal expression.
*/
override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = {
numericLiteral(ctx, Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble)
}
/**
* Create a BigDecimal Literal expression.
*/
override def visitBigDecimalLiteral(ctx: BigDecimalLiteralContext): Literal = {
val raw = ctx.getText.substring(0, ctx.getText.length - 2)
try {
Literal(BigDecimal(raw).underlying())
} catch {
case e: AnalysisException =>
throw new ParseException(e.message, ctx)
}
}
/**
* Create a String literal expression.
*/
override def visitStringLiteral(ctx: StringLiteralContext): Literal = withOrigin(ctx) {
Literal(createString(ctx))
}
/**
* Create a String from a string literal context. This supports multiple consecutive string
* literals, which are concatenated; for example, the expression "'hello' 'world'" will be
* converted into "helloworld".
*
* Special characters can be escaped by using Hive/C-style escaping.
*/
private def createString(ctx: StringLiteralContext): String = {
ctx.STRING().asScala.map(string).mkString
}
/**
* Create a [[CalendarInterval]] literal expression. An interval expression can contain multiple
* unit value pairs, for instance: interval 2 months 2 days.
*/
override def visitInterval(ctx: IntervalContext): Literal = withOrigin(ctx) {
val intervals = ctx.intervalField.asScala.map(visitIntervalField)
validate(intervals.nonEmpty, "at least one time unit should be given for interval literal", ctx)
Literal(intervals.reduce(_.add(_)))
}
/**
* Create a [[CalendarInterval]] for a unit value pair. Two unit configuration types are
* supported:
* - Single unit.
* - From-To unit (only 'YEAR TO MONTH' and 'DAY TO SECOND' are supported).
*/
override def visitIntervalField(ctx: IntervalFieldContext): CalendarInterval = withOrigin(ctx) {
import ctx._
val s = value.getText
try {
val unitText = unit.getText.toLowerCase(Locale.ROOT)
val interval = (unitText, Option(to).map(_.getText.toLowerCase(Locale.ROOT))) match {
case (u, None) if u.endsWith("s") =>
// Handle plural forms, e.g.: yearS/monthS/weekS/dayS/hourS/minuteS/secondS/...
CalendarInterval.fromSingleUnitString(u.substring(0, u.length - 1), s)
case (u, None) =>
CalendarInterval.fromSingleUnitString(u, s)
case ("year", Some("month")) =>
CalendarInterval.fromYearMonthString(s)
case ("day", Some("second")) =>
CalendarInterval.fromDayTimeString(s)
case (from, Some(t)) =>
throw new ParseException(s"Intervals FROM $from TO $t are not supported.", ctx)
}
validate(interval != null, "No interval can be constructed", ctx)
interval
} catch {
// Handle Exceptions thrown by CalendarInterval
case e: IllegalArgumentException =>
val pe = new ParseException(e.getMessage, ctx)
pe.setStackTrace(e.getStackTrace)
throw pe
}
}
/* ********************************************************************************************
* DataType parsing
* ******************************************************************************************** */
/**
* Create a Spark DataType.
*/
private def visitSparkDataType(ctx: DataTypeContext): DataType = {
HiveStringType.replaceCharType(typedVisit(ctx))
}
/**
* Resolve/create a primitive type.
*/
override def visitPrimitiveDataType(ctx: PrimitiveDataTypeContext): DataType = withOrigin(ctx) {
val dataType = ctx.identifier.getText.toLowerCase(Locale.ROOT)
(dataType, ctx.INTEGER_VALUE().asScala.toList) match {
case ("boolean", Nil) => BooleanType
case ("tinyint" | "byte", Nil) => ByteType
case ("smallint" | "short", Nil) => ShortType
case ("int" | "integer", Nil) => IntegerType
case ("bigint" | "long", Nil) => LongType
case ("float", Nil) => FloatType
case ("double", Nil) => DoubleType
case ("date", Nil) => DateType
case ("timestamp", Nil) => TimestampType
case ("string", Nil) => StringType
case ("char", length :: Nil) => CharType(length.getText.toInt)
case ("varchar", length :: Nil) => VarcharType(length.getText.toInt)
case ("binary", Nil) => BinaryType
case ("decimal", Nil) => DecimalType.USER_DEFAULT
case ("decimal", precision :: Nil) => DecimalType(precision.getText.toInt, 0)
case ("decimal", precision :: scale :: Nil) =>
DecimalType(precision.getText.toInt, scale.getText.toInt)
case (dt, params) =>
val dtStr = if (params.nonEmpty) s"$dt(${params.mkString(",")})" else dt
throw new ParseException(s"DataType $dtStr is not supported.", ctx)
}
}
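// Illustrative examples (not in the original source): "int" maps to IntegerType, "decimal" with
// no parameters maps to DecimalType.USER_DEFAULT, "decimal(10, 2)" maps to DecimalType(10, 2),
// and an unsupported form such as "int(11)" falls through to the ParseException branch above.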
/**
* Create a complex DataType. Arrays, Maps and Structures are supported.
*/
override def visitComplexDataType(ctx: ComplexDataTypeContext): DataType = withOrigin(ctx) {
ctx.complex.getType match {
case SqlBaseParser.ARRAY =>
ArrayType(typedVisit(ctx.dataType(0)))
case SqlBaseParser.MAP =>
MapType(typedVisit(ctx.dataType(0)), typedVisit(ctx.dataType(1)))
case SqlBaseParser.STRUCT =>
StructType(Option(ctx.complexColTypeList).toSeq.flatMap(visitComplexColTypeList))
}
}
/**
* Create top level table schema.
*/
protected def createSchema(ctx: ColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitColTypeList))
}
/**
* Create a [[StructType]] from a number of column definitions.
*/
override def visitColTypeList(ctx: ColTypeListContext): Seq[StructField] = withOrigin(ctx) {
ctx.colType().asScala.map(visitColType)
}
/**
* Create a top level [[StructField]] from a column definition.
*/
override def visitColType(ctx: ColTypeContext): StructField = withOrigin(ctx) {
import ctx._
val builder = new MetadataBuilder
// Add comment to metadata
if (STRING != null) {
builder.putString("comment", string(STRING))
}
// Add Hive type string to metadata.
val rawDataType = typedVisit[DataType](ctx.dataType)
val cleanedDataType = HiveStringType.replaceCharType(rawDataType)
if (rawDataType != cleanedDataType) {
builder.putString(HIVE_TYPE_STRING, rawDataType.catalogString)
}
StructField(
identifier.getText,
cleanedDataType,
nullable = true,
builder.build())
}
/**
* Create a [[StructType]] from a sequence of [[StructField]]s.
*/
protected def createStructType(ctx: ComplexColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitComplexColTypeList))
}
/**
* Create a [[StructType]] from a number of column definitions.
*/
override def visitComplexColTypeList(
ctx: ComplexColTypeListContext): Seq[StructField] = withOrigin(ctx) {
ctx.complexColType().asScala.map(visitComplexColType)
}
/**
* Create a [[StructField]] from a column definition.
*/
override def visitComplexColType(ctx: ComplexColTypeContext): StructField = withOrigin(ctx) {
import ctx._
val structField = StructField(identifier.getText, typedVisit(dataType), nullable = true)
if (STRING == null) structField else structField.withComment(string(STRING))
}
}
| patrick-nicholson/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala | Scala | apache-2.0 | 56,145 |
package org.jetbrains.plugins.scala.failed.types.existentialSimplification
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.types.existentialSimplification.ExistentialSimplificationTestBase
import org.junit.experimental.categories.Category
@Category(Array(classOf[PerfCycleTests]))
class ExistentialSimplificationAllRulesTest extends ExistentialSimplificationTestBase {
override def folderPath: String = super.folderPath + "allRules/"
def testAllRules() = doTest()
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/failed/types/existentialSimplification/ExistentialSimplificationAllRulesTest.scala | Scala | apache-2.0 | 514 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import breeze.linalg.{DenseVector => BDV}
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.impl.Utils.EPSILON
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.stat.distribution.MultivariateGaussian
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.linalg.{Matrices => OldMatrices, Matrix => OldMatrix,
Vector => OldVector, Vectors => OldVectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types.{IntegerType, StructType}
/**
* Common params for GaussianMixture and GaussianMixtureModel
*/
private[clustering] trait GaussianMixtureParams extends Params with HasMaxIter with HasFeaturesCol
with HasSeed with HasPredictionCol with HasProbabilityCol with HasTol {
/**
* Number of independent Gaussians in the mixture model. Must be greater than 1. Default: 2.
*
* @group param
*/
@Since("2.0.0")
final val k = new IntParam(this, "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", ParamValidators.gt(1))
/** @group getParam */
@Since("2.0.0")
def getK: Int = $(k)
/**
* Validates and transforms the input schema.
*
* @param schema input schema
* @return output schema
*/
protected def validateAndTransformSchema(schema: StructType): StructType = {
SchemaUtils.validateVectorCompatibleColumn(schema, getFeaturesCol)
val schemaWithPredictionCol = SchemaUtils.appendColumn(schema, $(predictionCol), IntegerType)
SchemaUtils.appendColumn(schemaWithPredictionCol, $(probabilityCol), new VectorUDT)
}
}
/**
* Multivariate Gaussian Mixture Model (GMM) consisting of k Gaussians, where points
* are drawn from each Gaussian i with probability weights(i).
*
* @param weights Weight for each Gaussian distribution in the mixture.
* This is a multinomial probability distribution over the k Gaussians,
* where weights(i) is the weight for Gaussian i, and weights sum to 1.
* @param gaussians Array of `MultivariateGaussian` where gaussians(i) represents
* the Multivariate Gaussian (Normal) Distribution for Gaussian i
*/
@Since("2.0.0")
class GaussianMixtureModel private[ml] (
@Since("2.0.0") override val uid: String,
@Since("2.0.0") val weights: Array[Double],
@Since("2.0.0") val gaussians: Array[MultivariateGaussian])
extends Model[GaussianMixtureModel] with GaussianMixtureParams with MLWritable
with HasTrainingSummary[GaussianMixtureSummary] {
/** @group setParam */
@Since("2.1.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.1.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.1.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixtureModel = {
val copied = copyValues(new GaussianMixtureModel(uid, weights, gaussians), extra)
copied.setSummary(trainingSummary).setParent(this.parent)
}
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
val predUDF = udf((vector: Vector) => predict(vector))
val probUDF = udf((vector: Vector) => predictProbability(vector))
dataset
.withColumn($(predictionCol), predUDF(DatasetUtils.columnToVector(dataset, getFeaturesCol)))
.withColumn($(probabilityCol), probUDF(DatasetUtils.columnToVector(dataset, getFeaturesCol)))
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
private[clustering] def predict(features: Vector): Int = {
val r = predictProbability(features)
r.argmax
}
private[clustering] def predictProbability(features: Vector): Vector = {
val probs: Array[Double] =
GaussianMixtureModel.computeProbabilities(features.asBreeze.toDenseVector, gaussians, weights)
Vectors.dense(probs)
}
/**
* Retrieve Gaussian distributions as a DataFrame.
* Each row represents a Gaussian Distribution.
* Two columns are defined: mean and cov.
* Schema:
* {{{
* root
* |-- mean: vector (nullable = true)
* |-- cov: matrix (nullable = true)
* }}}
*/
@Since("2.0.0")
def gaussiansDF: DataFrame = {
val modelGaussians = gaussians.map { gaussian =>
(OldVectors.fromML(gaussian.mean), OldMatrices.fromML(gaussian.cov))
}
SparkSession.builder().getOrCreate().createDataFrame(modelGaussians).toDF("mean", "cov")
}
/**
* Returns a [[org.apache.spark.ml.util.MLWriter]] instance for this ML instance.
*
* For [[GaussianMixtureModel]], this does NOT currently save the training [[summary]].
* An option to save [[summary]] may be added in the future.
*
*/
@Since("2.0.0")
override def write: MLWriter = new GaussianMixtureModel.GaussianMixtureModelWriter(this)
/**
* Gets summary of model on training set. An exception is
* thrown if `hasSummary` is false.
*/
@Since("2.0.0")
override def summary: GaussianMixtureSummary = super.summary
}
@Since("2.0.0")
object GaussianMixtureModel extends MLReadable[GaussianMixtureModel] {
@Since("2.0.0")
override def read: MLReader[GaussianMixtureModel] = new GaussianMixtureModelReader
@Since("2.0.0")
override def load(path: String): GaussianMixtureModel = super.load(path)
/** [[MLWriter]] instance for [[GaussianMixtureModel]] */
private[GaussianMixtureModel] class GaussianMixtureModelWriter(
instance: GaussianMixtureModel) extends MLWriter {
private case class Data(weights: Array[Double], mus: Array[OldVector], sigmas: Array[OldMatrix])
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: weights and gaussians
val weights = instance.weights
val gaussians = instance.gaussians
val mus = gaussians.map(g => OldVectors.fromML(g.mean))
val sigmas = gaussians.map(c => OldMatrices.fromML(c.cov))
val data = Data(weights, mus, sigmas)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class GaussianMixtureModelReader extends MLReader[GaussianMixtureModel] {
/** Checked against metadata when loading model */
private val className = classOf[GaussianMixtureModel].getName
override def load(path: String): GaussianMixtureModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val row = sparkSession.read.parquet(dataPath).select("weights", "mus", "sigmas").head()
val weights = row.getSeq[Double](0).toArray
val mus = row.getSeq[OldVector](1).toArray
val sigmas = row.getSeq[OldMatrix](2).toArray
require(mus.length == sigmas.length, "Length of Mu and Sigma array must match")
require(mus.length == weights.length, "Length of weight and Gaussian array must match")
val gaussians = mus.zip(sigmas).map {
case (mu, sigma) =>
new MultivariateGaussian(mu.asML, sigma.asML)
}
val model = new GaussianMixtureModel(metadata.uid, weights, gaussians)
metadata.getAndSetParams(model)
model
}
}
/**
* Compute the probability (partial assignment) for each cluster for the given data point.
*
* @param features Data point
* @param dists Gaussians for model
* @param weights Weights for each Gaussian
* @return Probability (partial assignment) for each of the k clusters
*/
private[clustering]
def computeProbabilities(
features: BDV[Double],
dists: Array[MultivariateGaussian],
weights: Array[Double]): Array[Double] = {
val p = weights.zip(dists).map {
case (weight, dist) => EPSILON + weight * dist.pdf(features)
}
val pSum = p.sum
var i = 0
while (i < weights.length) {
p(i) /= pSum
i += 1
}
p
}
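  // Restated as a formula (this mirrors the loop above and adds no new behaviour):
  //   p(k | x) = (EPSILON + w_k * N(x; mu_k, Sigma_k)) / sum_j (EPSILON + w_j * N(x; mu_j, Sigma_j))
  // EPSILON keeps the normalizer strictly positive when every density underflows for an outlier.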
}
/**
* Gaussian Mixture clustering.
*
* This class performs expectation maximization for multivariate Gaussian
* Mixture Models (GMMs). A GMM represents a composite distribution of
* independent Gaussian distributions with associated "mixing" weights
* specifying each's contribution to the composite.
*
* Given a set of sample points, this class will maximize the log-likelihood
* for a mixture of k Gaussians, iterating until the log-likelihood changes by
* less than convergenceTol, or until it has reached the max number of iterations.
* While this process is generally guaranteed to converge, it is not guaranteed
* to find a global optimum.
*
* @note This algorithm is limited in its number of features since it requires storing a covariance
* matrix which has size quadratic in the number of features. Even when the number of features does
* not exceed this limit, this algorithm may perform poorly on high-dimensional data.
* This is due to high-dimensional data (a) making it difficult to cluster at all (based
* on statistical/theoretical arguments) and (b) numerical issues with Gaussian distributions.
*/
@Since("2.0.0")
class GaussianMixture @Since("2.0.0") (
@Since("2.0.0") override val uid: String)
extends Estimator[GaussianMixtureModel] with GaussianMixtureParams with DefaultParamsWritable {
setDefault(
k -> 2,
maxIter -> 100,
tol -> 0.01)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixture = defaultCopy(extra)
@Since("2.0.0")
def this() = this(Identifiable.randomUID("GaussianMixture"))
/** @group setParam */
@Since("2.0.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.0.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.0.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
/** @group setParam */
@Since("2.0.0")
def setK(value: Int): this.type = set(k, value)
/** @group setParam */
@Since("2.0.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("2.0.0")
def setTol(value: Double): this.type = set(tol, value)
/** @group setParam */
@Since("2.0.0")
def setSeed(value: Long): this.type = set(seed, value)
/**
* Number of samples per cluster to use when initializing Gaussians.
*/
private val numSamples = 5
@Since("2.0.0")
override def fit(dataset: Dataset[_]): GaussianMixtureModel = instrumented { instr =>
transformSchema(dataset.schema, logging = true)
val sc = dataset.sparkSession.sparkContext
val numClusters = $(k)
val instances = dataset
.select(DatasetUtils.columnToVector(dataset, getFeaturesCol)).rdd.map {
case Row(features: Vector) => features
}.cache()
// Extract the number of features.
val numFeatures = instances.first().size
require(numFeatures < GaussianMixture.MAX_NUM_FEATURES, s"GaussianMixture cannot handle more " +
s"than ${GaussianMixture.MAX_NUM_FEATURES} features because the size of the covariance" +
s" matrix is quadratic in the number of features.")
instr.logPipelineStage(this)
instr.logDataset(dataset)
instr.logParams(this, featuresCol, predictionCol, probabilityCol, k, maxIter, seed, tol)
instr.logNumFeatures(numFeatures)
val shouldDistributeGaussians = GaussianMixture.shouldDistributeGaussians(
numClusters, numFeatures)
// TODO: SPARK-15785 Support users supplied initial GMM.
val (weights, gaussians) = initRandom(instances, numClusters, numFeatures)
var logLikelihood = Double.MinValue
var logLikelihoodPrev = 0.0
var iter = 0
while (iter < $(maxIter) && math.abs(logLikelihood - logLikelihoodPrev) > $(tol)) {
val bcWeights = instances.sparkContext.broadcast(weights)
val bcGaussians = instances.sparkContext.broadcast(gaussians)
// aggregate the cluster contribution for all sample points
val sums = instances.treeAggregate(
new ExpectationAggregator(numFeatures, bcWeights, bcGaussians))(
seqOp = (c, v) => (c, v) match {
case (aggregator, instance) => aggregator.add(instance)
},
combOp = (c1, c2) => (c1, c2) match {
case (aggregator1, aggregator2) => aggregator1.merge(aggregator2)
})
bcWeights.destroy(blocking = false)
bcGaussians.destroy(blocking = false)
if (iter == 0) {
val numSamples = sums.count
instr.logNumExamples(numSamples)
}
/*
Create new distributions based on the partial assignments
(often referred to as the "M" step in literature)
*/
val sumWeights = sums.weights.sum
if (shouldDistributeGaussians) {
val numPartitions = math.min(numClusters, 1024)
val tuples = Seq.tabulate(numClusters) { i =>
(sums.means(i), sums.covs(i), sums.weights(i))
}
val (ws, gs) = sc.parallelize(tuples, numPartitions).map { case (mean, cov, weight) =>
GaussianMixture.updateWeightsAndGaussians(mean, cov, weight, sumWeights)
}.collect().unzip
Array.copy(ws, 0, weights, 0, ws.length)
Array.copy(gs, 0, gaussians, 0, gs.length)
} else {
var i = 0
while (i < numClusters) {
val (weight, gaussian) = GaussianMixture.updateWeightsAndGaussians(
sums.means(i), sums.covs(i), sums.weights(i), sumWeights)
weights(i) = weight
gaussians(i) = gaussian
i += 1
}
}
logLikelihoodPrev = logLikelihood // current becomes previous
logLikelihood = sums.logLikelihood // this is the freshly computed log-likelihood
iter += 1
}
instances.unpersist(false)
val gaussianDists = gaussians.map { case (mean, covVec) =>
val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
new MultivariateGaussian(mean, cov)
}
val model = copyValues(new GaussianMixtureModel(uid, weights, gaussianDists)).setParent(this)
val summary = new GaussianMixtureSummary(model.transform(dataset),
$(predictionCol), $(probabilityCol), $(featuresCol), $(k), logLikelihood, iter)
instr.logNamedValue("logLikelihood", logLikelihood)
instr.logNamedValue("clusterSizes", summary.clusterSizes)
model.setSummary(Some(summary))
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
/**
* Initialize weights and corresponding gaussian distributions at random.
*
* We start with uniform weights, a random mean from the data, and diagonal covariance matrices
* using component variances derived from the samples.
*
* @param instances The training instances.
* @param numClusters The number of clusters.
* @param numFeatures The number of features of training instance.
* @return The initialized weights and corresponding gaussian distributions. Note the
* covariance matrix of multivariate gaussian distribution is symmetric and
* we only save the upper triangular part as a dense vector (column major).
*/
private def initRandom(
instances: RDD[Vector],
numClusters: Int,
numFeatures: Int): (Array[Double], Array[(DenseVector, DenseVector)]) = {
val samples = instances.takeSample(withReplacement = true, numClusters * numSamples, $(seed))
val weights: Array[Double] = Array.fill(numClusters)(1.0 / numClusters)
val gaussians: Array[(DenseVector, DenseVector)] = Array.tabulate(numClusters) { i =>
val slice = samples.view(i * numSamples, (i + 1) * numSamples)
val mean = {
val v = new DenseVector(new Array[Double](numFeatures))
var i = 0
while (i < numSamples) {
BLAS.axpy(1.0, slice(i), v)
i += 1
}
BLAS.scal(1.0 / numSamples, v)
v
}
/*
Construct matrix where diagonal entries are element-wise
variance of input vectors (computes biased variance).
Since the covariance matrix of multivariate gaussian distribution is symmetric,
only the upper triangular part of the matrix (column major) will be saved as
a dense vector in order to reduce the shuffled data size.
*/
val cov = {
val ss = new DenseVector(new Array[Double](numFeatures)).asBreeze
slice.foreach(xi => ss += (xi.asBreeze - mean.asBreeze) ^:^ 2.0)
val diagVec = Vectors.fromBreeze(ss)
BLAS.scal(1.0 / numSamples, diagVec)
val covVec = new DenseVector(Array.fill[Double](
numFeatures * (numFeatures + 1) / 2)(0.0))
diagVec.toArray.zipWithIndex.foreach { case (v: Double, i: Int) =>
covVec.values(i + i * (i + 1) / 2) = v
}
covVec
}
(mean, cov)
}
(weights, gaussians)
}
}
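// A minimal usage sketch (the Dataset `dataset` and its default "features" vector column are
// assumed to exist elsewhere; they are not defined in this file):
//   val gmm = new GaussianMixture().setK(3).setMaxIter(100).setSeed(1L)
//   val model = gmm.fit(dataset)
//   model.gaussiansDF.show()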
@Since("2.0.0")
object GaussianMixture extends DefaultParamsReadable[GaussianMixture] {
/** Limit number of features such that numFeatures^2^ < Int.MaxValue */
private[clustering] val MAX_NUM_FEATURES = math.sqrt(Int.MaxValue).toInt
@Since("2.0.0")
override def load(path: String): GaussianMixture = super.load(path)
/**
* Heuristic to distribute the computation of the [[MultivariateGaussian]]s, approximately when
* numFeatures > 25 except for when numClusters is very small.
*
* @param numClusters Number of clusters
* @param numFeatures Number of features
*/
private[clustering] def shouldDistributeGaussians(
numClusters: Int,
numFeatures: Int): Boolean = {
((numClusters - 1.0) / numClusters) * numFeatures > 25.0
}
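  // Worked example: for numClusters = 2 and numFeatures = 52 the left-hand side is
  // (1.0 / 2.0) * 52 = 26.0 > 25.0, so the Gaussian updates are distributed; with
  // numFeatures = 50 it is exactly 25.0 and the update stays local to the driver.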
/**
* Convert an n * (n + 1) / 2 dimension array representing the upper triangular part of a matrix
* into an n * n array representing the full symmetric matrix (column major).
*
* @param n The order of the n by n matrix.
* @param triangularValues The upper triangular part of the matrix packed in an array
* (column major).
* @return A dense matrix which represents the symmetric matrix in column major.
*/
private[clustering] def unpackUpperTriangularMatrix(
n: Int,
triangularValues: Array[Double]): DenseMatrix = {
val symmetricValues = new Array[Double](n * n)
var r = 0
var i = 0
while (i < n) {
var j = 0
while (j <= i) {
symmetricValues(i * n + j) = triangularValues(r)
symmetricValues(j * n + i) = triangularValues(r)
r += 1
j += 1
}
i += 1
}
new DenseMatrix(n, n, symmetricValues)
}
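  // Worked example for n = 3: triangularValues = Array(a, b, c, d, e, f) unpacks to the
  // column-major symmetric matrix
  //   [ a  b  d ]
  //   [ b  c  e ]
  //   [ d  e  f ]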
/**
* Update the weight, mean and covariance of gaussian distribution.
*
* @param mean The mean of the gaussian distribution.
* @param cov The covariance matrix of the gaussian distribution. Note we only
* save the upper triangular part as a dense vector (column major).
* @param weight The weight of the gaussian distribution.
* @param sumWeights The sum of weights of all clusters.
* @return The updated weight, mean and covariance.
*/
private[clustering] def updateWeightsAndGaussians(
mean: DenseVector,
cov: DenseVector,
weight: Double,
sumWeights: Double): (Double, (DenseVector, DenseVector)) = {
BLAS.scal(1.0 / weight, mean)
BLAS.spr(-weight, mean, cov)
BLAS.scal(1.0 / weight, cov)
val newWeight = weight / sumWeights
val newGaussian = (mean, cov)
(newWeight, newGaussian)
}
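  // The BLAS calls above implement the usual M-step, restated here as equations: with soft
  // count w_k = sum_i gamma_ik and the aggregated sums passed in as `mean` and `cov`,
  //   mu_k     = (1 / w_k) * sum_i gamma_ik * x_i
  //   Sigma_k  = (1 / w_k) * (sum_i gamma_ik * x_i x_i^T - w_k * mu_k mu_k^T)
  //   weight_k = w_k / sum_j w_j
  // where spr(-weight, mean, cov) subtracts w_k * mu_k mu_k^T from the packed upper triangle.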
}
/**
* ExpectationAggregator computes the partial expectation results.
*
* @param numFeatures The number of features.
* @param bcWeights The broadcast weights for each Gaussian distribution in the mixture.
* @param bcGaussians The broadcast array of Multivariate Gaussian (Normal) Distribution
* in the mixture. Note only upper triangular part of the covariance
* matrix of each distribution is stored as dense vector (column major)
* in order to reduce shuffled data size.
*/
private class ExpectationAggregator(
numFeatures: Int,
bcWeights: Broadcast[Array[Double]],
bcGaussians: Broadcast[Array[(DenseVector, DenseVector)]]) extends Serializable {
private val k: Int = bcWeights.value.length
private var totalCnt: Long = 0L
private var newLogLikelihood: Double = 0.0
private lazy val newWeights: Array[Double] = new Array[Double](k)
private lazy val newMeans: Array[DenseVector] = Array.fill(k)(
new DenseVector(Array.fill[Double](numFeatures)(0.0)))
private lazy val newCovs: Array[DenseVector] = Array.fill(k)(
new DenseVector(Array.fill[Double](numFeatures * (numFeatures + 1) / 2)(0.0)))
@transient private lazy val oldGaussians = {
bcGaussians.value.map { case (mean, covVec) =>
val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
new MultivariateGaussian(mean, cov)
}
}
def count: Long = totalCnt
def logLikelihood: Double = newLogLikelihood
def weights: Array[Double] = newWeights
def means: Array[DenseVector] = newMeans
def covs: Array[DenseVector] = newCovs
/**
* Add a new training instance to this ExpectationAggregator, update the weights,
   * means and covariances for each distribution, and update the log likelihood.
*
* @param instance The instance of data point to be added.
* @return This ExpectationAggregator object.
*/
def add(instance: Vector): this.type = {
val localWeights = bcWeights.value
val localOldGaussians = oldGaussians
val prob = new Array[Double](k)
var probSum = 0.0
var i = 0
while (i < k) {
val p = EPSILON + localWeights(i) * localOldGaussians(i).pdf(instance)
prob(i) = p
probSum += p
i += 1
}
newLogLikelihood += math.log(probSum)
val localNewWeights = newWeights
val localNewMeans = newMeans
val localNewCovs = newCovs
i = 0
while (i < k) {
prob(i) /= probSum
localNewWeights(i) += prob(i)
BLAS.axpy(prob(i), instance, localNewMeans(i))
BLAS.spr(prob(i), instance, localNewCovs(i))
i += 1
}
totalCnt += 1
this
}
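  // Per-instance E-step in equation form (a restatement of the loop above): each responsibility
  //   gamma_k = (EPSILON + w_k * N(x; mu_k, Sigma_k)) / sum_j (EPSILON + w_j * N(x; mu_j, Sigma_j))
  // is accumulated as newWeights(k) += gamma_k, newMeans(k) += gamma_k * x and, via spr, the
  // packed upper triangle newCovs(k) += gamma_k * x x^T, while the log of the normalizer is
  // added to the running log-likelihood.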
/**
* Merge another ExpectationAggregator, update the weights, means and covariances
   * for each distribution, and update the log likelihood.
* (Note that it's in place merging; as a result, `this` object will be modified.)
*
* @param other The other ExpectationAggregator to be merged.
* @return This ExpectationAggregator object.
*/
def merge(other: ExpectationAggregator): this.type = {
if (other.count != 0) {
totalCnt += other.totalCnt
val localThisNewWeights = this.newWeights
val localOtherNewWeights = other.newWeights
val localThisNewMeans = this.newMeans
val localOtherNewMeans = other.newMeans
val localThisNewCovs = this.newCovs
val localOtherNewCovs = other.newCovs
var i = 0
while (i < k) {
localThisNewWeights(i) += localOtherNewWeights(i)
BLAS.axpy(1.0, localOtherNewMeans(i), localThisNewMeans(i))
BLAS.axpy(1.0, localOtherNewCovs(i), localThisNewCovs(i))
i += 1
}
newLogLikelihood += other.newLogLikelihood
}
this
}
}
/**
* :: Experimental ::
* Summary of GaussianMixture.
*
* @param predictions `DataFrame` produced by `GaussianMixtureModel.transform()`.
* @param predictionCol Name for column of predicted clusters in `predictions`.
* @param probabilityCol Name for column of predicted probability of each cluster
* in `predictions`.
* @param featuresCol Name for column of features in `predictions`.
* @param k Number of clusters.
* @param logLikelihood Total log-likelihood for this model on the given data.
* @param numIter Number of iterations.
*/
@Since("2.0.0")
@Experimental
class GaussianMixtureSummary private[clustering] (
predictions: DataFrame,
predictionCol: String,
@Since("2.0.0") val probabilityCol: String,
featuresCol: String,
k: Int,
@Since("2.2.0") val logLikelihood: Double,
numIter: Int)
extends ClusteringSummary(predictions, predictionCol, featuresCol, k, numIter) {
/**
* Probability of each cluster.
*/
@Since("2.0.0")
@transient lazy val probability: DataFrame = predictions.select(probabilityCol)
}
|
hhbyyh/spark
|
mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
|
Scala
|
apache-2.0
| 25,758 |
package io.buoyant.linkerd.clientTls
import com.fasterxml.jackson.annotation.JsonIgnore
import com.twitter.finagle.{Path, ServiceFactory, Stack, Stackable}
import com.twitter.finagle.buoyant.TlsClientPrep
import com.twitter.finagle.client.AddrMetadataExtraction.AddrMetadata
import com.twitter.finagle.ssl.Engine
import com.twitter.finagle.transport.TlsConfig
import com.twitter.logging.Logger
import io.buoyant.config.Parser
import io.buoyant.linkerd.{TlsClientConfig, TlsClientInitializer}
import io.buoyant.namer.util.PathMatcher
import java.net.SocketAddress
class BoundPathInitializer extends TlsClientInitializer {
val configClass = classOf[BoundPathConfig]
override def configId = "io.l5d.boundPath"
}
object BoundPathInitializer extends BoundPathInitializer
case class BoundPathConfig(
caCertPath: Option[String],
names: Seq[NameMatcherConfig],
strict: Option[Boolean]
) extends TlsClientConfig {
private[clientTls] def peerCommonName(params: Stack.Params) =
params[AddrMetadata].metadata("id") match {
case id: String => commonNameFromPath(Path.read(id))
case _ => None
}
private[this] def commonNameFromPath(path: Path): Option[String] =
names.map { n =>
n.matcher.substitute(path, n.commonNamePattern)
}.collectFirst {
case Some(result) => result
} match {
case None if strict.getOrElse(true) =>
val n = names.map(_.prefix).mkString(",")
throw new MatcherError(s"Unable to match ${path.show} with available names: $n")
case default => default
}
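  // Hypothetical illustration (the exact capture syntax is defined by PathMatcher, not in this
  // file): a NameMatcherConfig(prefix = "/#/io.l5d.fs/{service}", commonNamePattern =
  // "{service}.example.com") would map the bound path /#/io.l5d.fs/users to the expected common
  // name "users.example.com"; with `strict` unset or true, a path matching none of the
  // configured prefixes raises a MatcherError instead of falling back to an insecure connection.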
@JsonIgnore
override def tlsClientPrep[Req, Rsp]: Stackable[ServiceFactory[Req, Rsp]] =
new Stack.Module[ServiceFactory[Req, Rsp]] {
override val role = TlsClientPrep.role
override val description = TlsClientPrep.description
override val parameters: Seq[Stack.Param[_]] = Seq(AddrMetadata.param)
private val log = Logger.get(getClass.getName)
private[this] def tlsify(params: Stack.Params): TlsClientPrep.Params =
peerCommonName(params) match {
case Some(cn) =>
val certs = caCertPath.toSeq.map(TlsClientPrep.loadCert(_))
val trust = TlsClientPrep.Trust.Verified(cn, certs)
Stack.Params.empty +
TlsClientPrep.TransportSecurity(TlsClientPrep.TransportSecurity.Secure()) +
TlsClientPrep.Trust(trust)
case None =>
Stack.Params.empty +
TlsClientPrep.TransportSecurity(TlsClientPrep.TransportSecurity.Insecure)
}
override def make(params: Stack.Params, next: Stack[ServiceFactory[Req, Rsp]]) =
Stack.Leaf(role, next.make(params ++ tlsify(params)))
}
}
class MatcherError(msg: String) extends Throwable(msg)
case class NameMatcherConfig(prefix: String, commonNamePattern: String) {
def matcher: PathMatcher = PathMatcher(prefix)
}
|
hhtpcd/linkerd
|
linkerd/tls/src/main/scala/io/buoyant/linkerd/clientTls/BoundPathInitializer.scala
|
Scala
|
apache-2.0
| 2,868 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
class UpSampling1DSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val upsampling = UpSampling1D[Float](2).setName("upsampling")
val input = Tensor[Float](2, 5, 5).apply1(_ => Random.nextFloat())
runSerializationTest(upsampling, input)
}
}
|
yiheng/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/UpSampling1DSpec.scala
|
Scala
|
apache-2.0
| 1,071 |
package com.basrikahveci
package cardgame.messaging.response
import cardgame.messaging.Response
class InviteUserResponse(val success: Boolean, val reason: Int) extends Response
|
metanet/cardgame-server-scala
|
src/main/scala/com/basrikahveci/cardgame/messaging/response/InviteUserResponse.scala
|
Scala
|
mit
| 179 |
package org.talg
import org.talg.home.entities.{Person, Address}
import com.mongodb.casbah.Imports._
/**
* User: talg
*/
package object home {
def ADDRESS = Address(
street = "Lombard",
zipCode = 12345,
city = "San Francisco",
country = "United States"
)
def PERSON = Person(
_id = new ObjectId,
age = 99,
name = "Moses",
address = ADDRESS,
knownLanguages = List("english", "hebrew", "russian"),
socialId = Some(12345654321L)
)
}
|
talgendler/casbah
|
src/test/scala/org/talg/home/package.scala
|
Scala
|
mit
| 484 |
package com.github.nethad.clustermeister.integration.sc06
import akka.dispatch.Await
import java.lang.reflect.InvocationHandler
import java.lang.reflect.Proxy
import java.lang.reflect.Method
import akka.actor.ActorRef
import akka.util.Timeout
import akka.util.Duration
import akka.util.duration._
import java.util.concurrent.TimeUnit
import akka.dispatch.Future
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.atomic.AtomicInteger
import akka.pattern.ask
/**
* Used to create proxies
*/
object AkkaProxy {
def newStringProxy(actor: ActorRef): Node = newInstance[Node](actor)
def newInstance[T <: Any: Manifest](actor: ActorRef, sentMessagesCounter: AtomicInteger = new AtomicInteger(0), receivedMessagesCounter: AtomicInteger = new AtomicInteger(0), timeout: Timeout = Timeout(300 seconds)): T = {
val c = manifest[T].erasure
Proxy.newProxyInstance(
c.getClassLoader,
Array[Class[_]](c),
new AkkaProxy(actor, sentMessagesCounter, receivedMessagesCounter, timeout)).asInstanceOf[T]
}
}
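// Hypothetical usage sketch (`nodeActor` is assumed to be an ActorRef that answers Request
// messages by applying the received Command to a local Node implementation; it is not defined
// in this file):
//   val node: Node = AkkaProxy.newInstance[Node](nodeActor)
//   // calls on `node` are now marshalled as Command/Request messages and sent via ask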
/**
* Proxy that does RPC over Akka
*/
class AkkaProxy[ProxiedClass](actor: ActorRef, sentMessagesCounter: AtomicInteger, receivedMessagesCounter: AtomicInteger, timeout: Timeout) extends InvocationHandler with Serializable {
override def toString = "ProxyFor" + actor.toString
implicit val t = timeout
def invoke(proxy: Object, method: Method, arguments: Array[Object]) = {
val command = new Command[ProxiedClass](method.getDeclaringClass.getName, method.toString, arguments)
try {
val resultFuture: Future[Any] = actor ? Request(command, returnResult = true)
sentMessagesCounter.incrementAndGet
val result = Await.result(resultFuture, timeout.duration)
receivedMessagesCounter.incrementAndGet
result.asInstanceOf[AnyRef]
    } catch {
      case e: Exception =>
        val args = if (arguments != null) arguments.mkString("(", ", ", ")") else ""
        println(s"Exception in proxy method `${method.getName}$args`: $e from $actor")
        e.printStackTrace()
        throw e
    }
}
}
case class Command[ParameterType](className: String, methodDescription: String, arguments: Array[Object]) extends Function1[ParameterType, AnyRef] {
def apply(proxiedClass: ParameterType) = {
val clazz = Class.forName(className)
    val methods = clazz.getMethods.map(method => (method.toString, method)).toMap
val method = methods(methodDescription)
val result = method.invoke(proxiedClass, arguments: _*)
result
}
override def toString: String = {
className + "." + methodDescription + { if (arguments != null) { "(" + arguments.toList.mkString("(", ", ", ")") } else { "" } }
}
}
|
nethad/clustermeister
|
integration-tests/src/main/scala/com/github/nethad/clustermeister/integration/sc06/AkkaProxy.scala
|
Scala
|
apache-2.0
| 2,721 |
package com.jwd.scala.sample.concurrency
import akka.actor.Actor
import akka.actor.ActorSystem
import akka.actor.Props
import akka.actor.ActorLogging
case class Greeting(who: String)
class GreetingActor extends Actor with ActorLogging {
def receive = {
case Greeting(who) => log.info("Hello " + who)
    case _: Int => log.info("Received a number")
    case _ => log.error("Invalid message")
}
}
object AkkaSample extends App {
val list = List(Greeting("Smith"), 10, Greeting("Scott"), 20, Greeting("James"), true, Greeting("John"), 1.1)
val system = ActorSystem("MySystem")
val greeter = system.actorOf(Props[GreetingActor], name = "greeter")
  list.foreach(greeter ! _)
}
|
williamjwd/learn-scala
|
learn-scala-sample/src/main/scala/com/jwd/scala/sample/concurrency/AkkaSample.scala
|
Scala
|
apache-2.0
| 739 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.annotation.AlphaComponent
import org.apache.spark.ml.UnaryTransformer
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.sql.types.{DataType, StringType, ArrayType}
/**
* :: AlphaComponent ::
* A tokenizer that converts the input string to lowercase and then splits it by white spaces.
*/
@AlphaComponent
class Tokenizer extends UnaryTransformer[String, Seq[String], Tokenizer] {
override protected def createTransformFunc(paramMap: ParamMap): String => Seq[String] = {
    _.toLowerCase.split("\\s")
}
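  // For illustration: the returned function maps "Hello World" to Seq("hello", "world"). Note
  // that split("\\s") splits on single whitespace characters, so consecutive spaces produce
  // empty tokens (e.g. "a  b" becomes Seq("a", "", "b")).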
override protected def validateInputType(inputType: DataType): Unit = {
require(inputType == StringType, s"Input type must be string type but got $inputType.")
}
override protected def outputDataType: DataType = new ArrayType(StringType, false)
}
|
trueyao/spark-lever
|
mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala
|
Scala
|
apache-2.0
| 1,649 |
package test
import scala.language.higherKinds
trait Functor[F[_]] {
def map[A, B](f: A => B, fa: F[A]): F[B]
}
object Functor {
implicit def function[A]: Functor[({ type l[B] = A => B })#l] =
new Functor[({ type l[B] = A => B })#l] {
def map[C, B](cb: C => B, ac: A => C): A => B = cb compose ac
}
}
object FunctorSyntax {
implicit class FunctorOps[F[_], A](fa: F[A])(implicit F: Functor[F]) {
def map[B](f: A => B): F[B] = F.map(f, fa)
}
}
object Test {
val f: Int => String = _.toString
import FunctorSyntax._
f.map((s: String) => s.reverse)
}
|
som-snytt/dotty
|
tests/pos/t2712-5.scala
|
Scala
|
apache-2.0
| 587 |
package org.jetbrains.plugins.scala
package annotator.quickfix
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.openapi.project.Project
import com.intellij.openapi.editor.Editor
import com.intellij.psi.PsiFile
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.types.result.TypeResult
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScParameterizedType}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
/**
* Nikolay.Tropin
* 6/27/13
*/
class WrapInOptionQuickFix(expr: ScExpression, expectedType: TypeResult[ScType], exprType: TypeResult[ScType]) extends IntentionAction {
def getText: String = ScalaBundle.message("wrap.in.option.hint")
def getFamilyName: String = ScalaBundle.message("wrap.in.option.name")
def isAvailable(project: Project, editor: Editor, file: PsiFile): Boolean = {
WrapInOptionQuickFix.isAvailable(expr, expectedType, exprType)
}
def invoke(project: Project, editor: Editor, file: PsiFile) {
if (expr.isValid) {
val newText = "Option(" + expr.getText + ")"
val newExpr = ScalaPsiElementFactory.createExpressionFromText(newText, expr.getManager)
expr.replaceExpression(newExpr, removeParenthesis = true)
}
}
def startInWriteAction(): Boolean = true
}
object WrapInOptionQuickFix {
def isAvailable(expr: ScExpression, expectedType: TypeResult[ScType], exprType: TypeResult[ScType]): Boolean = {
var result = false
for {
scType <- exprType
expectedType <- expectedType
} {
expectedType match {
case ScParameterizedType(des, Seq(typeArg)) =>
ScType.extractClass(des) match {
case Some(scClass: ScClass)
if scClass.qualifiedName == "scala.Option" && scType.conforms(typeArg) => result = true
case _ =>
}
case _ =>
}
}
result
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/annotator/quickfix/WrapInOptionQuickFix.scala
|
Scala
|
apache-2.0
| 2,018 |
package cats.scalatest
import org.scalatest.exceptions.{ TestFailedException, StackDepthException }
import cats.data.Validated
import Validated.{ Valid, Invalid }
import org.scalactic.source
trait ValidatedValues {
import scala.language.implicitConversions
/**
* Implicit conversion that adds a `value` method to `cats.data.Validated`
*
* @param validated the `cats.data.Validated` on which to add the `value` method
*/
implicit def convertValidatedToValidatable[E, T](validated: Validated[E, T])(implicit pos: source.Position): Validatable[E, T] =
new Validatable(validated, pos)
/**
* Container class for matching success
* type stuff in `cats.data.Validated` containers,
* similar to `org.scalatest.OptionValues.Valuable`
*
* Meant to allow you to make statements like:
*
* <pre class="stREPL">
* result.value should be > 15
* </pre>
*
   * Where it only matches if result is a `Valid` whose value is greater than 15.
   *
   * Otherwise your test will fail, indicating that it was Invalid instead of Valid
*
* @param validated A `cats.data.Validated` object to try converting to a `Validatable`
*
* @see org.scalatest.OptionValues.Valuable
*/
final class Validatable[E, T](validated: Validated[E, T], pos: source.Position) {
def value: T = validated match {
case Valid(valid) => valid
case Invalid(left) =>
throw new TestFailedException((_: StackDepthException) => Some(s"'$left' is Invalid, expected Valid."), None, pos)
}
/**
* Allow .invalidValue on an validated to extract the invalid side. Like .value, but for the `Invalid`.
*/
def invalidValue: E = validated match {
case Valid(valid) =>
throw new TestFailedException((_: StackDepthException) => Some(s"'$valid' is Valid, expected Invalid."), None, pos)
case Invalid(left) => left
}
}
}
/**
*
* Companion object for easy importing – rather than mixing in – to allow `ValidatedValues` operations.
*
* This will permit you to invoke a `value` method on an instance of a `cats.data.Validated`,
* which attempts to unwrap the Validated.Valid
*
* Similar to `org.scalatest.OptionValues.Valuable`
*
* Meant to allow you to make statements like:
*
* <pre class="stREPL">
* result.value should be > 15
* </pre>
*
* Where it only matches if result is both valid and greater than 15.
*
* Otherwise your test will fail, indicating that it was an Invalid instead of Valid
*
* @see org.scalatest.OptionValues.Valuable
*/
object ValidatedValues extends ValidatedValues
|
coltfred/cats-scalatest
|
src/main/scala/cats/scalatest/ValidatedValues.scala
|
Scala
|
apache-2.0
| 2,566 |
package se.gigurra.renderer
import scalaxy.streams.optimize
class TextModel(val text: String, vertices: Array[Float], colors: Array[Float], font: Font)
extends Model(PrimitiveType.TRIANGLES, vertices, colors) {
val width = (text map font.get).foldLeft(0.0f)(_ + _.advanceWidth)
override def toString(): String = s"TextModel: $text"
}
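// Layout note (inferred from the code in this file): `vertices` and `colors` are packed as
// 4 consecutive floats per vertex; when glyphs are concatenated below, only component 0
// (the x coordinate) is shifted by the running advance width, components 1-3 are copied
// unchanged.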
object TextModel {
def apply(text: String, vertices: Array[Float], colors: Array[Float], font: Font) = new TextModel(text, vertices, colors, font)
def apply(text: String, font: Font): TextModel = {
val glyphs = text map font.get
val vertices =
if (glyphs.length == 1) {
val g = glyphs.head
g.vertices
} else {
val vertices = new Array[Float](glyphs.foldLeft(0)(_ + _.vertices.length))
var width = 0.0f
var writeOffs = 0
for (g <- glyphs) {
optimize(for (i <- 0 until g.vertices.length by 4) {
vertices(writeOffs + i + 0) = g.vertices(i + 0) + width
vertices(writeOffs + i + 1) = g.vertices(i + 1)
vertices(writeOffs + i + 2) = g.vertices(i + 2)
vertices(writeOffs + i + 3) = g.vertices(i + 3)
})
writeOffs += g.vertices.length
width += g.advanceWidth
}
vertices
}
val colors = vertices.clone
optimize(for (i <- 0 until vertices.length by 4) {
colors(i + 0) = font.color.r
colors(i + 1) = font.color.g
colors(i + 2) = font.color.b
colors(i + 3) = font.color.a
})
new TextModel(text, vertices, colors, font)
}
}
|
GiGurra/gigurra-scala-2drenderer
|
src/main/scala/se/gigurra/renderer/TextModel.scala
|
Scala
|
mit
| 1,585 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.kryo.impl
import java.io.OutputStream
import java.util.{Date, UUID}
import com.esotericsoftware.kryo.io.Output
import com.vividsolutions.jts.geom.Geometry
import org.locationtech.geomesa.features.SimpleFeatureSerializer
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer.{NON_NULL_BYTE, NULL_BYTE, VERSION}
import org.locationtech.geomesa.features.kryo.json.KryoJsonSerialization
import org.locationtech.geomesa.features.kryo.serialization.{KryoGeometrySerialization, KryoUserDataSerialization}
import org.locationtech.geomesa.features.serialization.ObjectType
import org.locationtech.geomesa.features.serialization.ObjectType.ObjectType
import org.locationtech.geomesa.utils.cache.{CacheKeyGenerator, SoftThreadLocal, SoftThreadLocalCache}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
trait KryoFeatureSerialization extends SimpleFeatureSerializer {
private [kryo] def serializeSft: SimpleFeatureType
private val cacheKey = CacheKeyGenerator.cacheKey(serializeSft)
private val writers = KryoFeatureSerialization.getWriters(cacheKey, serializeSft)
private val withId = !options.withoutId
private val withUserData = options.withUserData
override def serialize(sf: SimpleFeature): Array[Byte] = {
val output = KryoFeatureSerialization.getOutput(null)
writeFeature(sf, output)
output.toBytes
}
override def serialize(sf: SimpleFeature, out: OutputStream): Unit = {
val output = KryoFeatureSerialization.getOutput(out)
writeFeature(sf, output)
output.flush()
}
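  // Byte layout produced by writeFeature below, summarized for readability:
  //   [version varint][4-byte position of the offset table][feature id string, if ids are kept]
  //   [attribute 0 .. n-1][offset table: one varint per attribute][user data, if enabled]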
private def writeFeature(sf: SimpleFeature, output: Output): Unit = {
val offsets = KryoFeatureSerialization.getOffsets(cacheKey, writers.length)
val offset = output.position()
output.writeInt(VERSION, true)
    output.setPosition(offset + 5) // leave 4 bytes to write the offset table start position
if (withId) {
// TODO optimize for uuids?
output.writeString(sf.getID)
}
    // write attributes and keep track of offsets into the byte array
var i = 0
while (i < writers.length) {
offsets(i) = output.position() - offset
writers(i)(output, sf.getAttribute(i))
i += 1
}
// write the offsets - variable width
i = 0
val offsetStart = output.position() - offset
while (i < writers.length) {
output.writeInt(offsets(i), true)
i += 1
}
    // go back and write the start position for the offsets
val end = output.position()
output.setPosition(offset + 1)
output.writeInt(offsetStart)
// reset the position back to the end of the buffer so the bytes aren't lost, and we can keep writing user data
output.setPosition(end)
if (withUserData) {
KryoUserDataSerialization.serialize(output, sf.getUserData)
}
}
}
object KryoFeatureSerialization {
private [this] val outputs = new SoftThreadLocal[Output]()
private [this] val writers = new SoftThreadLocalCache[String, Array[(Output, AnyRef) => Unit]]()
private [this] val offsets = new SoftThreadLocalCache[String, Array[Int]]()
def getOutput(stream: OutputStream): Output = {
val out = outputs.getOrElseUpdate(new Output(1024, -1))
out.setOutputStream(stream)
out
}
private [kryo] def getOffsets(sft: String, size: Int): Array[Int] =
offsets.getOrElseUpdate(sft, Array.ofDim[Int](size))
// noinspection UnitInMap
def getWriters(key: String, sft: SimpleFeatureType): Array[(Output, AnyRef) => Unit] = {
import scala.collection.JavaConversions._
writers.getOrElseUpdate(key, sft.getAttributeDescriptors.map { ad =>
val bindings = ObjectType.selectType(ad.getType.getBinding, ad.getUserData)
matchWriter(bindings)
}.toArray)
}
private def matchWriter(bindings: Seq[ObjectType]): (Output, AnyRef) => Unit = {
bindings.head match {
case ObjectType.STRING =>
(o: Output, v: AnyRef) => o.writeString(v.asInstanceOf[String]) // write string supports nulls
case ObjectType.INT =>
val w = (o: Output, v: AnyRef) => o.writeInt(v.asInstanceOf[Int])
writeNullable(w)
case ObjectType.LONG =>
val w = (o: Output, v: AnyRef) => o.writeLong(v.asInstanceOf[Long])
writeNullable(w)
case ObjectType.FLOAT =>
val w = (o: Output, v: AnyRef) => o.writeFloat(v.asInstanceOf[Float])
writeNullable(w)
case ObjectType.DOUBLE =>
val w = (o: Output, v: AnyRef) => o.writeDouble(v.asInstanceOf[Double])
writeNullable(w)
case ObjectType.BOOLEAN =>
val w = (o: Output, v: AnyRef) => o.writeBoolean(v.asInstanceOf[Boolean])
writeNullable(w)
case ObjectType.DATE =>
val w = (o: Output, v: AnyRef) => o.writeLong(v.asInstanceOf[Date].getTime)
writeNullable(w)
case ObjectType.UUID =>
val w = (o: Output, v: AnyRef) => {
val uuid = v.asInstanceOf[UUID]
o.writeLong(uuid.getMostSignificantBits)
o.writeLong(uuid.getLeastSignificantBits)
}
writeNullable(w)
case ObjectType.GEOMETRY =>
// null checks are handled by geometry serializer
(o: Output, v: AnyRef) => KryoGeometrySerialization.serialize(o, v.asInstanceOf[Geometry])
case ObjectType.JSON =>
(o: Output, v: AnyRef) => KryoJsonSerialization.serialize(o, v.asInstanceOf[String])
case ObjectType.LIST =>
val valueWriter = matchWriter(bindings.drop(1))
(o: Output, v: AnyRef) => {
val list = v.asInstanceOf[java.util.List[AnyRef]]
if (list == null) {
o.writeInt(-1, true)
} else {
o.writeInt(list.size(), true)
val iter = list.iterator()
while (iter.hasNext) {
valueWriter(o, iter.next())
}
}
}
case ObjectType.MAP =>
val keyWriter = matchWriter(bindings.slice(1, 2))
val valueWriter = matchWriter(bindings.drop(2))
(o: Output, v: AnyRef) => {
val map = v.asInstanceOf[java.util.Map[AnyRef, AnyRef]]
if (map == null) {
o.writeInt(-1, true)
} else {
o.writeInt(map.size(), true)
val iter = map.entrySet.iterator()
while (iter.hasNext) {
val entry = iter.next()
keyWriter(o, entry.getKey)
valueWriter(o, entry.getValue)
}
}
}
case ObjectType.BYTES =>
(o: Output, v: AnyRef) => {
val arr = v.asInstanceOf[Array[Byte]]
if (arr == null) {
o.writeInt(-1, true)
} else {
o.writeInt(arr.length, true)
o.writeBytes(arr)
}
}
}
}
private def writeNullable(wrapped: (Output, AnyRef) => Unit): (Output, AnyRef) => Unit = {
(o: Output, v: AnyRef) => {
if (v == null) {
o.write(NULL_BYTE)
} else {
o.write(NON_NULL_BYTE)
wrapped(o, v)
}
}
}
}
|
jahhulbert-ccri/geomesa
|
geomesa-features/geomesa-feature-kryo/src/main/scala/org/locationtech/geomesa/features/kryo/impl/KryoFeatureSerialization.scala
|
Scala
|
apache-2.0
| 7,444 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package types
import org.jetbrains.plugins.scala.extensions.ObjectExt
/**
* @author Alexander Podkhalyuzin
* Date: 13.03.2008
*/
trait ScParenthesisedTypeElement extends ScTypeElement with ScParenthesizedElement {
override protected val typeName = "TypeInParenthesis"
type Kind = ScTypeElement
override def innerElement: Option[ScTypeElement] = findChild(classOf[ScTypeElement])
override def sameTreeParent: Option[ScTypeElement] = getParent.asOptionOf[ScTypeElement]
}
object ScParenthesisedTypeElement {
def unapply(e: ScParenthesisedTypeElement): Option[ScTypeElement] = e.innerElement
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/base/types/ScParenthesisedTypeElement.scala
|
Scala
|
apache-2.0
| 695 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.spark.cloud.adl
import com.cloudera.spark.cloud.common.BasicIOTests
/**
* Azure's basic IO operations.
*/
class AdlBasicIOSuite extends BasicIOTests with AdlTestSetup {
init()
def init(): Unit = {
if (enabled) {
initFS()
}
}
}
|
hortonworks-spark/cloud-integration
|
cloud-examples/src/test/scala/com/cloudera/spark/cloud/adl/AdlBasicIOSuite.scala
|
Scala
|
apache-2.0
| 1,077 |
package json.schema.codegen
import java.io.File
import java.net.URI
import json.schema.parser.SchemaDocument
import scala.util.control.NonFatal
import scalaz.Scalaz._
trait Naming {
implicit class StringToolsO(v: Option[String]) {
def noneIfEmpty: Option[String] =
v match {
case Some(s) if s == null || s.isEmpty => none
case _ => v
}
}
def packageName(scope: URI): String = {
val dots = dotNotation(scope)
dots.take(dots.length - 1).mkString(".")
}
def className(scope: URI): String = {
val dots = dotNotation(scope)
val name = dots.lastOption.getOrElse(dots.head)
escapeReserved(underscoreToCamel(identifier(name))).capitalize
}
def className(schema: SchemaDocument[_], defaultName: Option[String]): SValidation[String] =
schema.id.toRightDisjunction("Schema has no Id").map(className) orElse defaultName
.toRightDisjunction("Default name not given")
.map(name => escapeReserved(underscoreToCamel(identifier(name))).capitalize)
def identifier(scope: URI): scalaz.Validation[String, String] = {
val str = scope.toString
val lastSlash: Int = str.lastIndexOf('/')
val lastSegment = (lastSlash >= 0) ? str.substring(lastSlash) | str
val noExtSegment = removeExtension(lastSegment)
identifier(noExtSegment.filter(c => c != '#')).some.noneIfEmpty
.toSuccess(s"Unable to extract identifier from $scope")
}
def isIdentifier(c: Char): Boolean = c.isLetterOrDigit || c == '_'
def isIdentifier(s: String): Boolean = !s.exists(!isIdentifier(_))
def identifier(s: String): String = s.map(c => isIdentifier(c) ? c | '_')
  def underscoreToCamel(name: String): String = "_([a-z\\d])".r.replaceAllIn(name, _.group(1).toUpperCase)
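  // e.g. underscoreToCamel("foo_bar_baz") == "fooBarBaz"; only a lowercase letter or digit
  // immediately after the underscore is promoted, so "foo__bar" becomes "foo_Bar".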
private def removeExtension(s: String) = {
val extIndex = s.lastIndexOf('.')
(extIndex >= 0) ? s.substring(0, extIndex) | s
}
private def dotNotation(scope: URI) = {
val fragment: String = scope.getFragment.some.noneIfEmpty.map(s => s.startsWith("/") ? s | "/" + s).getOrElse("")
// package from URI's fragment, path or host
lazy val fromURI: String = scope.getPath.some.noneIfEmpty.getOrElse("") + fragment
// package from file URI , using only the file name
val simpleScope: String =
try {
(scope.getScheme == "file") ? (removeExtension(
new File(new URI(scope.getScheme, scope.getHost, scope.getPath, null)).getName
) + fragment) | fromURI
} catch {
case NonFatal(e) => fromURI
}
val dottedString = removeExtension(simpleScope)
.map(c => Character.isJavaIdentifierPart(c) ? c | '.')
      .replaceAll("\\.+$", "")
      .replaceAll("^\\.+", "")
dottedString.split('.').map(s => escapeReserved(underscoreToCamel(identifier(s))))
}
def escapePropertyReserved(s: String): Option[String] = if (reservedKeywords.contains(s)) none else s.some
def escapeReserved(s: String): String = escapePropertyReserved(s).getOrElse('_' + s)
val reservedKeywords: Set[String]
}
|
VoxSupplyChain/json-schema-codegen
|
codegen/src/main/scala/json/schema/codegen/Naming.scala
|
Scala
|
apache-2.0
| 3,074 |
package controllers.images
import javax.inject._
import com.mohiva.play.silhouette.api.Silhouette
import controllers.WebJarAssets
import controllers.images.FormsData._
import controllers.blog.FormsData.categoryForm
import models.goods.Category
import models.images.{ Image, ImageCategoriesDAO, ImageInfo, ImagesDAO }
import play.api.Logger
import play.api.cache.CacheApi
import play.api.i18n.{ I18nSupport, MessagesApi }
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc._
import utils.auth.{ DefaultEnv, Roles }
import scala.concurrent.Future
class ImagesAdmin @Inject() (
imagesDAO: ImagesDAO,
imageCategoriesDAO: ImageCategoriesDAO,
cacheApi: CacheApi,
silhouette: Silhouette[DefaultEnv],
val messagesApi: MessagesApi,
implicit val webJarAssets: WebJarAssets
)
extends Controller with I18nSupport {
/**
* Shows AdminImageForm.
* @return
*/
def showAdminImageForm(name: String = "") = silhouette.SecuredAction(Roles.Admin).async { implicit request =>
imagesDAO.getImageInfo(name).flatMap { imageInfoO: Option[ImageInfo] =>
imageCategoriesDAO.listCategories.flatMap { allCategories: Seq[Category] =>
imageCategoriesDAO.getImageCategoryIDs(name).map { imageCategoryIDs: Seq[Int] =>
Ok(views.html.images.adminImages(
request.identity,
Seq.empty,
adminImageForm.fill(
if (imageInfoO.isEmpty) AdminImage.empty.copy(name = name)
else AdminImage.empty.copy(
name = imageInfoO.get.name,
content = imageInfoO.get.content,
albums = if (imageCategoryIDs.nonEmpty) Some(imageCategoryIDs) else None
)
),
allCategories
))
}
}
}
}
def handleAdminImageForm = silhouette.SecuredAction(Roles.Admin).async(parse.multipartFormData) { implicit request =>
adminImageForm.bindFromRequest().fold(
{ withErrors =>
Logger.error(s"handleAdminImageForm=> Bad form data ${withErrors.errors.mkString("; ")}")
Future(Redirect(controllers.images.routes.ImagesAdmin.showAdminImageForm(withErrors.data.getOrElse("name", "")))
.flashing("error" -> s"Инвалидные данные $withErrors"))
},
{ ok =>
ok.action match {
case "save" =>
val imageAndBytesO: Option[(Image, Array[Byte])] = request.body.file("new-image").flatMap { pic =>
Image.readImageFromFile(pic.ref.file, ok.name, ok.content, ok.resizeTo) match {
case Right(ok) => Some(ok)
case Left(error) => None
}
}
imagesDAO.insertOrUpdateImage(Image(ok.name, ok.content), ok.albums, imageAndBytesO.map(_._2)).map { _ =>
removeFromCache(ok.name)
Redirect(controllers.images.routes.ImagesAdmin.showAdminImageForm(ok.name)).flashing("success" -> s"${ok.name} изменен !")
}.recover {
case error =>
Redirect(controllers.images.routes.ImagesAdmin.showAdminImageForm(ok.name))
.flashing("error" -> s"Ошибка при попытке изменения ${ok.name}! ${error.getMessage}")
}
case "search" if ok.find._1.nonEmpty && ok.find._2.nonEmpty =>
imagesDAO.getImageInfo(ok.name).flatMap { imageOpt: Option[ImageInfo] =>
(imagesDAO.findImages _).tupled(ok.find).flatMap { foundImages: Seq[ImageInfo] =>
imageCategoriesDAO.listCategories.map { imagesCategories: Seq[Category] =>
val form = adminImageForm.fill(ok)
Ok(views.html.images.adminImages(
request.identity,
foundImages,
form,
imagesCategories
))
}
}
}
case "delete" =>
imagesDAO.deleteImage(ok.name).map { _ =>
removeFromCache(ok.name)
Redirect(controllers.images.routes.ImagesAdmin.showAdminImageForm(ok.name)).flashing("success" -> s"${ok.name} удален !")
}
case unknownAction =>
Logger.error(s"handleAdminImageForm=> unknownAction. Submitted form is " + ok)
Future(Redirect(controllers.images.routes.ImagesAdmin.showAdminImageForm(ok.name)).flashing("error" -> s"Неизвестная комманда $unknownAction"))
}
}
)
}
private def getImageCacheKey(name: String) = s"/img/${name}"
private def getThumbnailCacheKey(name: String) = s"/150x150/${name}"
private def removeFromCache(name: String) = {
cacheApi.remove(getImageCacheKey(name))
cacheApi.remove(getThumbnailCacheKey(name))
}
def jsonAllImageInfo = silhouette.SecuredAction(Roles.Admin).async { implicit request =>
imagesDAO.listAllImageInfoJSON.map { imagesAsJson =>
Ok(imagesAsJson)
}
}
// PHOTO ALBUMS CATEGORIES
def adminAlbumsCategories = silhouette.SecuredAction(Roles.Admin).async { implicit request =>
imageCategoriesDAO.listCategories.map { categories =>
val submitTo = controllers.images.routes.ImagesAdmin.adminAlbumsCategoriesHandle
Ok(views.html.adminCategories(request.identity, categories, submitTo))
}
}
def adminAlbumsCategoriesHandle = silhouette.SecuredAction(Roles.Admin).async { implicit request =>
categoryForm.bindFromRequest().fold(
error => Future(Redirect(controllers.images.routes.ImagesAdmin.adminAlbumsCategories)
.flashing("error" -> ("Инвалидные данные: " + error.errors.mkString("\n")))),
categoryEdit => {
val category = categoryEdit.category
(categoryEdit.action match {
case "save" =>
category.id match {
case Some(id) =>
imageCategoriesDAO.upsertCategory(category).map(_ => "ok" -> s"Внесены изменения в БД, категория: $category")
case None =>
imageCategoriesDAO.addCategory(category).map(_ => "ok" -> s"Новая категория добавлена в БД: $category")
}
case "del" =>
if (category.id.isDefined)
imageCategoriesDAO.deleteCategory(category.id.get).map(_ => "ok" -> s"Из БД удалена категория: $category")
else
Future("error" -> s"ID катерогии не указан, удаление не возможно !")
}).map {
case (status, msg) =>
Redirect(controllers.images.routes.ImagesAdmin.adminAlbumsCategories).flashing(status -> msg)
}
}
)
}
}
|
stanikol/walnuts
|
server/app/controllers/images/ImagesAdmin.scala
|
Scala
|
apache-2.0
| 6,729 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessactivities
import com.google.inject.Inject
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms.{EmptyForm, Form2, InvalidForm, ValidForm}
import models.businessactivities.{BusinessActivities, WhoIsYourAccountant, WhoIsYourAccountantIsUk}
import play.api.mvc.MessagesControllerComponents
import services.AutoCompleteService
import utils.{AuthAction, ControllerHelper}
import views.html.businessactivities.who_is_your_accountant_is_uk_address
class WhoIsYourAccountantIsUkController @Inject()(val dataCacheConnector: DataCacheConnector,
val autoCompleteService: AutoCompleteService,
val authAction: AuthAction,
val ds: CommonPlayDependencies,
val cc: MessagesControllerComponents,
who_is_your_accountant_is_uk_address: who_is_your_accountant_is_uk_address) extends AmlsBaseController(ds, cc) {
def get(edit: Boolean = false) = authAction.async {
implicit request =>
dataCacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key) map {
response =>
val form = (for {
businessActivities <- response
isUk <- businessActivities.whoIsYourAccountant.flatMap(acc => acc.isUk)
} yield {
Form2[WhoIsYourAccountantIsUk](isUk)
}).getOrElse(EmptyForm)
Ok(who_is_your_accountant_is_uk_address(form, edit, ControllerHelper.accountantName(response)))
}
}
def post(edit : Boolean = false) = authAction.async {
implicit request =>
Form2[WhoIsYourAccountantIsUk](request.body) match {
case f: InvalidForm =>
dataCacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key) map {
response => BadRequest(who_is_your_accountant_is_uk_address(f, edit, ControllerHelper.accountantName(response)))
}
case ValidForm(_, data) =>
for {
businessActivity <- dataCacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key)
_ <- dataCacheConnector.save[BusinessActivities](request.credId, BusinessActivities.key, updateModel(businessActivity, data))
} yield if (data.isUk) {
Redirect(routes.WhoIsYourAccountantUkAddressController.get(edit))
} else {
Redirect(routes.WhoIsYourAccountantNonUkAddressController.get(edit))
}
}
}
private def updateModel(ba: BusinessActivities, data: WhoIsYourAccountantIsUk): BusinessActivities = {
ba.copy(whoIsYourAccountant = ba.whoIsYourAccountant.map(accountant =>
if(changedIsUk(accountant, data)) {
accountant.isUk(data).address(None)
} else {
accountant.isUk(data)
})
)
}
private def changedIsUk(accountant: WhoIsYourAccountant, newData: WhoIsYourAccountantIsUk): Boolean =
accountant.address.map(add => add.isUk).exists(isUk => isUk != newData.isUk)
}
|
hmrc/amls-frontend
|
app/controllers/businessactivities/WhoIsYourAccountantIsUkController.scala
|
Scala
|
apache-2.0
| 3,782 |
package achilles
/**
* Created by panda on 3/24/15.
*/
import breeze.linalg._
trait Message
case class DocTopicDist(docTopic: SparseVector[Double]) extends Message
case class TermTopicDist(termTopic: SparseVector[Double]) extends Message
case class UpdateDocTopic(from: Long, to: Long, timestamp: Long) extends Message
case class UpdateTermTopic(from: Long, to: Long, timestamp: Long) extends Message
case class GetDocTopic(docId: Long, topicId: Long, timestamp: Long) extends Message
case class GetTermTopic(termId: Long, topicId: Long, timestamp: Long) extends Message
case class SampleOneTopic(timestamp: Long) extends Message
case class RandomTopic(numTopics: Long) extends Message
case class ConsumeSourceBlock(sourceBlock: Iterator[Iterable[(String, Int)]]) extends Message
|
yinxusen/achilles
|
src/main/scala/achilles/Message.scala
|
Scala
|
apache-2.0
| 789 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.mongodb.planner
import slamdata.Predef.{Map => _, _}
import quasar.fp._
import quasar.qscript.{MapFuncsCore => MF, _}
import matryoshka.{Hole => _, _}
import matryoshka.implicits._
import scalaz._, Scalaz._
object mapBeforeSort {
def apply[T[_[_]]: CorecursiveT, M[_]: Applicative]: Trans[QScriptCore[T, ?], M] =
new Trans[QScriptCore[T, ?], M] {
private def projectIndex(i: Int): FreeMap[T] = Free.roll(MFC(MF.ProjectIndex(HoleF[T], MF.IntLit(i))))
def trans[A, G[_]: Functor]
(GtoF: PrismNT[G, QScriptCore[T, ?]])
(implicit TC: Corecursive.Aux[A, G], TR: Recursive.Aux[A, G])
: QScriptCore[T, A] => M[G[A]] = qs => doTrans[A, G](GtoF).apply(qs).point[M]
private def doTrans[A, G[_]: Functor]
(GtoF: PrismNT[G, QScriptCore[T, ?]])
(implicit TC: Corecursive.Aux[A, G], TR: Recursive.Aux[A, G])
: QScriptCore[T, A] => G[A] = {
case qs @ Map(src, fm) =>
GtoF.get(src.project) match {
case Some(Sort(innerSrc, bucket, order)) =>
val innerMap =
GtoF.reverseGet(Map(
innerSrc,
MapFuncCore.StaticArray(List(fm, HoleF[T])))).embed
val m = Map(
GtoF.reverseGet(Sort(innerMap,
bucket.map(_ >> projectIndex(1)),
order.map {
case (fm, dir) =>
(fm >> projectIndex(1), dir)
})).embed,
projectIndex(0))
GtoF.reverseGet(m)
case _ => GtoF.reverseGet(qs)
}
case x => GtoF.reverseGet(x)
}
}
}
|
jedesah/Quasar
|
mongodb/src/main/scala/quasar/physical/mongodb/planner/mapBeforeSort.scala
|
Scala
|
apache-2.0
| 2,277 |
package scalajsreact.template.routes
import japgolly.scalajs.react.ReactElement
import japgolly.scalajs.react.extra.router.RouterConfigDsl
import scalajsreact.template.components.items.{ItemsInfo, Item1Data, Item2Data}
import scalajsreact.template.pages.ItemsPage
sealed abstract class Item(val title: String,
val routerPath: String,
val render: () => ReactElement)
object Item {
case object Info extends Item("Info","info",() => ItemsInfo())
case object Item1 extends Item("Item1","item1",() => Item1Data())
case object Item2 extends Item("Item2","item2",() => Item2Data())
val menu = Vector(Info,Item1,Item2)
val routes = RouterConfigDsl[Item].buildRule { dsl =>
import dsl._
menu.map(i =>
staticRoute(i.routerPath, i) ~> renderR(r => ItemsPage(props = ItemsPage.Props(i, r)))
).reduce(_ | _)
}
}
|
christobill/scalajs-react-template
|
src/main/scala/scalajsreact/template/routes/Item.scala
|
Scala
|
apache-2.0
| 842 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package varys.ui
import javax.servlet.http.{HttpServletResponse, HttpServletRequest}
import net.liftweb.json.{JValue, pretty, render}
import org.eclipse.jetty.server.{Server, Request, Handler}
import org.eclipse.jetty.server.handler.{ResourceHandler, HandlerList, ContextHandler, AbstractHandler}
import org.eclipse.jetty.util.thread.QueuedThreadPool
import scala.annotation.tailrec
import scala.util.{Try, Success, Failure}
import scala.xml.Node
import varys.Logging
/** Utilities for launching a web server using Jetty's HTTP Server class */
private[varys] object JettyUtils extends Logging {
// Base type for a function that returns something based on an HTTP request. Allows for
// implicit conversion from many types of functions to jetty Handlers.
type Responder[T] = HttpServletRequest => T
// Conversions from various types of Responder's to jetty Handlers
implicit def jsonResponderToHandler(responder: Responder[JValue]): Handler =
createHandler(responder, "text/json", (in: JValue) => pretty(render(in)))
implicit def htmlResponderToHandler(responder: Responder[Seq[Node]]): Handler =
createHandler(responder, "text/html", (in: Seq[Node]) => "<!DOCTYPE html>" + in.toString)
implicit def textResponderToHandler(responder: Responder[String]): Handler =
createHandler(responder, "text/plain")
def createHandler[T <% AnyRef](responder: Responder[T], contentType: String,
extractFn: T => String = (in: Any) => in.toString): Handler = {
new AbstractHandler {
def handle(target: String,
baseRequest: Request,
request: HttpServletRequest,
response: HttpServletResponse) {
response.setContentType("%s;charset=utf-8".format(contentType))
response.setStatus(HttpServletResponse.SC_OK)
baseRequest.setHandled(true)
val result = responder(request)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
response.getWriter().println(extractFn(result))
}
}
}
/** Creates a handler that always redirects the user to a given path */
def createRedirectHandler(newPath: String): Handler = {
new AbstractHandler {
def handle(target: String,
baseRequest: Request,
request: HttpServletRequest,
response: HttpServletResponse) {
response.setStatus(302)
response.setHeader("Location", baseRequest.getRootURL + newPath)
baseRequest.setHandled(true)
}
}
}
/** Creates a handler for serving files from a static directory */
def createStaticHandler(resourceBase: String): ResourceHandler = {
val staticHandler = new ResourceHandler
Option(getClass.getClassLoader.getResource(resourceBase)) match {
case Some(res) =>
staticHandler.setResourceBase(res.toString)
case None =>
throw new Exception("Could not find resource path for Web UI: " + resourceBase)
}
staticHandler
}
/**
* Attempts to start a Jetty server at the supplied ip:port which uses the supplied handlers.
*
* If the desired port number is contended, continues incrementing ports until a free port is
* found. Returns the chosen port and the jetty Server object.
*/
def startJettyServer(ip: String, port: Int, handlers: Seq[(String, Handler)]): (Server, Int) = {
val handlersToRegister = handlers.map { case(path, handler) =>
val contextHandler = new ContextHandler(path)
contextHandler.setHandler(handler)
contextHandler.asInstanceOf[org.eclipse.jetty.server.Handler]
}
val handlerList = new HandlerList
handlerList.setHandlers(handlersToRegister.toArray)
@tailrec
def connect(currentPort: Int): (Server, Int) = {
val server = new Server(currentPort)
val pool = new QueuedThreadPool
pool.setDaemon(true)
server.setThreadPool(pool)
server.setHandler(handlerList)
Try { server.start() } match {
case s: Success[_] =>
(server, server.getConnectors.head.getLocalPort)
case f: Failure[_] =>
server.stop()
logInfo("Failed to create UI at port, %s. Trying again.".format(currentPort))
logInfo("Error was: " + f.toString)
connect((currentPort + 1) % 65536)
}
}
connect(port)
}
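// Illustrative usage sketch (not part of the original source; the paths, port and
// page content below are hypothetical). The implicit Responder conversions above
// let plain functions be registered directly as handlers:
//
//   val indexPage: Responder[Seq[Node]] = _ => <h1>Hello</h1>
//   val handlers = Seq(
//     ("/static", createStaticHandler("webui/static")),
//     ("/", indexPage: Handler))
//   val (server, boundPort) = startJettyServer("0.0.0.0", 8080, handlers)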
}
|
frankfzw/varys
|
core/src/main/scala/varys/ui/JettyUtils.scala
|
Scala
|
apache-2.0
| 5,168 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import slamdata.Predef.{Map => SMap, _}
import quasar.IdStatus
import quasar.common.effect.NameGenerator
import quasar.contrib.iota._
import quasar.contrib.scalaz._
import quasar.contrib.scalaz.MonadState_
import quasar.fp._
import quasar.fp.ski.κ
import quasar.qscript.{FreeMapA, OnUndefined, RecFreeMap}
import monocle.macros.Lenses
import matryoshka._
import matryoshka.data._
import matryoshka.implicits._
import matryoshka.patterns.EnvT
import scalaz.{\/-, -\/, \/, Cofree, DList, Id, Monad, Monoid, MonadState, Scalaz, Show, State, StateT}, Scalaz._
@Lenses
final case class QSUGraph[T[_[_]]](
root: Symbol,
vertices: QSUVerts[T]) {
/**
* Uniquely merge the graphs, retaining the root from the right.
*/
def ++:(left: QSUGraph[T]): QSUGraph[T] = QSUGraph(root, left.vertices ++ vertices)
/**
* Uniquely merge the graphs, retaining the root from the left.
*/
def :++(right: QSUGraph[T]): QSUGraph[T] = right ++: this
/** A bottom up (leaves first) monoidal fold of the graph. */
def foldMapUpM[F[_]: Monad, A: Monoid](f: QSUGraph[T] => F[A]): F[A] = {
type VisitedT[X[_], A] = StateT[X, Set[Symbol], A]
type G[A] = VisitedT[F, A]
val MS = MonadState[G, Set[Symbol]]
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def inner(g: QSUGraph[T]): G[A] =
for {
visited <- MS.get
a <- if (visited(g.root))
mzero[A].point[G]
else
for {
_ <- MS.put(visited + g.root)
aSub <- g.unfold.foldMapM(inner)
aG <- f(g).liftM[VisitedT]
} yield aSub |+| aG
} yield a
inner(this).eval(Set())
}
def foldMapDownM[F[_]: Monad, A: Monoid](f: QSUGraph[T] => F[A]): F[A] = {
type VisitedT[X[_], A] = StateT[X, Set[Symbol], A]
type G[A] = VisitedT[F, A]
val MS = MonadState[G, Set[Symbol]]
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def inner(g: QSUGraph[T]): G[A] =
for {
visited <- MS.get
a <- if (visited(g.root))
mzero[A].point[G]
else
for {
_ <- MS.put(visited + g.root)
aG <- f(g).liftM[VisitedT]
aSub <- g.unfold.foldMapM(inner)
} yield aG |+| aSub
} yield a
inner(this).eval(Set())
}
def foldMapUp[A: Monoid](f: QSUGraph[T] => A): A =
foldMapUpM[Id, A](f)
def foldMapDown[A: Monoid](f: QSUGraph[T] => A): A =
foldMapDownM[Id, A](f)
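// Illustrative sketch (not part of the original source): with scalaz's additive
// Monoid[Int] in scope, the number of distinct vertices reachable from the root
// of a hypothetical `graph` value can be computed as a monoidal fold, e.g.
//
//   val reachableVertexCount: Int = graph.foldMapDown(_ => 1)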
def refocus(node: Symbol): QSUGraph[T] =
copy(root = node)
/** Removes the `src` vertex, replacing any references to it with `target`. */
def replace(src: Symbol, target: Symbol): QSUGraph[T] = {
import QScriptUniform._
def replaceIfSrc(sym: Symbol): Symbol =
(sym === src) ? target | sym
if (vertices.isDefinedAt(src) && vertices.isDefinedAt(target))
QSUGraph(
replaceIfSrc(root),
(vertices - src) mapValues {
case JoinSideRef(s) => JoinSideRef(replaceIfSrc(s))
case other => other map replaceIfSrc
})
else
this
}
// the same as replace, but whenever a node is modified, it changes the name
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def replaceWithRename[
F[_]: Monad: NameGenerator: MonadState_[?[_], QSUGraph.RevIdx[T]]](
prefix: String,
src: Symbol,
target: Symbol): F[QSUGraph[T]] = {
import QScriptUniform._
val withName = QSUGraph.withName[T, F](prefix) _
if (src =/= target) {
if (root === src) {
refocus(target).point[F]
} else {
unfold match {
case JoinSideRef(`src`) =>
withName(JoinSideRef[T, Symbol](target)).map(_ :++ this)
case _ =>
for {
pattern <- unfold.traverse(_.replaceWithRename[F](prefix, src, target))
bare = pattern.map(_.root)
renamed <- withName(bare)
patternVerts = pattern.foldLeft(SMap[Symbol, QScriptUniform[T, Symbol]]())(_ ++ _.vertices)
back = this ++: renamed
} yield back.copy(vertices = back.vertices ++ patternVerts)
}
}
} else {
this.point[F]
}
}
def overwriteAtRoot(qsu: QScriptUniform[T, Symbol]): QSUGraph[T] =
QSUGraph(root, vertices.updated(root, qsu))
def corewriteM[F[_]: Monad](pf: PartialFunction[QSUGraph[T], F[QSUGraph[T]]]): F[QSUGraph[T]] = {
type ModifiedT[G[_], A] = StateT[G, QSUVerts[T], A]
val MS = MonadState[ModifiedT[F, ?], QSUVerts[T]]
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def inner(g: QSUGraph[T]): F[QSUGraph[T]] =
pf.applyOrElse[QSUGraph[T], F[QSUGraph[T]]](g, _ => g.point[F]) >>= { transformed =>
val changed: QSUVerts[T] = transformed.vertices
transformed.unfold.traverse[ModifiedT[F, ?], QSUGraph[T]] { g0 =>
for {
prevVertices <- MS.get
newGraph <- inner(g0.copy(vertices = g0.vertices ++ prevVertices)).liftM[ModifiedT]
_ <- MS.modify(_ ++ newGraph.vertices)
} yield newGraph
}.map(qsu => QSUGraph.refold(g.root, qsu)).eval(changed)
}
inner(this)
}
/**
* Allows rewriting of arbitrary subgraphs. Rewrites are
* applied in a bottom-up (leaves-first) order, which avoids
* ambiguities when changing nodes that are visible to subsequent
* rewrites. The one caveat here is that all "changes" to the graph
* must be additive in nature. You can never remove a node.
* Rewrites to the definition of a node are valid, but you cannot
* remove it from the graph, as other nodes will still point to it.
* This becomes relevant if you want to transform some subgraph
* g into f(g). When this happens, you must internally generate a
* new symbol for g and rewrite your subgraph to point to that new
* g, then the composite graph f(g) must be given the original symbol.
* As a safety check, because this case is just entirely unsound, the
* symbol you return at the root is entirely ignored in favor of the
* original root at that locus.
*/
def rewriteM[F[_]: Monad](pf: PartialFunction[QSUGraph[T], F[QSUGraph[T]]]): F[QSUGraph[T]] = {
type RewriteS = SMap[Symbol, QSUGraph[T]]
type VisitedT[X[_], A] = StateT[X, RewriteS, A]
type G[A] = VisitedT[F, A]
val MS = MonadState[G, RewriteS]
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def inner(g: QSUGraph[T]): G[QSUGraph[T]] = {
for {
visited <- MS.get
back <- visited.get(g.root) match {
case Some(result) =>
result.point[G]
case None =>
for {
recursive <- g.unfold traverse { sg =>
for {
previsM <- MS.get
previs = previsM.keySet
sg2 <- inner(sg)
postvisM <- MS.get
postvis = postvisM.keySet
} yield (sg2, postvis &~ previs)
}
index = recursive.foldLeft[SMap[Symbol, Set[Symbol]]](SMap()) {
case (acc, (sg, snapshot)) =>
val sym = sg.root
if (acc.contains(sym))
acc.updated(sym, acc(sym).union(snapshot))
else
acc + (sym -> snapshot)
}
rewritten = QSUGraph.refold[T](g.root, recursive.map(_._1))
sum = index.values.reduceOption(_ union _).getOrElse(Set())
// remove the keys which were touched from the original
preimage = rewritten.vertices -- sum
collapsed = recursive.foldLeft[QSUVerts[T]](preimage) {
case (acc, (sg, _)) =>
// we need to explicitly remove the rewritten.root
// to avoid it being overwritten by an old version of itself
acc ++ (sg.vertices -- (sum &~ index(sg.root)) - rewritten.root)
}
self2 = rewritten.copy(vertices = collapsed)
applied <- if (pf.isDefinedAt(self2))
pf(self2).liftM[VisitedT]
else
self2.point[G]
// prevent users from building invalid graphs
back = applied.copy(root = self2.root)
_ <- MS.modify(_ + (g.root -> back))
} yield back
}
} yield back
}
inner(this).eval(SMap())
}
def rewrite(pf: PartialFunction[QSUGraph[T], QSUGraph[T]]): QSUGraph[T] =
rewriteM[Id](pf)
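// Illustrative sketch (not part of the original source; `graph` is a hypothetical
// value): rewrites may only add or overwrite vertices, never remove them, so a
// matched root is redefined in place via overwriteAtRoot. For example, collapsing
// a nested Distinct:
//
//   val simplified = graph rewrite {
//     case g @ Distinct(Distinct(src)) => g.overwriteAtRoot(QSU.Distinct(src.root))
//   }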
// projects the root of the graph (which we assume exists)
def unfold: QScriptUniform[T, QSUGraph[T]] =
vertices(root).map(refocus)
/**
* Note that because this is using SMap, we cannot guarantee
* uniqueness is handled correctly if nodes include Free.
* Make sure you do not rely on this. Also please note that this
* function is linear in the number of nodes, so try not to call
* it too often.
*/
def generateRevIndex: QSUGraph.RevIdx[T] =
vertices map { case (key, value) => value -> key }
}
object QSUGraph extends QSUGraphInstances {
import quasar.qsu.{QScriptUniform => QSU}
// The pattern functor for `QSUGraph[T]`.
type QSUPattern[T[_[_]], A] = EnvT[Symbol, QSU[T, ?], A]
object QSUPattern {
def apply[T[_[_]], A](root: Symbol, qsu: QSU[T, A]): QSUPattern[T, A] =
EnvT.envT(root -> qsu)
def unapply[T[_[_]], A](pattern: QSUPattern[T, A]): Option[(Symbol, QSU[T, A])] =
pattern.run.some
}
type RevIdx[T[_[_]]] = SMap[QScriptUniform[T, Symbol], Symbol]
// we assume that the vertices of the constituents are the same
// refold is the inverse of unfold
def refold[T[_[_]]](sym: Symbol, qsu: QScriptUniform[T, QSUGraph[T]]): QSUGraph[T] = {
import scalaz.{Value, WriterT}
// left-Monoid, based on the assumption that all
// vertex changes are additive
implicit val vm: Monoid[QSUVerts[T]] = new Monoid[QSUVerts[T]] {
val zero = SMap[Symbol, QScriptUniform[T, Symbol]]()
def append(left: QSUVerts[T], right: => QSUVerts[T]) =
left ++ right
}
val collapse = qsu traverse { g =>
WriterT.put(Value(g.root))(g.vertices)
}
val (verts, pf) = collapse.run.value
QSUGraph[T](sym, verts + (sym -> pf))
}
def withName[T[_[_]], F[_]: Monad: NameGenerator](
prefix: String)(
node: QScriptUniform[T, Symbol])(
implicit MS: MonadState_[F, RevIdx[T]]): F[QSUGraph[T]] = {
for {
reverse <- MS.get
back <- reverse.get(node) match {
case Some(sym) =>
QSUGraph[T](root = sym, SMap(sym -> node)).point[F]
case None =>
for {
sym <- freshSymbol[F](prefix)
_ <- MS.put(reverse + (node -> sym))
} yield QSUGraph[T](root = sym, SMap(sym -> node))
}
} yield back
}
type NodeNames[T[_[_]]] = SMap[QSU[T, Symbol], Symbol]
type Renames = SMap[Symbol, Symbol]
type NameState[T[_[_]], F[_]] = MonadState_[F, (NodeNames[T], Renames)]
def NameState[T[_[_]], F[_]: NameState[T, ?[_]]] =
MonadState_[F, (NodeNames[T], Renames)]
/** Construct a QSUGraph from a tree of `QScriptUniform` by compacting
* common subtrees, providing a mapping from the provided attribute to
* the final graph vertex names.
*/
def fromAnnotatedTree[T[_[_]]: RecursiveT](qsu: Cofree[QSU[T, ?], Option[Symbol]])
: (Renames, QSUGraph[T]) = {
type F[A] = StateT[State[Long, ?], (NodeNames[T], Renames), A]
qsu.cataM(fromTreeƒ[T, F]).run((SMap(), SMap())).map {
case ((_, s), r) => (s, r)
}.eval(0)
}
/** Construct a QSUGraph from a tree of `QScriptUniform` by compacting
* common subtrees.
*/
def fromTree[T[_[_]]: RecursiveT](qsu: T[QSU[T, ?]]): QSUGraph[T] =
fromAnnotatedTree[T](
qsu.cata(attributeAlgebra[QSU[T, ?], Option[Symbol]](κ(None))))._2
private def fromTreeƒ[T[_[_]], F[_]: Monad: NameGenerator: NameState[T, ?[_]]]
: AlgebraM[F, EnvT[Option[Symbol], QSU[T, ?], ?], QSUGraph[T]] = {
case EnvT((sym, qsu)) =>
for {
pair <- NameState[T, F].get
(nodes, renames) = pair
node = qsu.map(_.root)
name <- nodes.get(node).getOrElseF(
NameGenerator[F] prefixedName "__fromTree" map (Symbol(_)))
_ <- NameState[T, F].put((nodes + (node -> name), sym.fold(renames)(s => renames + (s -> name))))
} yield qsu.foldRight(QSUGraph(name, SMap(name -> node)))(_ ++: _)
}
/**
* This object contains extraction helpers in terms of QSU nodes.
*/
object Extractors {
import quasar.common.data.Data
import quasar.qscript.{
FreeMap,
Hole,
JoinSide,
JoinSide3,
MapFunc,
MapFuncsCore,
MapFuncCore,
SrcHole
}
import pathy.Path
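// Illustrative sketch (not part of the original source): these extractors allow
// pattern matching directly on the node at a graph's root, e.g.
//
//   def rootKind[T[_[_]]](g: QSUGraph[T]): String = g match {
//     case Map(_, _)                   => "map"
//     case LeftShift(_, _, _, _, _, _) => "leftshift"
//     case Unreferenced()              => "unreferenced"
//     case _                           => "other"
//   }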
object AutoJoin2 {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.AutoJoin2[T, QSUGraph[T]] => QSU.AutoJoin2.unapply(g)
case _ => None
}
}
object AutoJoin3 {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.AutoJoin3[T, QSUGraph[T]] => QSU.AutoJoin3.unapply(g)
case _ => None
}
}
object QSAutoJoin {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.QSAutoJoin[T, QSUGraph[T]] => QSU.QSAutoJoin.unapply(g)
case _ => None
}
}
object GroupBy {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.GroupBy[T, QSUGraph[T]] => QSU.GroupBy.unapply(g)
case _ => None
}
}
object DimEdit {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.DimEdit[T, QSUGraph[T]] => QSU.DimEdit.unapply(g)
case _ => None
}
}
object LPJoin {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.LPJoin[T, QSUGraph[T]] => QSU.LPJoin.unapply(g)
case _ => None
}
}
object ThetaJoin {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.ThetaJoin[T, QSUGraph[T]] => QSU.ThetaJoin.unapply(g)
case _ => None
}
}
object Read {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Read[T, QSUGraph[T]] => QSU.Read.unapply(g)
case _ => None
}
}
object Unary {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Unary[T, QSUGraph[T]] => QSU.Unary.unapply(g)
case _ => None
}
}
object Map {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Map[T, QSUGraph[T]] => QSU.Map.unapply(g)
case _ => None
}
}
object Transpose {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Transpose[T, QSUGraph[T]] => QSU.Transpose.unapply(g)
case _ => None
}
}
object LeftShift {
def unapply[T[_[_]]](g: QSUGraph[T]): Option[(QSUGraph[T], RecFreeMap[T], IdStatus, OnUndefined, FreeMapA[T, QSU.ShiftTarget], QSU.Rotation)] = g.unfold match {
case g: QSU.LeftShift[T, QSUGraph[T]] => QSU.LeftShift.unapply(g)
case _ => None
}
}
object MultiLeftShift {
def unapply[T[_[_]]](g: QSUGraph[T]): Option[(QSUGraph[T], List[(FreeMap[T], IdStatus, QSU.Rotation)], OnUndefined, FreeMapA[T, Access[Hole] \/ Int])] = g.unfold match {
case g: QSU.MultiLeftShift[T, QSUGraph[T]] => QSU.MultiLeftShift.unapply(g)
case _ => None
}
}
object LPReduce {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.LPReduce[T, QSUGraph[T]] => QSU.LPReduce.unapply(g)
case _ => None
}
}
object QSReduce {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.QSReduce[T, QSUGraph[T]] => QSU.QSReduce.unapply(g)
case _ => None
}
}
object Distinct {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Distinct[T, QSUGraph[T]] => QSU.Distinct.unapply(g)
case _ => None
}
}
object LPSort {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.LPSort[T, QSUGraph[T]] => QSU.LPSort.unapply(g)
case _ => None
}
}
object QSSort {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.QSSort[T, QSUGraph[T]] => QSU.QSSort.unapply(g)
case _ => None
}
}
object Union {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Union[T, QSUGraph[T]] => QSU.Union.unapply(g)
case _ => None
}
}
object Subset {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.Subset[T, QSUGraph[T]] => QSU.Subset.unapply(g)
case _ => None
}
}
object LPFilter {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.LPFilter[T, QSUGraph[T]] => QSU.LPFilter.unapply(g)
case _ => None
}
}
object QSFilter {
def unapply[T[_[_]]](g: QSUGraph[T]): Option[(QSUGraph[T], RecFreeMap[T])] = g.unfold match {
case g: QSU.QSFilter[T, QSUGraph[T]] => QSU.QSFilter.unapply(g)
case _ => None
}
}
object Unreferenced {
def unapply[T[_[_]]](g: QSUGraph[T]): Boolean = g.unfold match {
case g: QSU.Unreferenced[T, QSUGraph[T]] => QSU.Unreferenced.unapply(g)
case _ => false
}
}
object JoinSideRef {
def unapply[T[_[_]]](g: QSUGraph[T]) = g.unfold match {
case g: QSU.JoinSideRef[T, QSUGraph[T]] => QSU.JoinSideRef.unapply(g)
case _ => None
}
}
object AutoJoin2C {
def unapply[T[_[_]]](qgraph: QSUGraph[T])(
implicit IC: MapFuncCore[T, ?] :<<: MapFunc[T, ?])
: Option[(QSUGraph[T], QSUGraph[T], MapFuncCore[T, JoinSide])] = qgraph match {
case AutoJoin2(left, right, fm) =>
fm.resumeTwice match {
case \/-(_) => None
case -\/(IC(func)) => func.traverse {
case -\/(_) => None
case \/-(side) => Some(side)
}.flatMap(f => Some((left, right, f)))
}
case _ => None
}
}
object AutoJoin3C {
def unapply[T[_[_]]](qgraph: QSUGraph[T])(
implicit IC: MapFuncCore[T, ?] :<<: MapFunc[T, ?])
: Option[(QSUGraph[T], QSUGraph[T], QSUGraph[T], MapFuncCore[T, JoinSide3])] = qgraph match {
case AutoJoin3(left, center, right, fm) =>
fm.resumeTwice match {
case \/-(_) => None
case -\/(IC(func)) => func.traverse {
case -\/(_) => None
case \/-(side) => Some(side)
}.flatMap(f => Some((left, center, right, f)))
}
case _ => None
}
}
object DataConstant {
def unapply[T[_[_]]: BirecursiveT](qgraph: QSUGraph[T])(
implicit IC: MapFuncCore[T, ?] :<<: MapFunc[T, ?]): Option[Data] = qgraph match {
case Unary(Unreferenced(), IC(MapFuncsCore.Constant(ejson))) =>
Some(ejson.cata(Data.fromEJson))
case _ => None
}
}
object DataConstantMapped {
def unapply[T[_[_]]: BirecursiveT](qgraph: QSUGraph[T])(
implicit IC: MapFuncCore[T, ?] :<<: MapFunc[T, ?]): Option[Data] = qgraph match {
case Map(Unreferenced(), FMFC1(MapFuncsCore.Constant(ejson))) =>
Some(ejson.cata(Data.fromEJson))
case _ => None
}
}
// TODO doesn't guarantee only one function; could be more!
object FMFC1 {
def unapply[T[_[_]]](fm: RecFreeMap[T])(
implicit IC: MapFuncCore[T, ?] :<<: MapFunc[T, ?]): Option[MapFuncCore[T, Hole]] = {
fm.linearize.resume.swap.toOption collect {
case IC(mfc) => mfc.map(_ => SrcHole: Hole)
}
}
}
object TRead {
@SuppressWarnings(Array("org.wartremover.warts.Equals"))
def unapply[T[_[_]]: BirecursiveT](qgraph: QSUGraph[T])(
implicit IC: MapFuncCore[T, ?] :<<: MapFunc[T, ?]): Option[String] = qgraph match {
case Read(path, _) =>
for {
(front, end) <- Path.peel(path)
file <- end.toOption
if Path.peel(front).isEmpty
} yield file.value
case _ => None
}
}
}
}
sealed abstract class QSUGraphInstances extends QSUGraphInstances0 {
import QSUGraph._
implicit def corecursive[T[_[_]]]: Corecursive.Aux[QSUGraph[T], QSUPattern[T, ?]] =
birecursive[T]
implicit def recursive[T[_[_]]]: Recursive.Aux[QSUGraph[T], QSUPattern[T, ?]] =
birecursive[T]
implicit def show[T[_[_]]: ShowT]: Show[QSUGraph[T]] =
Show.shows { g =>
val assocs = g.foldMapDown(sg => DList((sg.root, sg.vertices(sg.root))))
s"QSUGraph(${g.root.shows})[\\n" +
printMultiline(assocs.toList) +
"\\n]"
}
}
sealed abstract class QSUGraphInstances0 {
import QSUGraph._
implicit def birecursive[T[_[_]]]: Birecursive.Aux[QSUGraph[T], QSUPattern[T, ?]] =
Birecursive.algebraIso(φ[T], ψ[T])
////
// only correct given compacted subgraphs which agree on names
private def φ[T[_[_]]]: Algebra[QSUPattern[T, ?], QSUGraph[T]] = {
case QSUPattern(root, qsu) =>
val initial: QSUGraph[T] = QSUGraph[T](root, SMap(root -> qsu.map(_.root)))
qsu.foldRight(initial) {
case (graph, acc) => graph ++: acc // retain the root from the right
}
}
private def ψ[T[_[_]]]: Coalgebra[QSUPattern[T, ?], QSUGraph[T]] = graph => {
QSUPattern[T, QSUGraph[T]](graph.root, graph.unfold)
}
}
|
slamdata/slamengine
|
qsu/src/main/scala/quasar/qsu/QSUGraph.scala
|
Scala
|
apache-2.0
| 22,315 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package lib.linalg
import com.github.fommil.netlib.BLAS.{getInstance => NativeBLAS}
import com.github.fommil.netlib.F2jBLAS
import com.github.fommil.netlib.{BLAS => NetlibBLAS}
import org.apache.spark.ml.linalg.DenseMatrix
import org.apache.spark.ml.linalg.DenseVector
import org.apache.spark.ml.linalg.Matrix
import org.apache.spark.ml.linalg.SparseMatrix
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.ml.linalg.Vector
private[emmalanguage] object BLAS extends Serializable {
@transient private var _f2jBLAS: NetlibBLAS = _
@transient private var _nativeBLAS: NetlibBLAS = _
// For level-1 routines, we use Java implementation.
private def f2jBLAS: NetlibBLAS = {
if (_f2jBLAS == null) {
_f2jBLAS = new F2jBLAS
}
_f2jBLAS
}
/**
* y += a * x
*/
def axpy(a: Double, x: Vector, y: Vector): Unit = {
require(x.size == y.size)
y match {
case dy: DenseVector =>
x match {
case sx: SparseVector =>
axpy(a, sx, dy)
case dx: DenseVector =>
axpy(a, dx, dy)
case _ =>
throw new UnsupportedOperationException(
s"axpy doesn't support x type ${x.getClass}.")
}
case _ =>
throw new IllegalArgumentException(
s"axpy only supports adding to a dense vector but got type ${y.getClass}.")
}
}
/**
* y += a * x
*/
private def axpy(a: Double, x: DenseVector, y: DenseVector): Unit = {
val n = x.size
f2jBLAS.daxpy(n, a, x.values, 1, y.values, 1)
}
/**
* y += a * x
*/
private def axpy(a: Double, x: SparseVector, y: DenseVector): Unit = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val nnz = xIndices.length
if (a == 1.0) {
var k = 0
while (k < nnz) {
yValues(xIndices(k)) += xValues(k)
k += 1
}
} else {
var k = 0
while (k < nnz) {
yValues(xIndices(k)) += a * xValues(k)
k += 1
}
}
}
/** Y += a * x */
private[emmalanguage] def axpy(a: Double, X: DenseMatrix, Y: DenseMatrix): Unit = {
require(X.numRows == Y.numRows && X.numCols == Y.numCols, "Dimension mismatch: " +
s"size(X) = ${(X.numRows, X.numCols)} but size(Y) = ${(Y.numRows, Y.numCols)}.")
f2jBLAS.daxpy(X.numRows * X.numCols, a, X.values, 1, Y.values, 1)
}
/**
* dot(x, y)
*/
def dot(x: Vector, y: Vector): Double = {
require(x.size == y.size,
"BLAS.dot(x: Vector, y:Vector) was given Vectors with non-matching sizes:" +
" x.size = " + x.size + ", y.size = " + y.size)
(x, y) match {
case (dx: DenseVector, dy: DenseVector) =>
dot(dx, dy)
case (sx: SparseVector, dy: DenseVector) =>
dot(sx, dy)
case (dx: DenseVector, sy: SparseVector) =>
dot(sy, dx)
case (sx: SparseVector, sy: SparseVector) =>
dot(sx, sy)
case _ =>
throw new IllegalArgumentException(s"dot doesn't support (${x.getClass}, ${y.getClass}).")
}
}
/**
* dot(x, y)
*/
private def dot(x: DenseVector, y: DenseVector): Double = {
val n = x.size
f2jBLAS.ddot(n, x.values, 1, y.values, 1)
}
/**
* dot(x, y)
*/
private def dot(x: SparseVector, y: DenseVector): Double = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val nnz = xIndices.length
var sum = 0.0
var k = 0
while (k < nnz) {
sum += xValues(k) * yValues(xIndices(k))
k += 1
}
sum
}
/**
* dot(x, y)
*/
private def dot(x: SparseVector, y: SparseVector): Double = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val yIndices = y.indices
val nnzx = xIndices.length
val nnzy = yIndices.length
var kx = 0
var ky = 0
var sum = 0.0
// y catching x
while (kx < nnzx && ky < nnzy) {
val ix = xIndices(kx)
while (ky < nnzy && yIndices(ky) < ix) {
ky += 1
}
if (ky < nnzy && yIndices(ky) == ix) {
sum += xValues(kx) * yValues(ky)
ky += 1
}
kx += 1
}
sum
}
/**
* y = x
*/
def copy(x: Vector, y: Vector): Unit = {
val n = y.size
require(x.size == n)
y match {
case dy: DenseVector =>
x match {
case sx: SparseVector =>
val sxIndices = sx.indices
val sxValues = sx.values
val dyValues = dy.values
val nnz = sxIndices.length
var i = 0
var k = 0
while (k < nnz) {
val j = sxIndices(k)
while (i < j) {
dyValues(i) = 0.0
i += 1
}
dyValues(i) = sxValues(k)
i += 1
k += 1
}
while (i < n) {
dyValues(i) = 0.0
i += 1
}
case dx: DenseVector =>
Array.copy(dx.values, 0, dy.values, 0, n)
}
case _ =>
throw new IllegalArgumentException(s"y must be dense in copy but got ${y.getClass}")
}
}
/**
* x = a * x
*/
def scal(a: Double, x: Vector): Unit = {
x match {
case sx: SparseVector =>
f2jBLAS.dscal(sx.values.length, a, sx.values, 1)
case dx: DenseVector =>
f2jBLAS.dscal(dx.values.length, a, dx.values, 1)
case _ =>
throw new IllegalArgumentException(s"scal doesn't support vector type ${x.getClass}.")
}
}
// For level-3 routines, we use the native BLAS.
private def nativeBLAS: NetlibBLAS = {
if (_nativeBLAS == null) {
_nativeBLAS = NativeBLAS
}
_nativeBLAS
}
/**
* Adds alpha * x * x.t to a matrix in-place. This is the same as BLAS's ?SPR.
*
* @param U the upper triangular part of the matrix in a [[DenseVector]](column major)
*/
def spr(alpha: Double, v: Vector, U: DenseVector): Unit = {
spr(alpha, v, U.values)
}
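// Illustrative note (not part of the original source): the packed, column-major
// upper-triangular layout assumed by spr and dspmv stores, for a 3x3 symmetric A,
//
//   U = Array(A(0,0), A(0,1), A(1,1), A(0,2), A(1,2), A(2,2))
//
// i.e. column j contributes its first j + 1 entries, starting at offset j * (j + 1) / 2.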
/**
* y := alpha*A*x + beta*y
*
* @param n The order of the n by n matrix A.
* @param A The upper triangular part of A in a [[DenseVector]] (column major).
* @param x The [[DenseVector]] transformed by A.
* @param y The [[DenseVector]] to be modified in place.
*/
def dspmv(
n: Int,
alpha: Double,
A: DenseVector,
x: DenseVector,
beta: Double,
y: DenseVector): Unit = {
f2jBLAS.dspmv("U", n, alpha, A.values, x.values, 1, beta, y.values, 1)
}
/**
* Adds alpha * x * x.t to a matrix in-place. This is the same as BLAS's ?SPR.
*
* @param U the upper triangular part of the matrix packed in an array (column major)
*/
def spr(alpha: Double, v: Vector, U: Array[Double]): Unit = {
val n = v.size
v match {
case DenseVector(values) =>
NativeBLAS.dspr("U", n, alpha, values, 1, U)
case SparseVector(_, indices, values) =>
val nnz = indices.length
var colStartIdx = 0
var prevCol = 0
var col = 0
var j = 0
var i = 0
var av = 0.0
while (j < nnz) {
col = indices(j)
// Skip empty columns.
colStartIdx += (col - prevCol) * (col + prevCol + 1) / 2
av = alpha * values(j)
i = 0
while (i <= j) {
U(colStartIdx + indices(i)) += av * values(i)
i += 1
}
j += 1
prevCol = col
}
}
}
/**
* A := alpha * x * x^T^ + A
*
* @param alpha a real scalar that will be multiplied to x * x^T^.
* @param x the vector x that contains the n elements.
* @param A the symmetric matrix A. Size of n x n.
*/
/*def syr(alpha: Double, x: Vector, A: DenseMatrix): Unit = {
val mA = A.numRows
val nA = A.numCols
require(mA == nA, s"A is not a square matrix (and hence is not symmetric). A: $mA x $nA")
require(mA == x.size, s"The size of x doesn't match the rank of A. A: $mA x $nA, x: ${x.size}")
x match {
case dv: DenseVector => syr(alpha, dv, A)
case sv: SparseVector => syr(alpha, sv, A)
case _ =>
throw new IllegalArgumentException(s"syr doesn't support vector type ${x.getClass}.")
}
}
private def syr(alpha: Double, x: DenseVector, A: DenseMatrix): Unit = {
val nA = A.numRows
val mA = A.numCols
nativeBLAS.dsyr("U", x.size, alpha, x.values, 1, A.values, nA)
// Fill lower triangular part of A
var i = 0
while (i < mA) {
var j = i + 1
while (j < nA) {
A(j, i) = A(i, j)
j += 1
}
i += 1
}
}
private def syr(alpha: Double, x: SparseVector, A: DenseMatrix): Unit = {
val mA = A.numCols
val xIndices = x.indices
val xValues = x.values
val nnz = xValues.length
val Avalues = A.values
var i = 0
while (i < nnz) {
val multiplier = alpha * xValues(i)
val offset = xIndices(i) * mA
var j = 0
while (j < nnz) {
Avalues(xIndices(j) + offset) += multiplier * xValues(j)
j += 1
}
i += 1
}
}*/
/**
* C := alpha * A * B + beta * C
*
* @param alpha a scalar to scale the multiplication A * B.
* @param A the matrix A that will be left multiplied to B. Size of m x k.
* @param B the matrix B that will be left multiplied by A. Size of k x n.
* @param beta a scalar that can be used to scale matrix C.
* @param C the resulting matrix C. Size of m x n. C.isTransposed must be false.
*/
def gemm(
alpha: Double,
A: Matrix,
B: DenseMatrix,
beta: Double,
C: DenseMatrix): Unit = {
require(!C.isTransposed,
"The matrix C cannot be the product of a transpose() call. C.isTransposed must be false.")
if (alpha == 0.0 && beta == 1.0) {
// gemm: alpha is equal to 0 and beta is equal to 1. Returning C.
} else if (alpha == 0.0) {
f2jBLAS.dscal(C.values.length, beta, C.values, 1)
} else {
A match {
case sparse: SparseMatrix => gemm(alpha, sparse, B, beta, C)
case dense: DenseMatrix => gemm(alpha, dense, B, beta, C)
case _ =>
throw new IllegalArgumentException(s"gemm doesn't support matrix type ${A.getClass}.")
}
}
}
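// Illustrative usage sketch (not part of the original source; the 2x2 values are
// hypothetical). With beta = 0.0 the previous contents of C are discarded, so
// this computes C := A * B:
//
//   val a = new DenseMatrix(2, 2, Array(1.0, 2.0, 3.0, 4.0)) // column major
//   val b = new DenseMatrix(2, 2, Array(1.0, 0.0, 0.0, 1.0))
//   val c = DenseMatrix.zeros(2, 2)
//   gemm(1.0, a, b, 0.0, c) // c now holds a * b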
/**
* C := alpha * A * B + beta * C
* For `DenseMatrix` A.
*/
private def gemm(
alpha: Double,
A: DenseMatrix,
B: DenseMatrix,
beta: Double,
C: DenseMatrix): Unit = {
val tAstr = if (A.isTransposed) "T" else "N"
val tBstr = if (B.isTransposed) "T" else "N"
val lda = if (!A.isTransposed) A.numRows else A.numCols
val ldb = if (!B.isTransposed) B.numRows else B.numCols
require(A.numCols == B.numRows,
s"The columns of A don't match the rows of B. A: ${A.numCols}, B: ${B.numRows}")
require(A.numRows == C.numRows,
s"The rows of C don't match the rows of A. C: ${C.numRows}, A: ${A.numRows}")
require(B.numCols == C.numCols,
s"The columns of C don't match the columns of B. C: ${C.numCols}, A: ${B.numCols}")
nativeBLAS.dgemm(tAstr, tBstr, A.numRows, B.numCols, A.numCols, alpha, A.values, lda,
B.values, ldb, beta, C.values, C.numRows)
}
/**
* C := alpha * A * B + beta * C
* For `SparseMatrix` A.
*/
private def gemm(
alpha: Double,
A: SparseMatrix,
B: DenseMatrix,
beta: Double,
C: DenseMatrix): Unit = {
val mA: Int = A.numRows
val nB: Int = B.numCols
val kA: Int = A.numCols
val kB: Int = B.numRows
require(kA == kB, s"The columns of A don't match the rows of B. A: $kA, B: $kB")
require(mA == C.numRows, s"The rows of C don't match the rows of A. C: ${C.numRows}, A: $mA")
require(nB == C.numCols,
s"The columns of C don't match the columns of B. C: ${C.numCols}, A: $nB")
val Avals = A.values
val Bvals = B.values
val Cvals = C.values
val ArowIndices = A.rowIndices
val AcolPtrs = A.colPtrs
// Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
if (A.isTransposed) {
var colCounterForB = 0
if (!B.isTransposed) { // Expensive to put the check inside the loop
while (colCounterForB < nB) {
var rowCounterForA = 0
val Cstart = colCounterForB * mA
val Bstart = colCounterForB * kA
while (rowCounterForA < mA) {
var i = AcolPtrs(rowCounterForA)
val indEnd = AcolPtrs(rowCounterForA + 1)
var sum = 0.0
while (i < indEnd) {
sum += Avals(i) * Bvals(Bstart + ArowIndices(i))
i += 1
}
val Cindex = Cstart + rowCounterForA
Cvals(Cindex) = beta * Cvals(Cindex) + sum * alpha
rowCounterForA += 1
}
colCounterForB += 1
}
} else {
while (colCounterForB < nB) {
var rowCounterForA = 0
val Cstart = colCounterForB * mA
while (rowCounterForA < mA) {
var i = AcolPtrs(rowCounterForA)
val indEnd = AcolPtrs(rowCounterForA + 1)
var sum = 0.0
while (i < indEnd) {
sum += Avals(i) * B(ArowIndices(i), colCounterForB)
i += 1
}
val Cindex = Cstart + rowCounterForA
Cvals(Cindex) = beta * Cvals(Cindex) + sum * alpha
rowCounterForA += 1
}
colCounterForB += 1
}
}
} else {
// Scale matrix first if `beta` is not equal to 1.0
if (beta != 1.0) {
f2jBLAS.dscal(C.values.length, beta, C.values, 1)
}
// Perform matrix multiplication and add to C. The rows of A are multiplied by the columns of
// B, and added to C.
var colCounterForB = 0 // the column to be updated in C
if (!B.isTransposed) { // Expensive to put the check inside the loop
while (colCounterForB < nB) {
var colCounterForA = 0 // The column of A to multiply with the row of B
val Bstart = colCounterForB * kB
val Cstart = colCounterForB * mA
while (colCounterForA < kA) {
var i = AcolPtrs(colCounterForA)
val indEnd = AcolPtrs(colCounterForA + 1)
val Bval = Bvals(Bstart + colCounterForA) * alpha
while (i < indEnd) {
Cvals(Cstart + ArowIndices(i)) += Avals(i) * Bval
i += 1
}
colCounterForA += 1
}
colCounterForB += 1
}
} else {
while (colCounterForB < nB) {
var colCounterForA = 0 // The column of A to multiply with the row of B
val Cstart = colCounterForB * mA
while (colCounterForA < kA) {
var i = AcolPtrs(colCounterForA)
val indEnd = AcolPtrs(colCounterForA + 1)
val Bval = B(colCounterForA, colCounterForB) * alpha
while (i < indEnd) {
Cvals(Cstart + ArowIndices(i)) += Avals(i) * Bval
i += 1
}
colCounterForA += 1
}
colCounterForB += 1
}
}
}
}
/**
* y := alpha * A * x + beta * y
*
* @param alpha a scalar to scale the multiplication A * x.
* @param A the matrix A that will be left multiplied to x. Size of m x n.
* @param x the vector x that will be left multiplied by A. Size of n x 1.
* @param beta a scalar that can be used to scale vector y.
* @param y the resulting vector y. Size of m x 1.
*/
def gemv(
alpha: Double,
A: Matrix,
x: Vector,
beta: Double,
y: DenseVector): Unit = {
require(A.numCols == x.size,
s"The columns of A don't match the number of elements of x. A: ${A.numCols}, x: ${x.size}")
require(A.numRows == y.size,
s"The rows of A don't match the number of elements of y. A: ${A.numRows}, y:${y.size}")
if (alpha == 0.0 && beta == 1.0) {
// gemv: alpha is equal to 0 and beta is equal to 1. Returning y.
} else if (alpha == 0.0) {
scal(beta, y)
} else {
(A, x) match {
case (smA: SparseMatrix, dvx: DenseVector) =>
gemv(alpha, smA, dvx, beta, y)
case (smA: SparseMatrix, svx: SparseVector) =>
gemv(alpha, smA, svx, beta, y)
case (dmA: DenseMatrix, dvx: DenseVector) =>
gemv(alpha, dmA, dvx, beta, y)
case (dmA: DenseMatrix, svx: SparseVector) =>
gemv(alpha, dmA, svx, beta, y)
case _ =>
throw new IllegalArgumentException(s"gemv doesn't support running on matrix type " +
s"${A.getClass} and vector type ${x.getClass}.")
}
}
}
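// Illustrative usage sketch (not part of the original source; sizes and values
// are hypothetical). Computes y := A * x, i.e. the row sums of A for x = ones:
//
//   val a = new DenseMatrix(2, 3, Array(1.0, 4.0, 2.0, 5.0, 3.0, 6.0)) // 2x3, column major
//   val x = new DenseVector(Array(1.0, 1.0, 1.0))
//   val y = new DenseVector(Array(0.0, 0.0))
//   gemv(1.0, a, x, 0.0, y) // y == DenseVector(6.0, 15.0)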
/**
* y := alpha * A * x + beta * y
* For `DenseMatrix` A and `DenseVector` x.
*/
private def gemv(
alpha: Double,
A: DenseMatrix,
x: DenseVector,
beta: Double,
y: DenseVector): Unit = {
val tStrA = if (A.isTransposed) "T" else "N"
val mA = if (!A.isTransposed) A.numRows else A.numCols
val nA = if (!A.isTransposed) A.numCols else A.numRows
nativeBLAS.dgemv(tStrA, mA, nA, alpha, A.values, mA, x.values, 1, beta,
y.values, 1)
}
/**
* y := alpha * A * x + beta * y
* For `DenseMatrix` A and `SparseVector` x.
*/
private def gemv(
alpha: Double,
A: DenseMatrix,
x: SparseVector,
beta: Double,
y: DenseVector): Unit = {
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val xIndices = x.indices
val xNnz = xIndices.length
val xValues = x.values
val yValues = y.values
if (A.isTransposed) {
var rowCounterForA = 0
while (rowCounterForA < mA) {
var sum = 0.0
var k = 0
while (k < xNnz) {
sum += xValues(k) * Avals(xIndices(k) + rowCounterForA * nA)
k += 1
}
yValues(rowCounterForA) = sum * alpha + beta * yValues(rowCounterForA)
rowCounterForA += 1
}
} else {
var rowCounterForA = 0
while (rowCounterForA < mA) {
var sum = 0.0
var k = 0
while (k < xNnz) {
sum += xValues(k) * Avals(xIndices(k) * mA + rowCounterForA)
k += 1
}
yValues(rowCounterForA) = sum * alpha + beta * yValues(rowCounterForA)
rowCounterForA += 1
}
}
}
/**
* y := alpha * A * x + beta * y
* For `SparseMatrix` A and `SparseVector` x.
*/
private def gemv(
alpha: Double,
A: SparseMatrix,
x: SparseVector,
beta: Double,
y: DenseVector): Unit = {
val xValues = x.values
val xIndices = x.indices
val xNnz = xIndices.length
val yValues = y.values
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val Arows = if (!A.isTransposed) A.rowIndices else A.colPtrs
val Acols = if (!A.isTransposed) A.colPtrs else A.rowIndices
if (A.isTransposed) {
var rowCounter = 0
while (rowCounter < mA) {
var i = Arows(rowCounter)
val indEnd = Arows(rowCounter + 1)
var sum = 0.0
var k = 0
while (i < indEnd && k < xNnz) {
if (xIndices(k) == Acols(i)) {
sum += Avals(i) * xValues(k)
k += 1
i += 1
} else if (xIndices(k) < Acols(i)) {
k += 1
} else {
i += 1
}
}
yValues(rowCounter) = sum * alpha + beta * yValues(rowCounter)
rowCounter += 1
}
} else {
if (beta != 1.0) scal(beta, y)
var colCounterForA = 0
var k = 0
while (colCounterForA < nA && k < xNnz) {
if (xIndices(k) == colCounterForA) {
var i = Acols(colCounterForA)
val indEnd = Acols(colCounterForA + 1)
val xTemp = xValues(k) * alpha
while (i < indEnd) {
val rowIndex = Arows(i)
yValues(rowIndex) += Avals(i) * xTemp
i += 1
}
k += 1
}
colCounterForA += 1
}
}
}
/**
* y := alpha * A * x + beta * y
* For `SparseMatrix` A and `DenseVector` x.
*/
private def gemv(
alpha: Double,
A: SparseMatrix,
x: DenseVector,
beta: Double,
y: DenseVector): Unit = {
val xValues = x.values
val yValues = y.values
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val Arows = if (!A.isTransposed) A.rowIndices else A.colPtrs
val Acols = if (!A.isTransposed) A.colPtrs else A.rowIndices
// Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
if (A.isTransposed) {
var rowCounter = 0
while (rowCounter < mA) {
var i = Arows(rowCounter)
val indEnd = Arows(rowCounter + 1)
var sum = 0.0
while (i < indEnd) {
sum += Avals(i) * xValues(Acols(i))
i += 1
}
yValues(rowCounter) = beta * yValues(rowCounter) + sum * alpha
rowCounter += 1
}
} else {
if (beta != 1.0) scal(beta, y)
// Perform matrix-vector multiplication and add to y
var colCounterForA = 0
while (colCounterForA < nA) {
var i = Acols(colCounterForA)
val indEnd = Acols(colCounterForA + 1)
val xVal = xValues(colCounterForA) * alpha
while (i < indEnd) {
val rowIndex = Arows(i)
yValues(rowIndex) += Avals(i) * xVal
i += 1
}
colCounterForA += 1
}
}
}
}
|
emmalanguage/emma
|
emma-lib/src/main/scala/org/emmalanguage/lib/linalg/BLAS.scala
|
Scala
|
apache-2.0
| 22,062 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box.ValidatableBox._
import uk.gov.hmrc.ct.box._
case class AC7110A(value: Option[String]) extends CtBoxIdentifier(name = "Other accounting policies")
with CtOptionalString
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateStringMaxLength("AC7110A", value.getOrElse(""), StandardCohoTextFieldLimit),
validateCoHoStringReturnIllegalChars("AC7110A", this)
)
}
}
|
pncampbell/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC7110A.scala
|
Scala
|
apache-2.0
| 1,440 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
object DropByTimespanSuite extends BaseOperatorSuite {
val waitFirst = 2500.millis
val waitNext = 500.millis
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None
def sum(sourceCount: Int) =
(0 until sourceCount).map(_ + 5).sum
def count(sourceCount: Int) =
sourceCount
def createObservable(sourceCount: Int) = Some {
require(sourceCount > 0, "sourceCount should be strictly positive")
val o = Observable.intervalAtFixedRate(500.millis)
.take(sourceCount + 5)
.dropByTimespan(2300.millis)
Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
}
def observableInError(sourceCount: Int, ex: Throwable) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val source = Observable.intervalAtFixedRate(500.millis)
.take(sourceCount + 5)
val o = createObservableEndingInError(source, ex)
.dropByTimespan(2300.millis)
Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
}
}
override def cancelableObservables(): Seq[Sample] = {
val o = Observable.intervalAtFixedRate(500.millis)
.dropByTimespan(2300.millis)
Seq(Sample(o, 0, 0, 0.seconds, 0.seconds))
}
}
|
Wogan/monix
|
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DropByTimespanSuite.scala
|
Scala
|
apache-2.0
| 2,030 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.io.{IOException, ObjectOutputStream}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.existentials
import scala.reflect.ClassTag
import org.apache.spark._
import org.apache.spark.util.Utils
/**
* Class that captures a coalesced RDD by essentially keeping track of parent partitions
* @param index of this coalesced partition
* @param rdd which it belongs to
* @param parentsIndices list of indices in the parent that have been coalesced into this partition
* @param preferredLocation the preferred location for this partition
*/
private[spark] case class CoalescedRDDPartition(
index: Int,
@transient rdd: RDD[_],
parentsIndices: Array[Int],
@transient preferredLocation: Option[String] = None) extends Partition {
var parents: Seq[Partition] = parentsIndices.map(rdd.partitions(_))
@throws(classOf[IOException])
private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
// Update the reference to parent partition at the time of task serialization
parents = parentsIndices.map(rdd.partitions(_))
oos.defaultWriteObject()
}
/**
* Computes the fraction of the parents' partitions containing preferredLocation within
* their getPreferredLocs.
* @return locality of this coalesced partition between 0 and 1
*/
def localFraction: Double = {
val loc = parents.count { p =>
val parentPreferredLocations = rdd.context.getPreferredLocs(rdd, p.index).map(_.host)
preferredLocation.exists(parentPreferredLocations.contains)
}
if (parents.size == 0) 0.0 else (loc.toDouble / parents.size.toDouble)
}
}
/**
* Represents a coalesced RDD that has fewer partitions than its parent RDD
* This class uses the PartitionCoalescer class to find a good partitioning of the parent RDD
* so that each new partition has roughly the same number of parent partitions and that
* the preferred location of each new partition overlaps with as many preferred locations of its
* parent partitions
* @param prev RDD to be coalesced
* @param maxPartitions number of desired partitions in the coalesced RDD (must be positive)
* @param partitionCoalescer [[PartitionCoalescer]] implementation to use for coalescing
*/
private[spark] class CoalescedRDD[T: ClassTag](
@transient var prev: RDD[T],
maxPartitions: Int,
partitionCoalescer: Option[PartitionCoalescer] = None)
extends RDD[T](prev.context, Nil) { // Nil since we implement getDependencies
require(maxPartitions > 0 || maxPartitions == prev.partitions.length,
s"Number of partitions ($maxPartitions) must be positive.")
if (partitionCoalescer.isDefined) {
require(partitionCoalescer.get.isInstanceOf[Serializable],
"The partition coalescer passed in must be serializable.")
}
override def getPartitions: Array[Partition] = {
val pc = partitionCoalescer.getOrElse(new DefaultPartitionCoalescer())
pc.coalesce(maxPartitions, prev).zipWithIndex.map {
case (pg, i) =>
val ids = pg.partitions.map(_.index).toArray
new CoalescedRDDPartition(i, prev, ids, pg.prefLoc)
}
}
override def compute(partition: Partition, context: TaskContext): Iterator[T] = {
partition.asInstanceOf[CoalescedRDDPartition].parents.iterator.flatMap { parentPartition =>
firstParent[T].iterator(parentPartition, context)
}
}
override def getDependencies: Seq[Dependency[_]] = {
Seq(new NarrowDependency(prev) {
def getParents(id: Int): Seq[Int] =
partitions(id).asInstanceOf[CoalescedRDDPartition].parentsIndices
})
}
override def clearDependencies() {
super.clearDependencies()
prev = null
}
/**
* Returns the preferred machine for the partition. If split is of type CoalescedRDDPartition,
* then the preferred machine will be one which most parent splits prefer too.
* @param partition
* @return the machine most preferred by split
*/
override def getPreferredLocations(partition: Partition): Seq[String] = {
partition.asInstanceOf[CoalescedRDDPartition].preferredLocation.toSeq
}
}
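// Illustrative note (not part of the original source): this is the RDD built
// internally by `RDD.coalesce(numPartitions, shuffle = false)`, e.g.
//
//   val coalesced = new CoalescedRDD(parentRdd, maxPartitions = 8)
//
// where `parentRdd` is a hypothetical RDD with more than 8 partitions.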
/**
* Coalesce the partitions of a parent RDD (`prev`) into fewer partitions, so that each partition of
* this RDD computes one or more of the parent ones. It will produce exactly `maxPartitions` if the
* parent had more than maxPartitions, or fewer if the parent had fewer.
*
* This transformation is useful when an RDD with many partitions gets filtered into a smaller one,
* or to avoid having a large number of small tasks when processing a directory with many files.
*
* If there is no locality information (no preferredLocations) in the parent, then the coalescing
* is very simple: group parent partitions that are adjacent in the array into chunks.
* If there is locality information, it proceeds to pack them with the following four goals:
*
* (1) Balance the groups so they roughly have the same number of parent partitions
* (2) Achieve locality per partition, i.e. find one machine which most parent partitions prefer
* (3) Be efficient, i.e. O(n) algorithm for n parent partitions (problem is likely NP-hard)
* (4) Balance preferred machines, i.e. avoid as much as possible picking the same preferred machine
*
* Furthermore, it is assumed that the parent RDD may have many partitions, e.g. 100 000.
* We assume the final number of desired partitions is small, e.g. less than 1000.
*
* The algorithm tries to assign unique preferred machines to each partition. If the number of
* desired partitions is greater than the number of preferred machines (can happen), it needs to
* start picking duplicate preferred machines. This is determined using coupon collector estimation
* (2n log(n)). The load balancing is done using power-of-two randomized bins-balls with one twist:
* it tries to also achieve locality. This is done by allowing a slack (balanceSlack, where
* 1.0 is all locality, 0 is all balance) between two bins. If two bins are within the slack
* in terms of balance, the algorithm will assign partitions according to locality.
* (contact alig for questions)
*/
private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
extends PartitionCoalescer {
def compare(o1: PartitionGroup, o2: PartitionGroup): Boolean = o1.numPartitions < o2.numPartitions
def compare(o1: Option[PartitionGroup], o2: Option[PartitionGroup]): Boolean =
if (o1 == None) false else if (o2 == None) true else compare(o1.get, o2.get)
val rnd = new scala.util.Random(7919) // keep this class deterministic
// each element of groupArr represents one coalesced partition
val groupArr = ArrayBuffer[PartitionGroup]()
// hash used to check whether some machine is already in groupArr
val groupHash = mutable.Map[String, ArrayBuffer[PartitionGroup]]()
// hash used for the first maxPartitions (to avoid duplicates)
val initialHash = mutable.Set[Partition]()
  var noLocality = true // true if no preferredLocations exist for the parent RDD
// gets the *current* preferred locations from the DAGScheduler (as opposed to the static ones)
def currPrefLocs(part: Partition, prev: RDD[_]): Seq[String] = {
prev.context.getPreferredLocs(prev, part.index).map(tl => tl.host)
}
class PartitionLocations(prev: RDD[_]) {
// contains all the partitions from the previous RDD that don't have preferred locations
val partsWithoutLocs = ArrayBuffer[Partition]()
// contains all the partitions from the previous RDD that have preferred locations
val partsWithLocs = ArrayBuffer[(String, Partition)]()
getAllPrefLocs(prev)
// gets all the preferred locations of the previous RDD and splits them into partitions
// with preferred locations and ones without
def getAllPrefLocs(prev: RDD[_]): Unit = {
val tmpPartsWithLocs = mutable.LinkedHashMap[Partition, Seq[String]]()
// first get the locations for each partition, only do this once since it can be expensive
prev.partitions.foreach(p => {
val locs = currPrefLocs(p, prev)
if (locs.nonEmpty) {
tmpPartsWithLocs.put(p, locs)
} else {
partsWithoutLocs += p
}
}
)
// convert it into an array of host to partition
for (x <- 0 to 2) {
tmpPartsWithLocs.foreach { parts =>
val p = parts._1
val locs = parts._2
if (locs.size > x) partsWithLocs += ((locs(x), p))
}
}
}
}
/**
* Sorts and gets the least element of the list associated with key in groupHash
* The returned PartitionGroup is the least loaded of all groups that represent the machine "key"
*
   * @param key host name whose associated partition groups should be searched
* @return Option of [[PartitionGroup]] that has least elements for key
*/
def getLeastGroupHash(key: String): Option[PartitionGroup] = {
groupHash.get(key).map(_.sortWith(compare).head)
}
def addPartToPGroup(part: Partition, pgroup: PartitionGroup): Boolean = {
if (!initialHash.contains(part)) {
      pgroup.partitions += part // assign this partition to the group
initialHash += part // needed to avoid assigning partitions to multiple buckets
true
} else { false }
}
/**
* Initializes targetLen partition groups. If there are preferred locations, each group
* is assigned a preferredLocation. This uses coupon collector to estimate how many
* preferredLocations it must rotate through until it has seen most of the preferred
* locations (2 * n log(n))
   * @param targetLen the number of partition groups to create
*/
def setupGroups(targetLen: Int, partitionLocs: PartitionLocations) {
// deal with empty case, just create targetLen partition groups with no preferred location
if (partitionLocs.partsWithLocs.isEmpty) {
(1 to targetLen).foreach(x => groupArr += new PartitionGroup())
return
}
noLocality = false
// number of iterations needed to be certain that we've seen most preferred locations
val expectedCoupons2 = 2 * (math.log(targetLen)*targetLen + targetLen + 0.5).toInt
var numCreated = 0
var tries = 0
    // rotate through until either targetLen unique/distinct preferred locations have been created
    // OR we have gone through all partitions OR we've rotated expectedCoupons2 times - in
    // which case we have likely seen all preferred locations
val numPartsToLookAt = math.min(expectedCoupons2, partitionLocs.partsWithLocs.length)
while (numCreated < targetLen && tries < numPartsToLookAt) {
val (nxt_replica, nxt_part) = partitionLocs.partsWithLocs(tries)
tries += 1
if (!groupHash.contains(nxt_replica)) {
val pgroup = new PartitionGroup(Some(nxt_replica))
groupArr += pgroup
addPartToPGroup(nxt_part, pgroup)
groupHash.put(nxt_replica, ArrayBuffer(pgroup)) // list in case we have multiple
numCreated += 1
}
}
tries = 0
// if we don't have enough partition groups, create duplicates
while (numCreated < targetLen) {
val (nxt_replica, nxt_part) = partitionLocs.partsWithLocs(tries)
tries += 1
val pgroup = new PartitionGroup(Some(nxt_replica))
groupArr += pgroup
groupHash.getOrElseUpdate(nxt_replica, ArrayBuffer()) += pgroup
addPartToPGroup(nxt_part, pgroup)
numCreated += 1
if (tries >= partitionLocs.partsWithLocs.length) tries = 0
}
}
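  // Editor's note (illustrative, not part of the original Spark source): for targetLen = 100 the
  // coupon-collector bound above gives expectedCoupons2 = 2 * (100 * ln(100) + 100 + 0.5).toInt
  // = 2 * 561 = 1122, i.e. roughly 2 * n * log(n) candidate partitions are examined before we stop
  // looking for 100 distinct preferred hosts. A quick REPL check of the same expression:
  //   scala> val targetLen = 100
  //   scala> 2 * (math.log(targetLen) * targetLen + targetLen + 0.5).toInt
  //   res0: Int = 1122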
/**
* Takes a parent RDD partition and decides which of the partition groups to put it in
* Takes locality into account, but also uses power of 2 choices to load balance
* It strikes a balance between the two using the balanceSlack variable
* @param p partition (ball to be thrown)
* @param balanceSlack determines the trade-off between load-balancing the partitions sizes and
* their locality. e.g., balanceSlack=0.10 means that it allows up to 10%
* imbalance in favor of locality
* @return partition group (bin to be put in)
*/
def pickBin(
p: Partition,
prev: RDD[_],
balanceSlack: Double,
partitionLocs: PartitionLocations): PartitionGroup = {
val slack = (balanceSlack * prev.partitions.length).toInt
// least loaded pref locs
val pref = currPrefLocs(p, prev).map(getLeastGroupHash(_)).sortWith(compare)
val prefPart = if (pref == Nil) None else pref.head
val r1 = rnd.nextInt(groupArr.size)
val r2 = rnd.nextInt(groupArr.size)
val minPowerOfTwo = {
if (groupArr(r1).numPartitions < groupArr(r2).numPartitions) {
groupArr(r1)
}
else {
groupArr(r2)
}
}
if (prefPart.isEmpty) {
// if no preferred locations, just use basic power of two
return minPowerOfTwo
}
val prefPartActual = prefPart.get
// more imbalance than the slack allows
if (minPowerOfTwo.numPartitions + slack <= prefPartActual.numPartitions) {
minPowerOfTwo // prefer balance over locality
} else {
prefPartActual // prefer locality over balance
}
}
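  // Editor's note (illustrative, not part of the original Spark source): with balanceSlack = 0.10
  // and a parent RDD of 100 partitions, slack = 10. If the power-of-two candidate currently holds
  // 3 partitions and the least-loaded group on a preferred host holds 14, then 3 + 10 <= 14 and
  // balance wins (minPowerOfTwo is chosen); if the preferred group holds only 12, the test fails
  // and locality wins (prefPartActual is chosen).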
def throwBalls(
maxPartitions: Int,
prev: RDD[_],
balanceSlack: Double, partitionLocs: PartitionLocations) {
if (noLocality) { // no preferredLocations in parent RDD, no randomization needed
if (maxPartitions > groupArr.size) { // just return prev.partitions
for ((p, i) <- prev.partitions.zipWithIndex) {
groupArr(i).partitions += p
}
} else { // no locality available, then simply split partitions based on positions in array
for (i <- 0 until maxPartitions) {
val rangeStart = ((i.toLong * prev.partitions.length) / maxPartitions).toInt
val rangeEnd = (((i.toLong + 1) * prev.partitions.length) / maxPartitions).toInt
(rangeStart until rangeEnd).foreach{ j => groupArr(i).partitions += prev.partitions(j) }
}
}
} else {
// It is possible to have unionRDD where one rdd has preferred locations and another rdd
// that doesn't. To make sure we end up with the requested number of partitions,
// make sure to put a partition in every group.
// if we don't have a partition assigned to every group first try to fill them
// with the partitions with preferred locations
val partIter = partitionLocs.partsWithLocs.iterator
groupArr.filter(pg => pg.numPartitions == 0).foreach { pg =>
while (partIter.hasNext && pg.numPartitions == 0) {
        val (nxt_replica, nxt_part) = partIter.next()
if (!initialHash.contains(nxt_part)) {
pg.partitions += nxt_part
initialHash += nxt_part
}
}
}
      // if we didn't get one partition per group from the partitions with preferred locations,
      // use partitions without preferred locations
val partNoLocIter = partitionLocs.partsWithoutLocs.iterator
groupArr.filter(pg => pg.numPartitions == 0).foreach { pg =>
while (partNoLocIter.hasNext && pg.numPartitions == 0) {
        val nxt_part = partNoLocIter.next()
if (!initialHash.contains(nxt_part)) {
pg.partitions += nxt_part
initialHash += nxt_part
}
}
}
// finally pick bin for the rest
for (p <- prev.partitions if (!initialHash.contains(p))) { // throw every partition into group
pickBin(p, prev, balanceSlack, partitionLocs).partitions += p
}
}
}
def getPartitions: Array[PartitionGroup] = groupArr.filter( pg => pg.numPartitions > 0).toArray
/**
* Runs the packing algorithm and returns an array of PartitionGroups that if possible are
* load balanced and grouped by locality
*
* @return array of partition groups
*/
def coalesce(maxPartitions: Int, prev: RDD[_]): Array[PartitionGroup] = {
val partitionLocs = new PartitionLocations(prev)
// setup the groups (bins)
setupGroups(math.min(prev.partitions.length, maxPartitions), partitionLocs)
// assign partitions (balls) to each group (bins)
throwBalls(maxPartitions, prev, balanceSlack, partitionLocs)
getPartitions
}
}
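// Editor's usage sketch (illustrative, not part of the Spark sources): callers normally reach this
// coalescer through RDD.coalesce rather than by constructing CoalescedRDD directly. Assuming a
// live SparkContext named `sc`:
//   val wide     = sc.parallelize(1 to 10000, numSlices = 100)
//   val narrowed = wide.coalesce(10)   // narrow dependency; uses DefaultPartitionCoalescer
//   narrowed.partitions.length         // 10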
|
mike0sv/spark
|
core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
|
Scala
|
apache-2.0
| 16,974 |
package org.apache.spot.utilities.data.validation
import org.apache.spark.sql.types.{DataType, StructType}
import scala.collection.mutable.ListBuffer
/**
* Input schema routines
*/
object InputSchema {
/**
* Validate the incoming data schema matches the schema required for model creation and scoring.
*
   * @param inSchema schema of the incoming data frame
   * @param expectedSchema schema expected by the model training and scoring methods
   * @return InputSchemaValidationResponse containing a validity flag and any error messages
*/
def validate(inSchema: StructType, expectedSchema: StructType): InputSchemaValidationResponse = {
val response: ListBuffer[String] = ListBuffer("Schema not compatible:")
// reduce schema from struct field to only field name and type
val inSchemaMap: Map[String, DataType] = inSchema.map(field => (field.name -> field.dataType)).toMap
expectedSchema
.map(field => (field.name, field.dataType))
.foreach({ case (expectedFieldName: String, expectedDataType: DataType) => {
      val inFieldDataType: Option[DataType] = inSchemaMap.get(expectedFieldName)
      inFieldDataType match {
        case None => response.append(s"Field $expectedFieldName is not present. $expectedFieldName is required for " +
          s"model training and scoring.")
        case Some(inputDataType) =>
          if (inputDataType != expectedDataType)
            response.append(s"Field $expectedFieldName type ${inputDataType.typeName} is not the expected type " +
              s"${expectedDataType.typeName}")
      }
}
})
response.length match {
case 1 => InputSchemaValidationResponse(isValid = true, Seq())
case _ => InputSchemaValidationResponse(isValid = false, response)
}
}
case class InputSchemaValidationResponse(final val isValid: Boolean, final val errorMessages: Seq[String])
}
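// Editor's usage sketch (illustrative, not part of the original source): validating the schema of
// an incoming DataFrame `df` against the schema a model expects. The field names are made up.
//   import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}
//   val expected = StructType(Seq(StructField("ip", StringType), StructField("bytes", LongType)))
//   val result   = InputSchema.validate(df.schema, expected)
//   if (!result.isValid) result.errorMessages.foreach(println)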
|
brandon-edwards/incubator-spot
|
spot-ml/src/main/scala/org/apache/spot/utilities/data/validation/InputSchema.scala
|
Scala
|
apache-2.0
| 1,832 |
package provingground.library
import provingground._
import HoTT._
import induction._
import implicits._
import shapeless._
import Fold._
object iffInd {
lazy val value = Subst.Lambda(
"$abel" :: Prop,
Subst.Lambda(
"$abem" :: Prop,
ConstructorSeqTL(
ConstructorSeqDom.Cons(
ApplnSym(
("iff.intro" :: piDefn("'f_803143475" :: Prop)(
piDefn("'g_561098509" :: Prop)(FuncTyp(
FuncTyp("'f_803143475" :: Prop, "'g_561098509" :: Prop),
FuncTyp(FuncTyp("'g_561098509" :: Prop, "'f_803143475" :: Prop),
("iff" :: FuncTyp(Prop, FuncTyp(Prop, Prop)))(
"'f_803143475" :: Prop)("'g_561098509" :: Prop))
))))("$abel" :: Prop),
"$abem" :: Prop
),
ConstructorShape.CnstFuncConsShape(
FuncTyp("$abel" :: Prop, "$abem" :: Prop),
ConstructorShape.CnstFuncConsShape(
FuncTyp("$abem" :: Prop, "$abel" :: Prop),
ConstructorShape.IdShape.byTyp(
("iff" :: FuncTyp(Prop, FuncTyp(Prop, Prop)))("$abel" :: Prop)(
"$abem" :: Prop)))
),
ConstructorSeqDom.Empty.byTyp(
("iff" :: FuncTyp(Prop, FuncTyp(Prop, Prop)))("$abel" :: Prop)(
"$abem" :: Prop))
),
("iff" :: FuncTyp(Prop, FuncTyp(Prop, Prop)))("$abel" :: Prop)(
"$abem" :: Prop)
)
)
)
}
|
siddhartha-gadgil/ProvingGround
|
leanlib/src/main/scala/provingground/library/inductive-types/iffInd.scala
|
Scala
|
mit
| 1,478 |
package com.twitter.finagle.service
import RetryPolicy._
import com.twitter.conversions.time._
import com.twitter.finagle.{ChannelClosedException, Failure, TimeoutException, WriteException}
import com.twitter.util._
import org.junit.runner.RunWith
import org.scalatest.FunSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RetryPolicyTest extends FunSpec {
def getBackoffs(
policy: RetryPolicy[Try[Nothing]],
exceptions: Stream[Exception]
): Stream[Duration] =
exceptions match {
case Stream.Empty => Stream.empty
case e #:: tail =>
policy(Throw(e)) match {
case None => Stream.empty
case Some((backoff, p2)) => backoff #:: getBackoffs(p2, tail)
}
}
describe("RetryPolicy") {
val NoExceptions: PartialFunction[Try[Nothing], Boolean] = {
case _ => false
}
val timeoutExc = new TimeoutException {
protected val timeout = 0.seconds
protected val explanation = "!"
}
it("should WriteExceptionsOnly") {
val weo = WriteExceptionsOnly orElse NoExceptions
assert(weo(Throw(new Exception)) === false)
assert(weo(Throw(WriteException(new Exception))) === true)
assert(weo(Throw(Failure.InterruptedBy(new Exception))) === false)
assert(weo(Throw(Failure.InterruptedBy(new Exception).withRetryable(true))) === true)
assert(weo(Throw(timeoutExc)) === false)
}
it("should TimeoutAndWriteExceptionsOnly") {
val taweo = TimeoutAndWriteExceptionsOnly orElse NoExceptions
assert(taweo(Throw(new Exception)) === false)
assert(taweo(Throw(WriteException(new Exception))) === true)
assert(taweo(Throw(Failure.InterruptedBy(new Exception))) === false)
assert(taweo(Throw(Failure.InterruptedBy(timeoutExc))) === true)
assert(taweo(Throw(timeoutExc)) === true)
assert(taweo(Throw(new com.twitter.util.TimeoutException(""))) === true)
}
}
case class IException(i: Int) extends Exception
val iExceptionsOnly: PartialFunction[Try[Nothing], Boolean] = {
case Throw(IException(_)) => true
}
val iGreaterThan1: Try[Nothing] => Boolean = {
case Throw(IException(i)) if i > 1 => true
case _ => false
}
describe("RetryPolicy.filter/filterEach") {
val backoffs = Stream(10.milliseconds, 20.milliseconds, 30.milliseconds)
val policy = RetryPolicy.backoff(backoffs)(iExceptionsOnly).filter(iGreaterThan1)
it("returns None if filter rejects") {
val actual = getBackoffs(policy, Stream(IException(0), IException(1)))
assert(actual === Stream.empty)
}
it("returns underlying result if filter accepts first") {
val actual = getBackoffs(policy, Stream(IException(2), IException(0)))
assert(actual === backoffs.take(2))
}
}
describe("RetryPolicy.filterEach") {
val backoffs = Stream(10.milliseconds, 20.milliseconds, 30.milliseconds)
val policy = RetryPolicy.backoff(backoffs)(iExceptionsOnly).filterEach(iGreaterThan1)
it("returns None if filterEach rejects") {
val actual = getBackoffs(policy, Stream(IException(0), IException(1)))
assert(actual === Stream.empty)
}
it("returns underlying result if filterEach accepts") {
val actual = getBackoffs(policy, Stream(IException(2), IException(2), IException(0)))
assert(actual === backoffs.take(2))
}
}
describe("RetryPolicy.limit") {
var currentMaxRetries: Int = 0
val maxBackoffs = Stream.fill(3)(10.milliseconds)
val policy =
RetryPolicy.backoff(maxBackoffs)(RetryPolicy.ChannelClosedExceptionsOnly)
.limit(currentMaxRetries)
it("limits retries dynamically") {
for (i <- 0 until 5) {
currentMaxRetries = i
val backoffs = getBackoffs(policy, Stream.fill(3)(new ChannelClosedException()))
assert(backoffs === maxBackoffs.take(i min 3))
}
}
}
describe("RetryPolicy.combine") {
val channelClosedBackoff = 10.milliseconds
val writeExceptionBackoff = 0.milliseconds
val combinedPolicy =
RetryPolicy.combine(
RetryPolicy.tries(3, RetryPolicy.WriteExceptionsOnly),
RetryPolicy.backoff(Stream.fill(3)(channelClosedBackoff))(RetryPolicy.ChannelClosedExceptionsOnly)
)
it("return None for unmatched exception") {
val backoffs = getBackoffs(combinedPolicy, Stream(new UnsupportedOperationException))
assert(backoffs === Stream.empty)
}
it("mimicks first policy") {
val backoffs = getBackoffs(combinedPolicy, Stream.fill(4)(WriteException(new Exception)))
assert(backoffs === Stream.fill(2)(writeExceptionBackoff))
}
it("mimicks second policy") {
val backoffs = getBackoffs(combinedPolicy, Stream.fill(4)(new ChannelClosedException()))
assert(backoffs === Stream.fill(3)(channelClosedBackoff))
}
it("interleaves backoffs") {
val exceptions = Stream(
new ChannelClosedException(),
WriteException(new Exception),
WriteException(new Exception),
new ChannelClosedException(),
WriteException(new Exception)
)
val backoffs = getBackoffs(combinedPolicy, exceptions)
val expectedBackoffs = Stream(
channelClosedBackoff,
writeExceptionBackoff,
writeExceptionBackoff,
channelClosedBackoff
)
assert(backoffs === expectedBackoffs)
}
}
}
|
kristofa/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/service/RetryPolicyTest.scala
|
Scala
|
apache-2.0
| 5,395 |
package org.jetbrains.plugins.scala.lang
package transformation
package annotations
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.extensions.{&&, Parent}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScFunctionExpr
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameterClause}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaCode._
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* @author Pavel Fatin
*/
class AddTypeToFunctionParameter extends AbstractTransformer {
def transformation(implicit project: ProjectContext): PartialFunction[PsiElement, Unit] = {
case (p: ScParameter) && Parent(e @ Parent(Parent(_: ScFunctionExpr))) if p.paramType.isEmpty =>
appendTypeAnnotation(p.getRealParameterType.get) { annotation =>
val replacement = code"(${p.getText}: ${annotation.getText}) => ()"
.getFirstChild.getFirstChild
val result = e.replace(replacement).asInstanceOf[ScParameterClause]
result.parameters.head.typeElement.get
}
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/transformation/annotations/AddTypeToFunctionParameter.scala
|
Scala
|
apache-2.0
| 1,140 |
package me.yingrui.segment.neural
import me.yingrui.segment.math.Matrix
object SoftmaxLayer {
class BPSoftmaxLayer(var weight: Matrix, var bias: Matrix, val immutable: Boolean) extends BPLayer {
def layer = new SingleLayer(weight, Softmax(), bias, false)
def size = layer.size
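    // Editor's note: the delta is the error returned unchanged, which matches the usual
    // softmax + cross-entropy simplification where the output-layer delta is (prediction - target),
    // so no extra derivative factor is applied here.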
def calculateDelta(actual: Matrix, error: Matrix): Matrix = error
}
def apply(weight: Matrix): BPLayer = new BPSoftmaxLayer(weight, Matrix(1, weight.col), false)
def apply(weight: Matrix, immutable: Boolean): BPLayer = new BPSoftmaxLayer(weight, Matrix(1, weight.col), immutable)
}
|
yingrui/mahjong
|
lib-segment/src/main/scala/me/yingrui/segment/neural/SoftmaxLayer.scala
|
Scala
|
gpl-3.0
| 587 |
/*
* Copyright 2013 - 2015, Daniel Krzywicki <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package pl.edu.agh.scalamas.random
import org.apache.commons.math3.random.RandomDataGenerator
import pl.edu.agh.scalamas.app.AgentRuntimeComponent
import scala.util.Random
/**
* Random Generator Component that can be used in concurrent applications.
 * A separate RNG is kept in a thread local for every accessing thread.
 *
 * Each one is seeded from a separate, shared RNG at first access. Therefore, the results of the
 * application can be repeatable if the thread access pattern to the RNGs is repeatable.
*/
trait ConcurrentRandomGeneratorComponent extends RandomGeneratorComponent {
this: AgentRuntimeComponent =>
def randomData = ConcurrentRandomGeneratorComponent.current
object ConcurrentRandomGeneratorComponent {
private[this] val seedSource = new Random(globalSeed)
private[this] val localRandom = new ThreadLocal[RandomDataGenerator] {
override protected def initialValue() = new RandomDataGenerator(randomGeneratorFactory(seedSource.nextLong()))
}
def current = localRandom.get
}
}
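// Editor's usage sketch (illustrative, not part of the original source): an application mixes this
// trait into its component stack together with a concrete AgentRuntimeComponent; every worker
// thread then gets its own deterministically seeded RandomDataGenerator. `SomeRuntime` is made up.
//   class MyApp extends SomeRuntime with ConcurrentRandomGeneratorComponent {
//     def sample(): Double = randomData.nextUniform(0.0, 1.0)
//   }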
|
ros3n/IntOb
|
core/src/main/scala/pl/edu/agh/scalamas/random/ConcurrentRandomGeneratorComponent.scala
|
Scala
|
mit
| 2,187 |
package services.crunch
import controllers.ArrivalGenerator
import drt.shared.FlightsApi.Flights
import drt.shared.PaxTypesAndQueues._
import drt.shared.SplitRatiosNs.{SplitRatio, SplitRatios, SplitSources}
import drt.shared.Terminals.T1
import drt.shared._
import server.feeds.ArrivalsFeedSuccess
import services.{OptimiserWithFlexibleProcessors, SDate}
import scala.collection.immutable.{List, Seq, SortedMap}
import scala.concurrent.duration._
class CrunchEgateBanksSpec extends CrunchTestLike {
sequential
isolated
"Egate banks handling " >> {
"Given flights with 20 very expensive passengers and splits to eea desk & egates " +
"When I ask for desk recs " +
"Then I should see lower egates recs by a factor of 7 (rounded up)" >> {
val scheduled00 = "2017-01-01T00:00Z"
val scheduled = "2017-01-01T00:00Z"
val flights = Flights(List(
ArrivalGenerator.arrival(schDt = scheduled00, iata = "BA0001", terminal = T1, actPax = Option(20))
))
val fiveMinutes = 600d / 60
val crunch = runCrunchGraph(TestConfig(
now = () => SDate(scheduled),
airportConfig = defaultAirportConfig.copy(
queuesByTerminal = SortedMap(T1 -> Seq(Queues.EeaDesk, Queues.EGate)),
terminalPaxSplits = Map(T1 -> SplitRatios(
SplitSources.TerminalAverage,
SplitRatio(eeaMachineReadableToDesk, 0.5),
SplitRatio(eeaMachineReadableToEGate, 0.5)
)),
terminalProcessingTimes = Map(T1 -> Map(
eeaMachineReadableToDesk -> fiveMinutes,
eeaMachineReadableToEGate -> fiveMinutes
)),
minMaxDesksByTerminalQueue24Hrs = Map(T1 -> Map(
Queues.EeaDesk -> ((List.fill[Int](24)(0), List.fill[Int](24)(20))),
Queues.EGate -> ((List.fill[Int](24)(0), List.fill[Int](24)(20))))),
slaByQueue = Map(Queues.EeaDesk -> 25, Queues.EGate -> 25),
minutesToCrunch = 30
),
cruncher = OptimiserWithFlexibleProcessors.crunch
))
offerAndWait(crunch.liveArrivalsInput, ArrivalsFeedSuccess(flights))
val expected = Map(T1 -> Map(
Queues.EeaDesk -> Seq.fill(15)(7),
Queues.EGate -> Seq.fill(15)(1)
))
crunch.portStateTestProbe.fishForMessage(1 seconds) {
case ps: PortState =>
val resultSummary = deskRecsFromPortState(ps, 15)
resultSummary == expected
}
success
}
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/test/scala/services/crunch/CrunchEgateBanksSpec.scala
|
Scala
|
apache-2.0
| 2,473 |
package chessagents
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import swing._
import event._
import Swing._
import swing.Publisher
import swing.BorderPanel.Position._
import swing.event.Event
import swing.Swing.onEDT
import javax.swing.ImageIcon;
import javax.swing.JOptionPane;
import java.awt.image.BufferStrategy;
import java.awt.Color
import chessagents.protocols.ControllerGuiProtocol._
import chessagents.protocols.GuiHumanPlayerProtocol._
class GuiActor(gameControllerRef: ActorRef) extends Actor {
var gameInProgress = false
var choosingMove = false
var from_x = 0
var from_y = 0
var to_x = 0
var to_y = 0
var playerWhite: ActorRef = null
var playerBlack: ActorRef = null
var currentHumanPlayer: ActorRef = null
object PlayerType extends Enumeration {
type PlayerType = Value
val Human, AI = Value
}
import PlayerType._
val mainFrame = new MainFrame {
title = "Chezz Agentz"
val gameTypeLabel = new Label {
text = "Start game: "
}
val startHumanVsHumanBtn = Button("Human vs Human!") {
stopCurrentGame
setupNewGameWithPlayers(Human, Human)
}
val startHumanVsAIBtn = Button("Human vs AI!") {
stopCurrentGame
setupNewGameWithPlayers(Human, AI)
}
val startAIVsAIBtn = Button("AI vs AI!") {
stopCurrentGame
setupNewGameWithPlayers(AI, AI)
}
val speedLabel = new Label {
text = "Move delay: "
}
object speedSlider extends Slider {
min = 0
max = 5000
value = 1000
majorTickSpacing = 500
}
val textArea = new TextArea(5, 30) {
}
val menuPanel = new FlowPanel {
contents += gameTypeLabel
contents += startHumanVsHumanBtn
contents += startHumanVsAIBtn
contents += startAIVsAIBtn
contents += speedLabel
contents += speedSlider
contents += new ScrollPane(textArea)
preferredSize = new Dimension(300, 150)
}
val gridPanel = new GridPanel(8, 8) {
val dim = new Dimension(60, 60)
for(i <- 0 to 7; j <- 0 to 7) {
contents += new Button {
text = ""
preferredSize = dim
if((i + j) % 2 == 0)
background = Color.gray
else
background = Color.darkGray
action = Action("") {
clickBtn(i, j)
}
}
}
}
contents = new BorderPanel {
layout(menuPanel) = North
layout(gridPanel) = South
}
}
def receive = {
// Gui Controller protocol:
case ShowGui =>
showGui
log("Welcome to chess agents!")
case Check =>
log("Check!")
case GameOverGui =>
log("Game over!")
gameInProgress = false
case ResetChessboardGui =>
for(x <- 0 to 7; y <- 0 to 7) {
val b = mainFrame.gridPanel.contents(8 * x + y).asInstanceOf[Button]
b.icon = null
}
sender() ! GuiUpdated
case MovePieceGui(from_x: Int, from_y: Int, to_x: Int, to_y: Int) =>
val from = mainFrame.gridPanel.contents(8 * (from_x) + from_y)
.asInstanceOf[Button]
val to = mainFrame.gridPanel.contents(8 * (to_x) + to_y)
.asInstanceOf[Button]
to.icon = from.icon
from.icon = null
Thread.sleep(mainFrame.speedSlider.value)
sender() ! GuiUpdated
case SpawnPieceGui(ch, isWhite, x, y) =>
val b = mainFrame.gridPanel.contents(8 * x + y).asInstanceOf[Button]
if(ch != " ") {
var color: String = null
var pieceType = ch.toUpperCase()
if(isWhite == true) {
color = "white"
} else {
color = "black"
}
b.icon = new ImageIcon("resources/" + pieceType + "_" + color + ".png")
}
sender() ! GuiUpdated
// Gui Human Player protocol:
case ShowWinMsg => log("You win!")
case ShowLoseMsg => log("You lose.")
//case ShowCheckMsg => log("Checked!")
case ShowYourMoveMsg =>
currentHumanPlayer = sender()
log("Your turn, "
+ currentHumanPlayer.path.name)
case ShowInvalidMoveMsg => log("Invalid move.")
}
def log(msg: String) = {
    mainFrame.textArea.text += msg + "\n"
}
def showGui = {
mainFrame.visible = true
}
def clickBtn(x: Int, y: Int) = {
println("Clicked " + x + ", " + y)
if(choosingMove == false) {
choosingMove = true
from_x = x
from_y = y
}
else {
choosingMove = false
to_x = x
to_y = y
currentHumanPlayer ! MoveViaGui(from_x, from_y, to_x, to_y)
}
}
def stopCurrentGame = {
if(playerWhite != null) context.stop(playerWhite)
if(playerBlack != null) context.stop(playerBlack)
}
def setupNewGameWithPlayers(whiteHumanity: PlayerType,
blackHumanity: PlayerType) = {
setupPlayers(whiteHumanity, blackHumanity)
gameControllerRef ! StartGameController(playerWhite, playerBlack)
gameInProgress = true
}
def setupPlayers(whiteHumanity: PlayerType,
blackHumanity: PlayerType) = {
playerWhite = spawnPlayer("white", whiteHumanity == Human)
playerBlack = spawnPlayer("black", blackHumanity == Human)
}
def spawnPlayer(name: String, isHuman: Boolean) = {
if(isHuman)
context.actorOf(Props(new HumanPlayer), name)
else
context.actorOf(Props(new Player), name)
}
}
|
m-kostrzewa/ChessAgents
|
src/chessagents/GuiActor.scala
|
Scala
|
mit
| 6,403 |
package org.retistruen
import java.math.BigInteger
/** Type-class for things that can be read from String */
abstract class ReadableFromString[T] {
def read(s: String): T
}
/** Basic [[org.retistruen.ReadableFromString]] implementations */
object ReadableFromString {
implicit val DoubleIsReadableFromString = new ReadableFromString[Double] { def read(s: String) = s.toDouble }
implicit val IntIsReadableFromString = new ReadableFromString[Int] { def read(s: String) = s.toInt }
implicit val BigDecimalIsReadableFromString = new ReadableFromString[BigDecimal] { def read(s: String) = BigDecimal(s) }
implicit val BigIntFromString = new ReadableFromString[BigInt] { def read(s: String) = BigInt(s) }
}
abstract class OpenSource[T: ReadableFromString] extends Source[T] {
val readableFromString = implicitly[ReadableFromString[T]]
def <<(value: String) = emit(readableFromString.read(value))
}
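// Editor's usage sketch (illustrative, not part of the original source): given some concrete
// subclass of OpenSource[Double] (call it `mySource`; whatever abstract members Source declares
// are assumed to be provided elsewhere), string input is parsed through the ReadableFromString
// type class and re-emitted:
//   mySource << "3.14"   // equivalent to mySource.emit(3.14)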
|
plalloni/retistruen
|
src/main/scala/org/retistruen/OpenSource.scala
|
Scala
|
mit
| 913 |
package com.codebook.akka
import akka.event.NoLogging
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.util.Timeout
import com.codebook.akka.model.User
import org.scalatest.{Matchers, WordSpec}
import scala.concurrent.duration.DurationInt
class ServicesSpec extends WordSpec with Matchers with ScalatestRouteTest with Services {
implicit val timeout = Timeout(1 minute)
implicit val log = NoLogging
"User" should {
s"return JSON for GET request to /api/user" in {
Get(s"/api/user") ~> routes ~> check {
status shouldBe OK
contentType shouldBe `application/json`
responseAs[User] shouldBe JohnDoe
}
}
s"return String for POST request to /api/user" in {
Post(s"/api/user", JohnDoe) ~> routes ~> check {
status shouldBe OK
contentType shouldBe `text/plain(UTF-8)`
responseAs[String].length should be > 0
}
}
}
}
|
flopezlasanta/akka-services
|
src/test/scala/com/codebook/akka/ServicesSpec.scala
|
Scala
|
mit
| 1,019 |
package im.actor.server.api.rpc.service
import im.actor.api.rpc._
import im.actor.api.rpc.counters.UpdateCountersChanged
import im.actor.api.rpc.groups._
import im.actor.api.rpc.messaging._
import im.actor.api.rpc.misc.ResponseSeqDate
import im.actor.api.rpc.peers.{ ApiPeer, ApiOutPeer, ApiPeerType, ApiUserOutPeer }
import im.actor.server._
import im.actor.server.acl.ACLUtils
import im.actor.server.api.rpc.service.groups.{ GroupInviteConfig, GroupRpcErrors, GroupsServiceImpl }
import im.actor.server.group.GroupServiceMessages
import im.actor.server.model.PeerType
import im.actor.server.persist.HistoryMessageRepo
import slick.dbio.DBIO
import scala.util.Random
final class GroupsServiceSpec
extends BaseAppSuite
with GroupsServiceHelpers
with MessageParsing
with MessagingSpecHelpers
with ImplicitSequenceService
with ImplicitAuthService
with ImplicitSessionRegion
with SeqUpdateMatchers
with PeersImplicits {
behavior of "GroupsService"
it should "send invites on group creation" in sendInvitesOnCreate
it should "send updates on group invite" in sendUpdatesOnInvite
it should "send updates ot title change" in sendUpdatesOnTitleChange
it should "persist service messages in history" in e4
it should "generate invite url for group member" in e5
it should "not generate invite url for group non members" in e6
it should "revoke invite token and generate new token for group member" in e7
it should "allow user to join group by correct invite link and send correct updates" in e8
it should "not allow group member to join group by invite link" in e9
it should "send updates on user join" in e10
it should "send UserInvited and UserJoined on user's first MessageRead" in e11
it should "receive userJoined once" in userJoinedOnce
it should "not allow to create group with empty name" in e13
it should "send UpdateChatGroupsChanged to all group members on group creation" in updateChatGroupsChanged
"Creator of group" should "be groupAdminColor" in e14
"MakeUserAdmin" should "allow group member to become admin" in e15
it should "forbid to perform action by non-admin" in e16
it should "return error when user is already admin" in e17
"EditGroupAbout" should "allow group admin to change 'about'" in e18
it should "forbid to change 'about' by non-admin" in e19
it should "set 'about' to empty when None comes" in e20
it should "forbid to set invalid 'about' field (empty, or longer than 255 characters)" in e21
"EditGroupTopic" should "allow any group member to change topic" in e22
it should "forbid to set invalid topic (empty, or longer than 255 characters)" in e23
it should "set topic to empty when None comes" in e24
"Leave group" should "mark messages read in left user dialog" in e25
"Kick user" should "mark messages read in kicked user dialog" in e26
"Kick user" should "mark messages read in public group" in markReadOnKickInPublic
"Kicked user" should "not be able to write to group" in e27
val groupInviteConfig = GroupInviteConfig("http://actor.im")
implicit val messagingService = messaging.MessagingServiceImpl()
implicit val service = new GroupsServiceImpl(groupInviteConfig)
def sendInvitesOnCreate() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, _, _, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer
expectUpdate(classOf[UpdateChatGroupsChanged])(identity)
expectUpdate(classOf[UpdateGroupUserInvitedObsolete])(identity)
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
whenReady(db.run(persist.GroupUserRepo.findUserIds(groupOutPeer.groupId))) { userIds ⇒
userIds.toSet shouldEqual Set(user1.id, user2.id)
}
}
def sendUpdatesOnInvite() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
{
implicit val clientData = clientData1
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern {
case Ok(ResponseSeqDate(3, _, _)) ⇒
}
}
expectUpdate(classOf[UpdateGroupUserInvitedObsolete])(identity)
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
expectUpdate(classOf[UpdateChatGroupsChanged])(identity)
}
{
implicit val clientData = clientData2
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
//UpdateChatGroupsChanged will come after creation of dialog
}
}
def sendUpdatesOnTitleChange() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
{
implicit val clientData = clientData1
val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer
whenReady(service.handleEditGroupTitle(groupOutPeer, Random.nextLong(), "Very fun group", Vector.empty)) { resp ⇒
resp should matchPattern {
case Ok(ResponseSeqDate(4, _, _)) ⇒
}
}
expectUpdate(classOf[UpdateChatGroupsChanged])(identity)
expectUpdate(classOf[UpdateGroupUserInvitedObsolete])(identity)
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
expectUpdate(classOf[UpdateGroupTitleChangedObsolete])(identity)
}
{
implicit val clientData = clientData2
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
expectUpdate(classOf[UpdateGroupTitleChangedObsolete])(identity)
//UpdateChatGroupsChanged will come after creation of dialog
}
}
def e4() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 1
serviceMessages
.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual
Vector(Right(GroupServiceMessages.groupCreated))
}
whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 2
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual
Vector(
Right(GroupServiceMessages.userInvited(user2.id)),
Right(GroupServiceMessages.groupCreated)
)
}
whenReady(db.run(persist.HistoryMessageRepo.find(user2.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 1
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual
Vector(Right(GroupServiceMessages.userInvited(user2.id)))
}
}
//TODO: is it ok to remove avatar of group without avatar
whenReady(service.handleRemoveGroupAvatar(groupOutPeer, Random.nextLong(), Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
Thread.sleep(500)
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 3
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual
Vector(
Right(GroupServiceMessages.changedAvatar(None)),
Right(GroupServiceMessages.userInvited(user2.id)),
Right(GroupServiceMessages.groupCreated)
)
}
whenReady(db.run(persist.HistoryMessageRepo.find(user2.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 2
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual
Vector(
Right(GroupServiceMessages.changedAvatar(None)),
Right(GroupServiceMessages.userInvited(user2.id))
)
}
}
whenReady(service.handleEditGroupTitle(groupOutPeer, Random.nextLong(), "Not fun group", Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 4
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.changedTitle("Not fun group"))
}
}
whenReady(service.handleLeaveGroup(groupOutPeer, Random.nextLong(), Vector.empty)(ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42))))) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 5
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.userLeft(user2.id))
}
}
whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 6
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.userInvited(user2.id))
}
}
whenReady(service.handleKickUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
whenReady(db.run(persist.HistoryMessageRepo.find(user1.id, model.Peer(PeerType.Group, groupOutPeer.groupId)))) { serviceMessages ⇒
serviceMessages should have length 7
serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.userKicked(user2.id))
}
}
}
def e5() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer
{
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
var expUrl: String = ""
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
expUrl = url
}
}
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
url shouldEqual expUrl
}
}
}
{
implicit val clientData = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
var expUrl: String = ""
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
expUrl = url
}
}
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
url shouldEqual expUrl
}
}
}
val findTokens =
for {
tokens ← DBIO.sequence(List(
persist.GroupInviteTokenRepo.find(groupOutPeer.groupId, user1.id),
persist.GroupInviteTokenRepo.find(groupOutPeer.groupId, user2.id)
))
} yield tokens.flatten
whenReady(db.run(findTokens)) { tokens ⇒
tokens should have length 2
tokens.foreach(_.groupId shouldEqual groupOutPeer.groupId)
tokens.map(_.creatorId) should contain allOf (user1.id, user2.id)
}
}
def e6() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
{
implicit val clientData = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
resp should matchForbidden
}
}
}
def e7() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
var expUrl: String = ""
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
expUrl = url
}
}
whenReady(service.handleRevokeInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
url should not equal expUrl
}
}
whenReady(db.run(persist.GroupInviteTokenRepo.find(groupOutPeer.groupId, user1.id))) { tokens ⇒
tokens should have length 1
}
}
def e8() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Invite Fun group", Set.empty).groupPeer
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
{
implicit val clientData = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
whenReady(service.handleJoinGroup(url, Vector.empty)) { resp ⇒
resp should matchPattern {
case Ok(ResponseJoinGroup(_, _, _, _, _, _, _)) ⇒
}
}
}
}
}
whenReady(db.run(persist.GroupUserRepo.findUserIds(groupOutPeer.groupId))) { userIds ⇒
userIds should have length 2
userIds should contain allOf (user1.id, user2.id)
}
}
def e9() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
implicit val clientData = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
{
implicit val clientData = ClientData(authId2, createSessionId(), Some(AuthData(user2.id, authSid2, 42)))
val outPeer = ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash)
whenReady(messagingService.handleMessageRead(outPeer, System.currentTimeMillis()))(_ ⇒ ())
whenReady(service.handleJoinGroup(url, Vector.empty)) { resp ⇒
inside(resp) {
case Error(err) ⇒ err shouldEqual GroupRpcErrors.YouAlreadyAMember
}
}
}
}
}
}
def e10() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val createGroupResponse = createGroup("Invite Fun group", Set.empty)
val groupOutPeer = createGroupResponse.groupPeer
whenReady(service.handleGetGroupInviteUrl(groupOutPeer)(clientData1)) { resp ⇒
inside(resp) {
case Ok(ResponseInviteUrl(url)) ⇒
url should startWith(groupInviteConfig.baseUrl)
whenReady(service.handleJoinGroup(url, Vector.empty)(clientData2))(_ ⇒ ())
expectUpdate(createGroupResponse.seq, classOf[UpdateMessage]) { upd ⇒
upd.message shouldEqual GroupServiceMessages.userJoined
}
expectUpdate(createGroupResponse.seq, classOf[UpdateCountersChanged])(identity)
}
}
whenReady(db.run(persist.GroupUserRepo.findUserIds(groupOutPeer.groupId))) { userIds ⇒
userIds should have length 2
userIds should contain allOf (user1.id, user2.id)
}
}
def e11() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = {
implicit val clientData = clientData1
val groupOutPeer = createGroup("Invite Fun group", Set.empty).groupPeer
whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong, user2OutPeer, Vector.empty)) { _ ⇒ }
sendMessageToGroup(groupOutPeer.groupId, ApiTextMessage("This is message to initialize group dialog", Vector.empty, None))
groupOutPeer
}
{
implicit val clientData = clientData2
// send it twice to ensure that ServiceMessage isn't sent twice
whenReady(messagingService.handleMessageRead(ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash), System.currentTimeMillis))(identity)
whenReady(messagingService.handleMessageRead(ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash), System.currentTimeMillis))(identity)
}
{
implicit val clientData = clientData1
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
expectUpdate(classOf[UpdateGroupUserInvitedObsolete])(identity)
expectUpdate(classOf[UpdateMessageRead])(identity)
expectUpdate(classOf[UpdateCountersChanged])(identity)
expectUpdate(classOf[UpdateMessage])(identity)
}
}
def userJoinedOnce() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, createSessionId(), Some(AuthData(user2.id, authSid2, 42)))
val groupOutPeer = {
implicit val clientData = clientData1
createGroup("Fun group", Set.empty).groupPeer
}
val url = whenReady(service.handleGetGroupInviteUrl(groupOutPeer)(clientData1)) { _.toOption.get.url }
whenReady(service.handleJoinGroup(url, Vector.empty)(clientData2)) { resp ⇒
resp should matchPattern {
case Ok(ResponseJoinGroup(_, _, _, _, _, _, _)) ⇒
}
}
val peer = ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash)
whenReady(messagingService.handleSendMessage(peer, 22324L, ApiTextMessage("hello", Vector.empty, None), None, None)(clientData1)) { _ ⇒ }
whenReady(messagingService.handleMessageRead(peer, System.currentTimeMillis)(clientData2)) { _ ⇒ }
{
implicit val clientData = clientData1
expectUpdate(classOf[UpdateChatGroupsChanged])(identity)
expectUpdate(classOf[UpdateGroupInviteObsolete])(identity)
expectUpdate(classOf[UpdateMessageSent])(identity)
expectUpdate(classOf[UpdateMessage])(identity)
expectUpdate(classOf[UpdateCountersChanged])(identity)
expectUpdate(classOf[UpdateMessageRead])(identity)
}
}
def e13() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
whenReady(service.handleCreateGroupObsolete(1L, "", Vector.empty)) { resp ⇒
inside(resp) {
case Error(GroupRpcErrors.WrongGroupTitle) ⇒
}
}
}
def updateChatGroupsChanged() = {
val sessionId = createSessionId()
val users = for (i ← 1 to 5) yield createUser()
val userIds = (users map (_._1.id)).toSet
val groupPeer = {
val (user, authId, authSid, _) = users.head
implicit val cd = ClientData(authId, sessionId, Some(AuthData(user.id, authSid, 42)))
createGroup("Fun group", userIds).groupPeer
}
users foreach {
case (user, authId, authSid, _) ⇒
implicit val cd = ClientData(authId, sessionId, Some(AuthData(user.id, authSid, 42)))
expectUpdate(classOf[UpdateChatGroupsChanged]) { u ⇒
val groupDialogs = u.dialogs.find(_.key == "groups")
groupDialogs shouldBe defined
val dialogsShort = groupDialogs.get.dialogs
dialogsShort should have length 1
dialogsShort.head.peer shouldEqual ApiPeer(ApiPeerType.Group, groupPeer.groupId)
}
}
}
def e14() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
whenReady(db.run(persist.GroupUserRepo.find(groupOutPeer.groupId, user1.id))) { groupUser ⇒
groupUser shouldBe defined
groupUser.get.isAdmin shouldEqual true
}
}
def e15() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer
whenReady(service.handleMakeUserAdminObsolete(groupOutPeer, user2OutPeer)) { resp ⇒
inside(resp) {
case Ok(ResponseMakeUserAdminObsolete(members, _, _)) ⇒
members.find(_.userId == user2.id) foreach (_.isAdmin shouldEqual Some(true))
}
}
}
def e16() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val (user3, authId3, authSid3, _) = createUser()
val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, createSessionId(), Some(AuthData(user2.id, authSid2, 42)))
val user3Model = getUserModel(user3.id)
val user3AccessHash = ACLUtils.userAccessHash(clientData2.authId, user3.id, user3Model.accessSalt)
val user3OutPeer = ApiUserOutPeer(user3.id, user3AccessHash)
val groupOutPeer = {
implicit val clientData = clientData1
createGroup("Fun group", Set(user2.id)).groupPeer
}
whenReady(service.handleMakeUserAdmin(groupOutPeer, user3OutPeer)(clientData2)) { resp ⇒
resp shouldEqual Error(CommonRpcErrors.forbidden("Only admin can perform this action."))
}
}
def e17() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer
whenReady(service.handleMakeUserAdminObsolete(groupOutPeer, user2OutPeer)) { resp ⇒
resp should matchPattern {
case Ok(_: ResponseMakeUserAdminObsolete) ⇒
}
}
whenReady(service.handleMakeUserAdmin(groupOutPeer, user2OutPeer)) { resp ⇒
resp shouldEqual Error(GroupRpcErrors.UserAlreadyAdmin)
}
}
def e18() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
val about = Some("It is group for fun")
whenReady(service.handleEditGroupAbout(groupOutPeer, 1L, about, Vector.empty)) { resp ⇒
resp should matchPattern {
case Ok(_: ResponseSeqDate) ⇒
}
}
whenReady(db.run(persist.GroupRepo.find(groupOutPeer.groupId))) { group ⇒
group.get.about shouldEqual about
}
}
def e19() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, createSessionId(), Some(AuthData(user2.id, authSid2, 42)))
val groupOutPeer = {
implicit val clientData = clientData1
createGroup("Fun group", Set(user2.id)).groupPeer
}
whenReady(service.handleEditGroupAbout(groupOutPeer, 1L, Some("It is group for fun"), Vector.empty)(clientData2)) { resp ⇒
resp shouldEqual Error(CommonRpcErrors.forbidden("Only admin can perform this action."))
}
}
def e20() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
whenReady(service.handleEditGroupAbout(groupOutPeer, 1L, None, Vector.empty)) { resp ⇒
resp should matchPattern {
case Ok(_: ResponseSeqDate) ⇒
}
}
whenReady(db.run(persist.GroupRepo.find(groupOutPeer.groupId))) { group ⇒
group.get.about shouldEqual None
}
}
def e21() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
val longAbout = 1 to 300 map (e ⇒ ".") mkString ""
whenReady(service.handleEditGroupAbout(groupOutPeer, 1L, Some(longAbout), Vector.empty)) { resp ⇒
resp shouldEqual Error(GroupRpcErrors.AboutTooLong)
}
val emptyAbout = ""
whenReady(service.handleEditGroupAbout(groupOutPeer, 1L, Some(emptyAbout), Vector.empty)) { resp ⇒
resp shouldEqual Error(GroupRpcErrors.AboutTooLong)
}
whenReady(db.run(persist.GroupRepo.find(groupOutPeer.groupId))) { group ⇒
group.get.about shouldEqual None
}
}
def e22() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, createSessionId(), Some(AuthData(user2.id, authSid2, 42)))
val groupOutPeer = {
implicit val cd = clientData1
createGroup("Fun group", Set(user2.id)).groupPeer
}
val topic1 = Some("Fun stufff")
whenReady(service.handleEditGroupTopic(groupOutPeer, 1L, topic1, Vector.empty)(clientData1)) { resp ⇒
resp should matchPattern {
case Ok(_: ResponseSeqDate) ⇒
}
}
val topic2 = Some("Fun stuff. Typo!")
whenReady(service.handleEditGroupTopic(groupOutPeer, 2L, topic2, Vector.empty)(clientData2)) { resp ⇒
resp should matchPattern {
case Ok(_: ResponseSeqDate) ⇒
}
}
whenReady(db.run(persist.GroupRepo.find(groupOutPeer.groupId))) { group ⇒
group.get.topic shouldEqual topic2
}
}
def e23() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
val longTopic = "." * 300
whenReady(service.handleEditGroupTopic(groupOutPeer, 1L, Some(longTopic), Vector.empty)) { resp ⇒
resp shouldEqual Error(GroupRpcErrors.TopicTooLong)
}
val emptyTopic = ""
whenReady(service.handleEditGroupTopic(groupOutPeer, 2L, Some(emptyTopic), Vector.empty)) { resp ⇒
resp shouldEqual Error(GroupRpcErrors.TopicTooLong)
}
whenReady(db.run(persist.GroupRepo.find(groupOutPeer.groupId))) { group ⇒
group.get.topic shouldEqual None
}
}
def e24() = {
val (user1, authId1, authSid1, _) = createUser()
implicit val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer
whenReady(service.handleEditGroupTopic(groupOutPeer, 1L, None, Vector.empty)) { resp ⇒
resp should matchPattern {
case Ok(_: ResponseSeqDate) ⇒
}
}
whenReady(db.run(persist.GroupRepo.find(groupOutPeer.groupId))) { group ⇒
group.get.topic shouldEqual None
}
}
def e25() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val clientData1 = ClientData(authId1, createSessionId(), Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, createSessionId(), Some(AuthData(user2.id, authSid2, 42)))
val groupOutPeer = {
implicit val clientData = clientData1
createGroup("Fun group", Set(user2.id)).groupPeer
}
val outPeer = ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash)
for (_ ← 1 to 6) {
implicit val clientData = clientData1
whenReady(messagingService.handleSendMessage(outPeer, Random.nextLong(), ApiTextMessage("hello", Vector.empty, None), None, None)) { _ ⇒ }
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount should be > 6
}
whenReady(service.handleLeaveGroup(groupOutPeer, Random.nextLong(), Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
}
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldEqual 0
}
}
for (_ ← 1 to 6) {
implicit val clientData = clientData1
whenReady(messagingService.handleSendMessage(outPeer, Random.nextLong(), ApiTextMessage("bye left user", Vector.empty, None), None, None)) { _ ⇒ }
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldEqual 0
}
}
}
def e26() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(authId1, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = {
implicit val clientData = clientData1
createGroup("Fun group", Set(user2.id)).groupPeer
}
val outPeer = ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash)
for (_ ← 1 to 6) {
implicit val clientData = clientData1
whenReady(messagingService.handleSendMessage(outPeer, Random.nextLong(), ApiTextMessage("hello", Vector.empty, None), None, None)) { _ ⇒ }
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount should be > 6
}
}
{
implicit val clientData = clientData1
whenReady(service.handleKickUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
}
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldEqual 0
}
}
for (_ ← 1 to 6) {
implicit val clientData = clientData1
whenReady(messagingService.handleSendMessage(outPeer, Random.nextLong(), ApiTextMessage("bye kicked user", Vector.empty, None), None, None)) { _ ⇒ }
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldEqual 0
}
}
}
def markReadOnKickInPublic() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(authId1, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = {
implicit val clientData = clientData1
createPubGroup("Public group", "desc", Set(user2.id)).groupPeer
}
val outPeer = ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash)
Thread.sleep(500)
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldBe 2
}
}
for (_ ← 1 to 6) {
implicit val clientData = clientData1
whenReady(messagingService.handleSendMessage(outPeer, Random.nextLong(), ApiTextMessage("hello public", Vector.empty, None), None, None)) { _ ⇒ }
}
Thread.sleep(2000)
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldBe 8
}
}
{
implicit val clientData = clientData1
whenReady(service.handleKickUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
}
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldEqual 0
}
}
for (_ ← 1 to 6) {
implicit val clientData = clientData1
whenReady(messagingService.handleSendMessage(outPeer, Random.nextLong(), ApiTextMessage("bye kicked user", Vector.empty, None), None, None)) { _ ⇒ }
}
{
implicit val clientData = clientData2
whenReady(messagingService.handleLoadDialogs(Long.MaxValue, 100, Vector.empty)) { resp ⇒
val dialog = resp.toOption.get.dialogs.head
dialog.unreadCount shouldEqual 0
}
}
}
def e27() = {
val (user1, authId1, authSid1, _) = createUser()
val (user2, authId2, authSid2, _) = createUser()
val sessionId = createSessionId()
val clientData1 = ClientData(authId1, sessionId, Some(AuthData(user1.id, authSid1, 42)))
val clientData2 = ClientData(authId2, sessionId, Some(AuthData(user2.id, authSid2, 42)))
val user2Model = getUserModel(user2.id)
val user2AccessHash = ACLUtils.userAccessHash(authId1, user2.id, user2Model.accessSalt)
val user2OutPeer = ApiUserOutPeer(user2.id, user2AccessHash)
val groupOutPeer = {
implicit val clientData = clientData1
createGroup("Fun group", Set(user2.id)).groupPeer
}
val outPeer = ApiOutPeer(ApiPeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash)
for (_ ← 1 to 6) {
implicit val clientData = clientData2
sendMessageToGroup(groupOutPeer.groupId, ApiTextMessage("hello", Vector.empty, None))
}
{
implicit val clientData = clientData1
whenReady(service.handleKickUser(groupOutPeer, Random.nextLong(), user2OutPeer, Vector.empty)) { resp ⇒
resp should matchPattern { case Ok(_) ⇒ }
}
}
val user1Seq = whenReady(sequenceService.handleGetState(Vector.empty)(clientData1))(_.toOption.get.seq)
val user2Seq = whenReady(sequenceService.handleGetState(Vector.empty)(clientData2))(_.toOption.get.seq)
{
implicit val clientData = clientData2
val randomId = Random.nextLong()
whenReady(messagingService.handleSendMessage(outPeer, randomId, ApiTextMessage("WTF? am i kicked?!!?!?!?!?!?!?!?!??!?!?!", Vector.empty, None), None, None)) { resp ⇒
inside(resp) {
case Error(err) ⇒ err.code shouldEqual 403
}
expectNoUpdate(user2Seq, classOf[UpdateMessageSent])
whenReady(db.run(HistoryMessageRepo.find(user2.id, outPeer.asModel, Set(randomId)))) { ms ⇒
ms shouldBe empty
}
whenReady(db.run(HistoryMessageRepo.find(user1.id, outPeer.asModel, Set(randomId)))) { ms ⇒
ms shouldBe empty
}
}
}
{
implicit val clientData = clientData1
expectNoUpdate(user1Seq, classOf[UpdateMessage])
expectNoUpdate(user1Seq, classOf[UpdateCountersChanged])
}
}
}
|
ljshj/actor-platform
|
actor-server/actor-tests/src/test/scala/im/actor/server/api/rpc/service/GroupsServiceSpec.scala
|
Scala
|
mit
| 41,633 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js tools             **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013-2014, LAMP/EPFL   **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.core.tools.classpath.builder
import scala.collection.mutable
import org.scalajs.core.tools.io._
import org.scalajs.core.tools.jsdep.JSDependencyManifest
import org.scalajs.core.tools.classpath._
/** reads a ScalaJS library JAR into a CP
* - IR files go to scalaJSCode
* - JS files go to availableLibs
* - Reads a potential top-level JS_DEPENDENCIES file
*/
trait AbstractJarLibClasspathBuilder extends JarTraverser {
private val irFiles = mutable.ListBuffer.empty[VirtualScalaJSIRFile]
private val jsFiles = mutable.Map.empty[String, VirtualJSFile]
private var dependency: Option[JSDependencyManifest] = None
def build(jar: File): PartialClasspath = {
val v = traverseJar(jar)
new PartialClasspath(dependency.toList,
jsFiles.toMap, irFiles.toList, Some(v))
}
override protected def handleIR(relPath: String,
ir: => VirtualScalaJSIRFile): Unit = {
// We don't need to implement shadowing here: We have only a single JAR
irFiles += ir
}
override protected def handleJS(relPath: String,
js: => VirtualJSFile): Unit = {
val file = js
if (!jsFiles.contains(relPath))
jsFiles += relPath -> file
}
override protected def handleDepManifest(m: => JSDependencyManifest): Unit = {
if (dependency.isDefined)
sys.error("A JAR cannot have multiple JS dependency manifests")
dependency = Some(m)
}
}
|
colinrgodsey/scala-js
|
tools/shared/src/main/scala/org/scalajs/core/tools/classpath/builder/AbstractJarLibClasspathBuilder.scala
|
Scala
|
bsd-3-clause
| 1,934 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Contributors:
* Hao Jiang - initial API and implementation
*
*/
package edu.uchicago.cs.encsel.dataset.feature
import java.util.Comparator
import edu.uchicago.cs.encsel.common.Conversions._
import edu.uchicago.cs.encsel.dataset.column.Column
import scala.io.Source
import scala.util.Random
/**
* This feature computes how much the dataset is sorted by compute the number
* of inverted pairs
*/
class Sortness(val windowSize: Int) extends FeatureExtractor {
def featureType: String = "Sortness"
def supportFilter: Boolean = true
def extract(input: Column, prefix: String): Iterable[Feature] = {
val lineCount = FeatureExtractorUtils.lineCount(input)
val selection =
  if (lineCount > windowSize * windowSize) 2.0 / windowSize
  else 2.0 * windowSize / lineCount
val source = Source.fromFile(input.colFile)
// The sampling ratio keeps the expected amount of pair-counting work linear, i.e. O(n) in the number of lines
try {
var total_pair = 0
var inverted_pair = 0
var counter = 0
var rankdiff = 0
if (windowSize != -1) {
source.getLines().sliding(windowSize, windowSize)
.filter(p => Random.nextDouble() <= selection)
.foreach(group => {
val (invert, total) = Sortness.computeInvertPair(group, input.dataType.comparator())
total_pair += total
inverted_pair += invert
val diffrank = Sortness.computeDiffRank(group, input.dataType.comparator())
counter += group.length
rankdiff += diffrank
})
} else {
val group = source.getLines().toSeq
val (invert, total) = Sortness.computeInvertPair(group, input.dataType.comparator())
total_pair += total
inverted_pair += invert
rankdiff += Sortness.computeDiffRank(group, input.dataType.comparator())
counter += group.size
}
val fType = featureType(prefix)
if (0 != total_pair) {
// 1 - abs(2x-1)
val ratio = (total_pair - inverted_pair).toDouble / total_pair
val ivpair = 1 - Math.abs(2 * ratio - 1)
// Kendall's Tau
val ktau = (total_pair - 2 * inverted_pair).toDouble / total_pair
// Spearman's Rho
val srho = 1 - 6.0 * rankdiff / (counter * (counter * counter - 1))
Iterable(
new Feature(fType, "totalpair_%d".format(windowSize), total_pair),
new Feature(fType, "ivpair_%d".format(windowSize), ivpair),
new Feature(fType, "kendalltau_%d".format(windowSize), ktau),
new Feature(fType, "numitem_%d".format(windowSize), counter),
new Feature(fType, "spearmanrho_%d".format(windowSize), srho)
)
} else {
Iterable(
new Feature(fType, "totalpair_%d".format(windowSize), 0),
new Feature(fType, "ivpair_%d".format(windowSize), 0),
new Feature(fType, "kendalltau_%d".format(windowSize), 0),
new Feature(fType, "numitem_%d".format(windowSize), 0),
new Feature(fType, "spearmanrho_%d".format(windowSize), 0)
)
}
} finally {
source.close()
}
}
}
object Sortness {
def computeInvertPair(input: Seq[String],
comparator: Comparator[String]): (Int, Int) = {
if (input.isEmpty)
return (0, 0)
var invert = 0
input.indices.foreach(i => {
input.indices.drop(i + 1).foreach(j => {
if (comparator.compare(input(i), input(j)) > 0) {
invert += 1
}
})
})
return (invert, input.length * (input.length - 1) / 2)
}
def computeDiffRank(input: Seq[String], comparator: Comparator[String]): Int = {
if (input.isEmpty)
return 0
input.zipWithIndex.sortBy(_._1)(comparator).zipWithIndex.map(item => {
  val d = item._1._2 - item._2
  d * d
}).sum
}
}
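// A small worked example of the statistics above (a sketch, using the natural String ordering):
// for the window Seq("b", "a", "c") there are 3 pairs and 1 inversion ("b" before "a"), so
//   ivpair         = 1 - |2 * (3 - 1) / 3 - 1|          = 2/3
//   Kendall's tau  = (3 - 2 * 1) / 3                    = 1/3
//   rank diffs     = (1 - 0)^2 + (0 - 1)^2 + (2 - 2)^2  = 2
//   Spearman's rho = 1 - 6 * 2 / (3 * (3 * 3 - 1))      = 0.5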
|
harperjiang/enc-selector
|
src/main/scala/edu/uchicago/cs/encsel/dataset/feature/Sortness.scala
|
Scala
|
apache-2.0
| 4,692 |
package controllers
import java.io.File
import java.net.URLDecoder
import java.util.UUID
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import notebook.NBSerializer.Metadata
import notebook._
import notebook.server._
import play.api.Play.current
import play.api._
import play.api.http.HeaderNames
import play.api.libs.iteratee.Concurrent.Channel
import play.api.libs.iteratee._
import play.api.libs.json._
import play.api.mvc._
import utils.AppUtils
import utils.Const.UTF_8
import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.language.postfixOps
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
case class Crumb(url: String = "", name: String = "")
case class Breadcrumbs(home: String = "/", crumbs: List[Crumb] = Nil)
object Application extends Controller {
private lazy val config = AppUtils.config
private lazy val nbm = AppUtils.nbm
private val kernelIdToCalcService = collection.mutable.Map[String, CalcWebSocketService]()
private val clustersActor = kernelSystem.actorOf(Props(NotebookClusters(AppUtils.clustersConf)))
private implicit def kernelSystem: ActorSystem = AppUtils.kernelSystem
private implicit val GetClustersTimeout = Timeout(60 seconds)
val project = nbm.name
val base_project_url = current.configuration.getString("application.context").getOrElse("/")
val base_kernel_url = "/"
val base_observable_url = "observable"
val read_only = false.toString
// TODO: Ugh...
val terminals_available = false.toString // TODO
def configTree() = Action {
Ok(Json.obj())
}
def configCommon() = Action {
Ok(Json.obj())
}
def configNotebook() = Action {
Ok(Json.obj())
}
private val kernelDef = Json.parse(
s"""
|{
| "kernelspecs": {
| "spark": {
| "name": "spark",
| "resources": {},
| "spec" : {
| "language": "scala",
| "display_name": "Scala [${notebook.BuildInfo.scalaVersion}] Spark [${notebook.BuildInfo.xSparkVersion}] Hadoop [${notebook.BuildInfo.xHadoopVersion}] ${if (notebook.BuildInfo.xWithHive) " {Hive ✓}" else ""} ${if (notebook.BuildInfo.xWithParquet) " {Parquet ✓}" else ""}",
| "language_info": {
| "name" : "scala",
| "file_extension" : "scala",
| "codemirror_mode" : "text/x-scala"
| }
| }
| }
| }
|}
|""".stripMargin.trim
)
def kernelSpecs() = Action {
Ok(kernelDef)
}
private[this] def newSession(kernelId: Option[String] = None,
notebookPath: Option[String] = None) = {
val existing = for {
path <- notebookPath
(id, kernel) <- KernelManager.atPath(path)
} yield (id, kernel, kernelIdToCalcService(id))
val (kId, kernel, service) = existing.getOrElse {
Logger.info(s"Starting kernel/session because nothing for $kernelId and $notebookPath")
val kId = kernelId.getOrElse(UUID.randomUUID.toString)
val compilerArgs = config.kernel.compilerArgs.toList
val initScripts = config.kernel.initScripts.toList
val r = Reads.map[JsValue]
// Load the notebook → get the metadata
val md: Option[Metadata] = for {
p <- notebookPath
n <- nbm.load(p)
m <- n.metadata
} yield m
val customLocalRepo: Option[String] = md.flatMap(_.customLocalRepo)
val customRepos: Option[List[String]] = md.flatMap(_.customRepos)
val customDeps: Option[List[String]] = md.flatMap(_.customDeps)
val customImports: Option[List[String]] = md.flatMap(_.customImports)
val customArgs: Option[List[String]] = md.flatMap(_.customArgs)
val customSparkConf: Option[Map[String, String]] = for {
m <- md
c <- m.customSparkConf
_ = Logger.info("customSparkConf >> " + c)
map <- r.reads(c).asOpt
} yield map.map {
case (k, a@JsArray(v)) => k → a.toString
case (k, JsBoolean(v)) => k → v.toString
case (k, JsNull) => k → "null"
case (k, JsNumber(v)) => k → v.toString
case (k, o@JsObject(v)) => k → o.toString
case (k, JsString(v)) => k → v
case (k, v:JsUndefined) => k → s"Undefined: ${v.error}"
}
val kernel = new Kernel(config.kernel.config.underlying,
kernelSystem,
kId,
notebookPath,
customArgs)
KernelManager.add(kId, kernel)
val service = new CalcWebSocketService(kernelSystem,
md.map(_.name).getOrElse("Spark Notebook"),
customLocalRepo,
customRepos,
customDeps,
customImports,
customArgs,
customSparkConf,
initScripts,
compilerArgs,
kernel.remoteDeployFuture,
config.tachyonInfo
)
kernelIdToCalcService += kId -> service
(kId, kernel, service)
}
// todo add MD?
Json.parse(
s"""
|{
|"id": "$kId",
|"name": "spark",
|"language_info": {
| "name" : "Scala",
| "file_extension" : "scala",
| "codemirror_mode" : "text/x-scala"
|}
|}
|""".stripMargin.trim
)
}
def createSession() = Action(parse.tolerantJson) /* → posted as urlencoded form oO */ { request =>
val json: JsValue = request.body
val kernelId = Try((json \ "kernel" \ "id").as[String]).toOption
val notebookPath = Try((json \ "notebook" \ "path").as[String]).toOption
val k = newSession(kernelId, notebookPath)
Ok(Json.obj("kernel" → k))
}
def sessions() = Action {
Ok(JsArray(kernelIdToCalcService.keys
.map { k =>
KernelManager.get(k).map(l => (k, l))
}.collect {
case Some(x) => x
}.map { case (k, kernel) =>
val path: String = kernel.notebookPath.getOrElse(s"KERNEL '$k' SHOULD HAVE A PATH ACTUALLY!")
Json.obj(
"notebook" → Json.obj("path" → path),
"id" → k
)
}.toSeq)
)
}
def profiles() = Action.async {
implicit val ec = kernelSystem.dispatcher
(clustersActor ? NotebookClusters.Profiles).map { case all: List[JsObject] =>
Ok(JsArray(all))
}
}
def clusters() = Action.async {
implicit val ec = kernelSystem.dispatcher
(clustersActor ? NotebookClusters.All).map { case all: List[JsObject] =>
Ok(JsArray(all))
}
}
/**
* add a spark cluster by json meta
*/
def addCluster() = Action.async(parse.tolerantJson) { request =>
val json = request.body
implicit val ec = kernelSystem.dispatcher
json match {
case o: JsObject =>
(clustersActor ? NotebookClusters.Add((json \ "name").as[String], o)).map { case cluster: JsObject =>
Ok(cluster)
}
case _ => Future {
BadRequest("Add cluster needs an object, got: " + json)
}
}
}
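// Example payload (sketch): the handler above only requires a top-level "name" field and stores
// the whole JSON object as-is, e.g. {"name": "local-cluster", ...} with the remaining fields free-form.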
/**
* remove the spark cluster identified by its name
*/
def deleteCluster(clusterName:String) = Action.async { request =>
Logger.debug("Delete a cluster")
implicit val ec = kernelSystem.dispatcher
(clustersActor ? NotebookClusters.Remove(clusterName, null)).map{ item => Ok(Json.obj("result" → s"Cluster $clusterName deleted"))}
}
def contents(tpe: String, uri: String = "/") = Action { request =>
val path = URLDecoder.decode(uri, UTF_8)
val lengthToRoot = config.notebooksDir.getAbsolutePath.length
def dropRoot(f: java.io.File) = f.getAbsolutePath.drop(lengthToRoot).dropWhile(_ == '/')
val baseDir = new java.io.File(config.notebooksDir, path)
if (tpe == "directory") {
val content = Option(baseDir.listFiles).getOrElse(Array.empty).map { f =>
val n = f.getName
if (f.isFile && n.endsWith(".snb")) {
Json.obj(
"type" -> "notebook",
"name" -> n.dropRight(".snb".length),
"path" -> dropRoot(f) //todo → build relative path
)
} else if (f.isFile) {
Json.obj(
"type" -> "file",
"name" -> n,
"path" -> dropRoot(f) //todo → build relative path
)
} else {
Json.obj(
"type" -> "directory",
"name" -> n,
"path" -> dropRoot(f) //todo → build relative path
)
}
}
Ok(Json.obj("content" → content))
} else if (tpe == "notebook") {
Logger.debug("content: " + path)
val name = if (path.endsWith(".snb")) path.dropRight(".snb".length) else path
getNotebook(name, path, "json")
} else {
BadRequest("Dunno what to do with contents for " + tpe + " at " + path)
}
}
def createNotebook(p: String, custom: JsObject) = {
val path = URLDecoder.decode(p, UTF_8)
Logger.info(s"Creating notebook at $path")
val customLocalRepo = Try((custom \ "customLocalRepo").as[String]).toOption.map(_.trim()).filterNot(_.isEmpty)
val customRepos = Try((custom \ "customRepos").as[List[String]]).toOption.filterNot(_.isEmpty)
val customDeps = Try((custom \ "customDeps").as[List[String]]).toOption.filterNot(_.isEmpty)
val customImports = Try((custom \ "customImports").as[List[String]]).toOption.filterNot(_.isEmpty)
val customArgs = Try((custom \ "customArgs").as[List[String]]).toOption.filterNot(_.isEmpty)
val customMetadata = (for {
j <- Try(custom \ "customSparkConf") if j.isInstanceOf[JsObject]
} yield j.asInstanceOf[JsObject]).toOption
val fpath = nbm.newNotebook(
path,
customLocalRepo orElse config.localRepo,
customRepos orElse config.repos,
customDeps orElse config.deps,
customImports orElse config.imports,
customArgs orElse config.args,
customMetadata orElse config.sparkConf)
Try(Redirect(routes.Application.contents("notebook", fpath)))
}
def copyingNb(fp: String) = {
val fromPath = URLDecoder.decode(fp, UTF_8)
Logger.info("Copying notebook:" + fromPath)
val np = nbm.copyNotebook(fromPath)
Try(Ok(Json.obj("path" → np)))
}
def newNotebook(path: String, tryJson: Try[JsValue]) = {
def findkey[T](x: JsValue, k: String)(init: T)(implicit m: ClassTag[T]): Try[T] =
(x \ k) match {
case j: JsUndefined => Failure(new IllegalArgumentException("No " + k))
case JsNull => Success(init)
case o if m.runtimeClass == o.getClass => Success(o.asInstanceOf[T])
case x => Failure(new IllegalArgumentException("Bad type: " + x))
}
lazy val custom = for {
x <- tryJson
t <- findkey[JsObject](x, "custom")(Json.obj())
n <- createNotebook(path, t)
} yield n
lazy val copyFrom = for {
x <- tryJson
t <- findkey[JsString](x, "copy_from")(JsString(""))
n <- copyingNb(t.value)
} yield n
custom orElse copyFrom
}
def newDirectory(path: String, name:String) = {
Logger.info("New dir: " + path)
val base = new File(config.notebooksDir, path)
val parent = base
val newDir = new File(parent, name)
newDir.mkdirs()
Try(Ok(Json.obj("path" → newDir.getAbsolutePath.drop(parent.getAbsolutePath.length))))
}
def newFile(path: String) = {
Logger.info("New file:" + path)
val base = new File(config.notebooksDir, path)
val parent = base.getParentFile
val newF = new File(parent, "file")
newF.createNewFile()
Try(Ok(Json.obj("path" → newF.getAbsolutePath.drop(parent.getAbsolutePath.length))))
}
def newContent(p: String = "/") = Action(parse.tolerantText) { request =>
val path = URLDecoder.decode(p, UTF_8)
val text = request.body
val tryJson = Try(Json.parse(request.body))
tryJson.flatMap { json =>
(json \ "type").as[String] match {
case "directory" => newDirectory(path, (json \ "name").as[String])
case "notebook" => newNotebook(path, tryJson)
case "file" => newFile(path)
}
}.get
}
def openNotebook(p: String) = Action { implicit request =>
val path = URLDecoder.decode(p, UTF_8)
Logger.info(s"View notebook '$path'")
val wsPath = base_project_url match {
case "/" => "/ws"
case x if x.endsWith("/") => x + "ws"
case x => x + "/ws"
}
val prefix = if (request.secure) "wss" else "ws"
def ws_url(path: Option[String] = None) = {
s"""
|window.notebookWsUrl = function() {
|return '$prefix:/'+window.location.host+'$wsPath${path.map(x => "/" + x).getOrElse("")}'
|};
""".stripMargin.replaceAll("\n", " ")
}
Ok(views.html.notebook(
project + ":" + path,
project,
Map(
"base-url" -> base_project_url,
"ws-url" -> ws_url(),
"base-project-url" -> base_project_url,
"base-kernel-url" -> base_kernel_url,
"base-observable-url" -> ws_url(Some(base_observable_url)),
"read-only" -> read_only,
"notebook-name" -> nbm.name,
"notebook-path" -> path,
"notebook-writable" -> "true"
),
Some("notebook")
))
}
private[this] def closeKernel(kernelId: String) = {
kernelIdToCalcService -= kernelId
KernelManager.get(kernelId).foreach { k =>
Logger.info(s"Closing kernel $kernelId")
k.shutdown()
KernelManager.remove(kernelId)
}
}
def openKernel(kernelId: String, sessionId: String) = ImperativeWebsocket.using[JsValue](
onOpen = channel => WebSocketKernelActor.props(channel, kernelIdToCalcService(kernelId), sessionId),
onMessage = (msg, ref) => ref ! msg,
onClose = ref => {
// try to not close the kernel to allow long live sessions
// closeKernel(kernelId)
Logger.info(s"Closing websockets for kernel $kernelId")
ref ! akka.actor.PoisonPill
}
)
def terminateKernel(kernelId: String) = Action { request =>
closeKernel(kernelId)
Ok(s"""{"$kernelId": "closed"}""")
}
def restartKernel(kernelId: String) = Action { request =>
val k = KernelManager.get(kernelId)
closeKernel(kernelId)
Ok(newSession(notebookPath = k.flatMap(k => k.notebookPath)))
}
def listCheckpoints(snb: String) = Action { request =>
Ok(Json.parse(
"""
|[
| { "id": "TODO", "last_modified": "2015-01-02T13:22:01.751Z" }
|]
| """.stripMargin.trim
))
}
def saveCheckpoint(snb: String) = Action { request =>
//TODO
Ok(Json.parse(
"""
|[
| { "id": "TODO", "last_modified": "2015-01-02T13:22:01.751Z" }
|]
| """.stripMargin.trim
))
}
def renameNotebook(p: String) = Action(parse.tolerantJson) { request =>
val path = URLDecoder.decode(p, UTF_8)
val notebook = (request.body \ "path").as[String]
Logger.info("RENAME → " + path + " to " + notebook)
try {
val (newname, newpath) = nbm.rename(path, notebook)
KernelManager.atPath(path).foreach { case (_, kernel) =>
kernel.moveNotebook(newpath)
}
Ok(Json.obj(
"type" → "notebook",
"name" → newname,
"path" → newpath
))
} catch {
case _: NotebookExistsException => Conflict
}
}
def saveNotebook(p: String) = Action(parse.tolerantJson(config.maxBytesInFlight)) {
request =>
val path = URLDecoder.decode(p, UTF_8)
Logger.info("SAVE → " + path)
val notebook = NBSerializer.fromJson(request.body \ "content")
try {
val (name, savedPath) = nbm.save(path, notebook, overwrite = true)
Ok(Json.obj(
"type" → "notebook",
"name" → name,
"path" → savedPath
))
} catch {
case _: NotebookExistsException => Conflict
}
}
def deleteNotebook(p: String) = Action { request =>
val path = URLDecoder.decode(p, UTF_8)
Logger.info("DELETE → " + path)
try {
nbm.deleteNotebook(path)
Ok(Json.obj(
"type" → "notebook",
"path" → path
))
} catch {
case _: NotebookExistsException => Conflict
}
}
def dlNotebookAs(p: String, format: String) = Action {
val path = URLDecoder.decode(p, UTF_8)
Logger.info("DL → " + path + " as " + format)
getNotebook(path.dropRight(".snb".length), path, format, dl = true)
}
def dash(p: String = base_kernel_url) = Action {
val path = URLDecoder.decode(p, UTF_8)
Logger.debug("DASH → " + path)
Ok(views.html.projectdashboard(
nbm.name,
project,
Map(
"project" → project,
"base-project-url" → base_project_url,
"base-kernel-url" → base_kernel_url,
"read-only" → read_only,
"base-url" → base_project_url,
"notebook-path" → path,
"terminals-available" → terminals_available
),
Breadcrumbs(
"/",
path.split("/").toList.scanLeft(("", "")) {
case ((accPath, accName), p) => (accPath + "/" + p, p)
}.tail.map { case (p, x) =>
Crumb(controllers.routes.Application.dash(p.tail).url, x)
}
),
Some("dashboard")
))
}
def openObservable(contextId: String) = ImperativeWebsocket.using[JsValue](
onOpen = channel => WebSocketObservableActor.props(channel, contextId),
onMessage = (msg, ref) => ref ! msg,
onClose = ref => {
Logger.info(s"Closing observable $contextId")
ref ! akka.actor.PoisonPill
}
)
def getNotebook(name: String, path: String, format: String, dl: Boolean = false) = {
try {
Logger.debug(s"getNotebook: name is '$name', path is '$path' and format is '$format'")
val response = nbm.getNotebook(path).map { case (lastMod, nbname, data, fpath) =>
format match {
case "json" =>
val j = Json.parse(data)
val json = if (!dl) {
Json.obj(
"content" → j,
"name" → nbname,
"path" → fpath, //FIXME
"writable" -> true //TODO
)
} else {
j
}
Ok(json).withHeaders(
HeaderNames.CONTENT_DISPOSITION → s"""attachment; filename="$path" """,
HeaderNames.LAST_MODIFIED → lastMod
)
case "scala" =>
val nb = NBSerializer.fromJson(Json.parse(data))
val code = nb.cells.map { cells =>
val cs = cells.collect {
case NBSerializer.CodeCell(md, "code", i, Some("scala"), _, _) => i
case NBSerializer.CodeCell(md, "code", i, None, _, _) => i
}
val fc = cs.map(_.split("\n").map { s => s" $s" }.mkString("\n")).mkString("\n\n /* ... new cell ... */\n\n").trim
val code = s"""
|object Cells {
| $fc
|}
""".stripMargin
code
}.getOrElse("//NO CELLS!")
Ok(code).withHeaders(
HeaderNames.CONTENT_DISPOSITION → s"""attachment; filename="$name.scala" """,
HeaderNames.LAST_MODIFIED → lastMod
)
case _ => InternalServerError(s"Unsupported format $format")
}
}
response getOrElse NotFound(s"Notebook '$name' not found at $path.")
} catch {
case e: Exception =>
Logger.error("Error accessing notebook [%s]".format(name), e)
InternalServerError
}
}
// docker
val docker /*:Option[tugboat.Docker]*/ = None // SEE dockerlist branch! → still some issues due to tugboat
def dockerAvailable = Action {
Ok(Json.obj("available" → docker.isDefined)).withHeaders(
HeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN -> "*",
HeaderNames.ACCESS_CONTROL_ALLOW_METHODS -> "GET, POST, PUT, DELETE, OPTIONS",
HeaderNames.ACCESS_CONTROL_ALLOW_METHODS -> "Accept, Origin, Content-type",
HeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS -> "true"
)
}
def dockerList = TODO
// SEE dockerlist branch! → still some issues due to tugboat
// util
object ImperativeWebsocket {
def using[E: WebSocket.FrameFormatter](
onOpen: Channel[E] => ActorRef,
onMessage: (E, ActorRef) => Unit,
onClose: ActorRef => Unit,
onError: (String, Input[E]) => Unit = (e: String, _: Input[E]) => Logger.error(e)
): WebSocket[E, E] = {
implicit val sys = kernelSystem.dispatcher
val promiseIn = Promise[Iteratee[E, Unit]]()
val out = Concurrent.unicast[E](
onStart = channel => {
val ref = onOpen(channel)
val in = Iteratee.foreach[E] { message =>
onMessage(message, ref)
} map (_ => onClose(ref))
promiseIn.success(in)
},
onError = onError
)
WebSocket.using[E](_ => (Iteratee.flatten(promiseIn.future), out))
}
}
}
|
shankar-reddy/spark-notebook
|
app/controllers/Application.scala
|
Scala
|
apache-2.0
| 20,860 |
package com.twitter.scalding
import cascading.flow.{ FlowDef, FlowProcess }
import cascading.stats.CascadingStats
import java.util.{ Collections, WeakHashMap }
import org.slf4j.{ Logger, LoggerFactory }
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.ref.WeakReference
/*
* This can be a bit tricky to use, but it is important that incBy and inc
* are called INSIDE any map or reduce functions.
* Like:
* val stat = Stat("test")
* .map { x =>
* stat.inc
* 2 * x
* }
* NOT: map( { stat.inc; { x => 2*x } } )
* which increments on the submitter before creating the function. See the difference?
*/
trait Stat extends java.io.Serializable {
def incBy(amount: Long): Unit
def inc: Unit
}
case class StatKey(counter: String, group: String) extends java.io.Serializable
object StatKey {
// This is implicit to allow Stat("c", "g") to work.
implicit def fromCounterGroup(counterGroup: (String, String)): StatKey = counterGroup match {
case (c, g) => StatKey(c, g)
}
// Create a Stat in the ScaldingGroup
implicit def fromCounterDefaultGroup(counter: String): StatKey =
StatKey(counter, Stats.ScaldingGroup)
}
object Stat {
def apply(key: StatKey)(implicit uid: UniqueID): Stat = new Stat {
// This is materialized on the mappers, and will throw an exception if users incBy before then
private[this] lazy val flowProcess: FlowProcess[_] = RuntimeStats.getFlowProcessForUniqueId(uid)
def incBy(amount: Long): Unit = flowProcess.increment(key.group, key.counter, amount)
def inc: Unit = incBy(1L)
}
}
object Stats {
// This is the group that we assign all custom counters to
val ScaldingGroup = "Scalding Custom"
// When getting a counter value, cascadeStats takes precedence (if set) and
// flowStats is used after that. Returns None if neither is defined.
def getCounterValue(key: StatKey)(implicit cascadingStats: CascadingStats): Long =
cascadingStats.getCounterValue(key.group, key.counter)
// Returns a map of all custom counter names and their counts.
def getAllCustomCounters()(implicit cascadingStats: CascadingStats): Map[String, Long] = {
val counts = for {
counter <- cascadingStats.getCountersFor(ScaldingGroup).asScala
value = getCounterValue(counter)
} yield (counter, value)
counts.toMap
}
}
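// Usage sketch: once a flow has completed and an implicit CascadingStats for it is in scope,
// the custom counters written via Stat(...).inc can be read back (names below are illustrative):
//   implicit val stats: CascadingStats = ???    // e.g. the stats of the completed flow
//   Stats.getAllCustomCounters()                // Map[String, Long] for the "Scalding Custom" group
//   Stats.getCounterValue(StatKey("test", Stats.ScaldingGroup))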
/**
* Used to inject a typed unique identifier to uniquely name each scalding flow.
* This is here mostly to deal with the case of testing where there are many
* concurrent threads running Flows. Users should never have to worry about
* these
*/
case class UniqueID(get: String) {
assert(get.indexOf(',') == -1, "UniqueID cannot contain ,: " + get)
}
object UniqueID {
val UNIQUE_JOB_ID = "scalding.job.uniqueId"
private val id = new java.util.concurrent.atomic.AtomicInteger(0)
def getRandom: UniqueID = {
// This number is unique as long as we don't create more than 10^6 per milli
// across separate jobs. which seems very unlikely.
val unique = (System.currentTimeMillis << 20) ^ (id.getAndIncrement.toLong)
UniqueID(unique.toString)
}
implicit def getIDFor(implicit fd: FlowDef): UniqueID =
/*
* In real deploys, this can even be a constant, but for testing
* we need to allocate unique IDs to prevent different jobs running
* at the same time from touching each other's counters.
*/
UniqueID(System.identityHashCode(fd).toString)
}
/**
* Wrapper around a FlowProcess useful, for e.g. incrementing counters.
*/
object RuntimeStats extends java.io.Serializable {
@transient private lazy val logger: Logger = LoggerFactory.getLogger(this.getClass)
private val flowMappingStore: mutable.Map[String, WeakReference[FlowProcess[_]]] =
Collections.synchronizedMap(new WeakHashMap[String, WeakReference[FlowProcess[_]]])
def getFlowProcessForUniqueId(uniqueId: UniqueID): FlowProcess[_] = {
(for {
weakFlowProcess <- flowMappingStore.get(uniqueId.get)
flowProcess <- weakFlowProcess.get
} yield {
flowProcess
}).getOrElse {
sys.error("Error in job deployment, the FlowProcess for unique id %s isn't available".format(uniqueId))
}
}
def addFlowProcess(fp: FlowProcess[_]): Unit = {
val uniqueJobIdObj = fp.getProperty(UniqueID.UNIQUE_JOB_ID)
if (uniqueJobIdObj != null) {
uniqueJobIdObj.asInstanceOf[String].split(",").foreach { uniqueId =>
logger.debug("Adding flow process id: " + uniqueId)
flowMappingStore.put(uniqueId, new WeakReference(fp))
}
}
}
/**
* For serialization, you may need to do:
* val keepAlive = RuntimeStats.getKeepAliveFunction
* outside of a closure passed to map/etc..., and then call:
* keepAlive()
* inside of your closure (mapping, reducing function)
*/
def getKeepAliveFunction(implicit flowDef: FlowDef): () => Unit = {
// Don't capture the flowDef, just the id
val id = UniqueID.getIDFor(flowDef)
() => {
val flowProcess = RuntimeStats.getFlowProcessForUniqueId(id)
flowProcess.keepAlive
}
}
}
|
wanyifu/scaldingtest
|
scalding-core/src/main/scala/com/twitter/scalding/Stats.scala
|
Scala
|
apache-2.0
| 5,122 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms
import org.junit.Test
import org.orbeon.oxf.test.DocumentTestBase
import org.orbeon.oxf.xforms.event.ClientEvents
import org.orbeon.oxf.xml.dom.Converter._
import org.scalatestplus.junit.AssertionsForJUnit
class ClientEventsTest extends DocumentTestBase with AssertionsForJUnit {
@Test def adjustIdForRepeatIteration(): Unit = {
this setupDocument
<xh:html xmlns:xf="http://www.w3.org/2002/xforms"
xmlns:xh="http://www.w3.org/1999/xhtml"
xmlns:xxf="http://orbeon.org/oxf/xml/xforms">
<xh:head>
<xf:model>
<xf:instance id="instance">
<instance>
<outer>
<inner/>
<inner/>
</outer>
<outer>
<inner/>
<inner/>
<inner/>
</outer>
<outer/>
</instance>
</xf:instance>
</xf:model>
</xh:head>
<xh:body>
<xf:repeat id="my-outer-repeat" ref="outer">
<xf:repeat id="my-inner-repeat" ref="inner">
<xf:input id="my-input" ref="."/>
</xf:repeat>
</xf:repeat>
</xh:body>
</xh:html>.toDocument
assert("my-outer-repeat" === ClientEvents.adjustIdForRepeatIteration(document, "my-outer-repeat"))
assert("my-outer-repeat~iteration⊙2" === ClientEvents.adjustIdForRepeatIteration(document, "my-outer-repeat⊙2"))
assert("my-inner-repeat⊙2" === ClientEvents.adjustIdForRepeatIteration(document, "my-inner-repeat⊙2"))
assert("my-inner-repeat~iteration⊙2-3" === ClientEvents.adjustIdForRepeatIteration(document, "my-inner-repeat⊙2-3"))
assert("my-input⊙2-3" === ClientEvents.adjustIdForRepeatIteration(document, "my-input⊙2-3"))
}
}
|
orbeon/orbeon-forms
|
xforms/jvm/src/test/scala/org/orbeon/oxf/xforms/ClientEventsTest.scala
|
Scala
|
lgpl-2.1
| 2,540 |
/**
* Created on: Mar 2, 2013
*/
package com.tubros.constraints.core.spi.solver.heuristic
import scala.language.{
higherKinds,
postfixOps
}
import scalaz._
import com.tubros.constraints.api.solver._
/**
* The '''EstimatedSearchSpace''' type is a heuristic for approximating how
* large an exhaustive search of a solution space would be.
*
* @author svickers
*
*/
object EstimatedSearchSpace
{
/// Class Imports
import std.anyVal._
import std.tuple._
import Reducer._
import Tags._
def apply[A, C[_] : Foldable, DomainT[X] <: Domain[X]] (
domains : C[DomainT[A]]
) : Long =
{
implicit val r = countAndProduct[A, DomainT];
foldReduce[C, DomainT[A], (Long, Long @@ Multiplication)] (domains) match {
case (0L, _) => 0L;
case e @ (_, size) => size;
}
}
private def countAndProduct[A, DomainT[X] <: Domain[X]]
: Reducer[DomainT[A], (Long, Long @@ Multiplication)] =
unitReducer {
domain =>
(domain.size.toLong, Tag[Long, Multiplication] (domain.size.toLong));
}
}
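// Worked example (sketch): for domains of sizes 3, 4 and 5 the estimated search space is
// 3 * 4 * 5 = 60 candidate assignments; with no domains, or only empty ones, the estimate is 0.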
|
osxhacker/smocs
|
smocs-core/src/main/scala/com/tubros/constraints/core/spi/solver/heuristic/EstimatedSearchSpace.scala
|
Scala
|
bsd-3-clause
| 1,028 |
/**
* Created by Variant on 16/4/8.
*/
object ListBuffer_Internals {
def main(args: Array[String]) {
val list =List(1,2,3,4,5,6,7,8,9)
increment(list,1)
increment_MoreEffective(list,2)
increment_MostEffective(list,3)
}
/**
* Not tail-recursive: every element adds a new stack frame (and a new cons cell), so a list with millions of elements blows the stack almost immediately.
*
* @param list
* @param i
* @return
*/
def increment(list: List[Int], i: Int): List[Int] = list match{
case List() => List()
case head :: tail =>head +i :: increment(tail,i)
}
/**
* Builds many intermediate lists (the accumulator is re-copied on every append), so this is also inefficient.
*
* @param list
* @param i
* @return
*/
def increment_MoreEffective(list: List[Int], i: Int): List[Int] ={
var result = List[Int]()
for(element <- list) result = result ::: List(element + i)
result
}
def increment_MostEffective(list: List[Int], i: Int): List[Int] = {
import scala.collection.mutable.ListBuffer
val buffer = new ListBuffer[Int]
for (element <- list) { buffer += element + i }
buffer.toList
}
}
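// Note (sketch): the idiomatic one-pass equivalent is simply `list.map(_ + i)`. The ListBuffer
// version beats increment_MoreEffective because `buffer += x` appends in O(1), whereas
// `result ::: List(x)` re-copies the accumulated prefix on every iteration (O(n^2) overall).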
|
sparkLiwei/ProgrammingNote
|
scalaLearning/scalaInSpark/ListBuffer_Internals.scala
|
Scala
|
cc0-1.0
| 1,052 |
sealed trait List[+A]
case object Nil extends List[Nothing]
case class Cons[+A](head: A, tail: List[A]) extends List[A]
object List {
def apply[A](as: A*): List[A] = {
if(as.isEmpty) Nil
else Cons(as.head, apply(as.tail: _*))
}
def tail[A](items: List[A]): List[A] = ???
def setHead[A](items: List[A], n: A): List[A] = ???
def drop[A](l: List[A], n: Int): List[A] = ???
def dropWhile[A](l: List[A])(f: A => Boolean): List[A] = ???
def foldRight[A,B](as: List[A], z: B)(f:(A,B) => B): B = as match {
case Nil => z
case Cons(x, xs) => f(x, foldRight(xs,z)(f))
}
def sum(ns: List[Int]) = foldLeft(ns, 0)((x,y) => x + y)
def product(ns: List[Double]) = foldLeft(ns, 1.0) (_ * _)
def length[A](as: List[A]): Int = foldLeft(as, 0)((x,y) => 1 + x)
@annotation.tailrec
def foldLeft[A,B](as: List[A], z: B)(f: (B, A) => B): B = as match {
case Nil => z
case Cons(x, xs) => foldLeft(xs,f(z,x))(f)
}
/*
Write a function that transforms a list of integers by adding 1 to each element.
(Reminder: this should be a pure function that returns a new List!)
*/
def addOne(as: List[Int]): List[Int] = as match {
case Nil => Nil
case Cons(x, xs) => Cons(x + 1, addOne(xs))
}
}
/*
tests
*/
assert(List.addOne(Nil) == Nil)
assert(List.addOne(List(1)) == List(2))
assert(List.addOne(List(1,2,3,4)) == List(2,3,4,5))
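/*
An alternative formulation (a sketch, not part of the original exercise): addOne can also be
expressed with the foldRight defined above.
*/
def addOneViaFoldRight(as: List[Int]): List[Int] =
  List.foldRight(as, Nil: List[Int])((h, t) => Cons(h + 1, t))
assert(addOneViaFoldRight(List(1,2,3)) == List(2,3,4))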
|
wkimeria/fp_scala_for_mortals
|
chapter_3/exercises/exercise_16.scala
|
Scala
|
mit
| 1,353 |
package qidong.runtime
import org.scalatest.FunSuite
import scalaz.{ \/-, -\/ }
import scalaz.Scalaz._
import scalaz.concurrent.Task
class StateUpdateTest extends FunSuite {
import qidong.pipeline.{ MCompleted, MFailNode, MSuccNode ,MRecoveredByErrorHandlerNode}
import qidong.pipeline.ops._
val m0 = ((i: Int) => { i + 1 }).name("m0")
val m1 = ((i: Int) => { i + 1 }).name("m1")
val m2 = ((i: Int) => { i + 1 }).name("m2")
val m3 = ((i: Int) => { throw new Exception("oops"); i + 1 }).name("m3")
val m4 = ((i: Int) => { i + 1 }).name("m4")
val m5 = ((i: Int) => { i + 1 }).name("m5")
test("state update listener should be attachable and be called when mission is completed sucessfully") {
var mComplete: MCompleted = null
val mm = m0.stateUpdate(x => mComplete = x)
val underTest = mm =>: (m1 =>: m2).name("group1") =>: m5 =>: (m3 =>: m4).name("group2") =>: m5
val -\/(ret) = underTest.run[Task](1).unsafePerformSync
assert(underTest.drawTree == ret.trace.map(_.name).drawTree)
assert(mComplete.name == "m0")
assert(mComplete.isInstanceOf[MSuccNode])
}
test("exception thrown inside state update listener should not affect the main process") {
var mComplete: MCompleted = null
val mm = m0.stateUpdate(x => { mComplete = x; throw new Exception("exception in handler") })
val underTest = mm =>: (m1 =>: m2).name("group1") =>: m5 =>: (m3 =>: m4).name("group2") =>: m5
val -\/(ret) = underTest.run[Task](1).unsafePerformSync
assert(underTest.drawTree == ret.trace.map(_.name).drawTree)
assert(mComplete.name == "m0")
assert(mComplete.isInstanceOf[MSuccNode])
}
test("exception thrown inside state update listener should not affect the main process 2") {
var mComplete: MCompleted = null
val mm = m0.stateUpdate(x => { throw new Exception("exception in handler"); mComplete = x })
val underTest = mm =>: (m1 =>: m2).name("group1") =>: m5 =>: (m3 =>: m4).name("group2") =>: m5
val -\/(ret) = underTest.run[Task](1).unsafePerformSync
assert(underTest.drawTree == ret.trace.map(_.name).drawTree)
assert(mComplete == null)
}
test("state update listener should be attachable and be called when mission is completed with failure") {
var mComplete: MCompleted = null
val mm = m3.stateUpdate(x => mComplete = x)
val underTest = m0 =>: (m1 =>: m2).name("group1") =>: m5 =>: (mm =>: m4).name("group2") =>: m5
val -\/(ret) = underTest.run[Task](1).unsafePerformSync
assert(underTest.drawTree == ret.trace.map(_.name).drawTree)
assert(mComplete.name == "m3")
assert(mComplete.isInstanceOf[MFailNode])
}
test("state update listener should notify MRecoveredByErrorHandlerNode when recovered") {
var mComplete: MCompleted = null
val mm = m3.stateUpdate(x => { mComplete = x }).handleError((_, _) => 1)
val underTest = m0 =>: (m1 =>: m2).name("group1") =>: m5 =>: (mm =>: m4).name("group2") =>: m5
val \/-(ret) = underTest.run[Task](1).unsafePerformSync
assert(underTest.drawTree == ret.trace.map(_.name).drawTree)
assert(mComplete.isInstanceOf[MRecoveredByErrorHandlerNode])
}
}
|
chenharryhua/qidong
|
src/test/scala/qidong/runtime/StateUpdateTest.scala
|
Scala
|
apache-2.0
| 3,136 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate
import java.util.function.BiFunction
import java.util.{ List => JList }
import scala.collection.JavaConverters._
import scala.collection.immutable.Seq
/**
* A versioned value.
*
* @param value The value.
* @param vectorTimestamp Update vector timestamp of the event that caused this version.
* @param systemTimestamp Update system timestamp of the event that caused this version.
* @param creator Creator of the event that caused this version.
*/
case class Versioned[A](value: A, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L, creator: String = "")
/**
* Tracks concurrent [[Versioned]] values which arise from concurrent updates.
*
* @tparam A Versioned value type
* @tparam B Update type
*/
trait ConcurrentVersions[A, B] extends Serializable {
/**
* Updates that [[Versioned]] value with `b` that is a predecessor of `vectorTimestamp`. If
* there is no such predecessor, a new concurrent version is created (optionally derived
* from an older entry in the version history, in case of incremental updates).
*/
def update(b: B, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L, creator: String = ""): ConcurrentVersions[A, B]
/**
* Resolves multiple concurrent versions to a single version. For the resolution to be successful,
* one of the concurrent versions must have a `vectorTimestamp` that is equal to `selectedTimestamp`.
* Only those concurrent versions with a `vectorTimestamp` less than the given `vectorTimestamp`
* participate in the resolution process (which allows for resolutions to be concurrent to other
* updates).
*/
def resolve(selectedTimestamp: VectorTime, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L): ConcurrentVersions[A, B]
/**
* Experimental ...
*/
def resolve(selectedTimestamp: VectorTime): ConcurrentVersions[A, B] =
resolve(selectedTimestamp, all.map(_.vectorTimestamp).reduce(_ merge _), all.map(_.systemTimestamp).max)
/**
* Returns all (un-resolved) concurrent versions.
*/
def all: Seq[Versioned[A]]
/**
* Java API of [[all]].
*/
def getAll: JList[Versioned[A]] = all.asJava
/**
* Returns `true` if there is more than one version available i.e. if there are multiple
* concurrent (= conflicting) versions.
*/
def conflict: Boolean = all.length > 1
/**
* Owner of versioned values.
*/
def owner: String
/**
* Updates the owner.
*/
def withOwner(owner: String): ConcurrentVersions[A, B]
}
object ConcurrentVersions {
/**
* Creates a new [[ConcurrentVersionsTree]] that uses projection function `f` to compute
* new (potentially concurrent) versions from a parent version.
*
* @param initial Value of the initial version.
* @param f Projection function for updates.
* @tparam A Versioned value type
* @tparam B Update type
*/
def apply[A, B](initial: A, f: (A, B) => A): ConcurrentVersions[A, B] =
ConcurrentVersionsTree[A, B](initial)(f)
}
/**
* A [[ConcurrentVersions]] implementation that shall be used if updates replace current
* versioned values (= full updates). `ConcurrentVersionsList` is an immutable data structure.
*/
class ConcurrentVersionsList[A](vs: List[Versioned[A]], val owner: String = "") extends ConcurrentVersions[A, A] {
def update(na: A, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L, creator: String = ""): ConcurrentVersionsList[A] = {
val r = vs.foldRight[(List[Versioned[A]], Boolean)]((Nil, false)) {
case (a, (acc, true)) => (a :: acc, true)
case (a, (acc, false)) =>
if (vectorTimestamp > a.vectorTimestamp)
// regular update on that version
(Versioned(na, vectorTimestamp, systemTimestamp, creator) :: acc, true)
else if (vectorTimestamp < a.vectorTimestamp)
// conflict already resolved, ignore
(a :: acc, true)
else
// conflicting update, try next
(a :: acc, false)
}
r match {
case (updates, true) => new ConcurrentVersionsList(updates, owner)
case (original, false) => new ConcurrentVersionsList(Versioned(na, vectorTimestamp, systemTimestamp, creator) :: original, owner)
}
}
def resolve(selectedTimestamp: VectorTime, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L): ConcurrentVersionsList[A] = {
new ConcurrentVersionsList(vs.foldRight(List.empty[Versioned[A]]) {
case (v, acc) if v.vectorTimestamp == selectedTimestamp => v.copy(vectorTimestamp = vectorTimestamp, systemTimestamp = systemTimestamp) :: acc
case (v, acc) if v.vectorTimestamp.conc(vectorTimestamp) => v :: acc
case (v, acc) => acc
})
}
def all: List[Versioned[A]] = vs.reverse
def withOwner(owner: String) = new ConcurrentVersionsList(vs, owner)
}
case object ConcurrentVersionsList {
/**
* Creates an empty [[ConcurrentVersionsList]].
*/
def apply[A]: ConcurrentVersionsList[A] =
new ConcurrentVersionsList(Nil)
/**
* Creates a new [[ConcurrentVersionsList]] with a single [[Versioned]] value from `a` and `vectorTimestamp`.
*/
def apply[A](a: A, vectorTimestamp: VectorTime): ConcurrentVersionsList[A] =
new ConcurrentVersionsList(List(Versioned(a, vectorTimestamp)))
/**
* Java API that creates an empty [[ConcurrentVersionsList]].
*/
def create[A]: ConcurrentVersionsList[A] =
apply
/**
* Java API that creates a new [[ConcurrentVersionsList]] with a single [[Versioned]] value from `a` and `vectorTimestamp`.
*/
def create[A](a: A, vectorTimestamp: VectorTime): ConcurrentVersionsList[A] =
apply(a, vectorTimestamp)
}
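// Usage sketch for ConcurrentVersionsList; t1 and t2 stand for two concurrent VectorTime values
// (t1 conc t2), how they are obtained is left out here:
//   val cv = ConcurrentVersionsList[String].update("a", t1).update("b", t2)
//   cv.conflict                        // true: two concurrent versions exist
//   cv.all.map(_.value)                // List("a", "b")
//   val resolved = cv.resolve(t1, t1.merge(t2))
//   resolved.conflict                  // false: the version written at t1 was selected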
/**
* A [[ConcurrentVersions]] implementation that shall be used if updates are incremental.
* `ConcurrentVersionsTree` is a mutable data structure. Therefore, it is recommended not
* to share instances of `ConcurrentVersionsTree` directly but rather the [[Versioned]]
* sequence returned by [[ConcurrentVersionsTree#all]]. Later releases will be based on
* an immutable data structure.
*
* '''Please note:''' This implementation does not purge old versions at the moment (which
* shouldn't be a problem if the number of incremental updates to a versioned aggregate is
* rather small). In later releases, manual and automated purging of old versions will be
* supported.
*/
class ConcurrentVersionsTree[A, B](private[eventuate] val root: ConcurrentVersionsTree.Node[A]) extends ConcurrentVersions[A, B] {
import ConcurrentVersionsTree._
@transient
private var _projection: (A, B) => A = (s, _) => s
private var _owner: String = ""
override def update(b: B, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L, creator: String = ""): ConcurrentVersionsTree[A, B] = {
val p = pred(vectorTimestamp)
p.addChild(new Node(Versioned(_projection(p.versioned.value, b), vectorTimestamp, systemTimestamp, creator)))
this
}
override def resolve(selectedTimestamp: VectorTime, vectorTimestamp: VectorTime, systemTimestamp: Long = 0L): ConcurrentVersionsTree[A, B] = {
leaves.foreach {
case n if n.rejected => // ignore rejected leaf
case n if n.versioned.vectorTimestamp.conc(vectorTimestamp) => // ignore concurrent update
case n if n.versioned.vectorTimestamp == selectedTimestamp => n.stamp(vectorTimestamp, systemTimestamp)
case n => n.reject()
}
this
}
override def all: Seq[Versioned[A]] =
leaves.filterNot(_.rejected).map(_.versioned)
override def owner: String =
_owner
override def withOwner(owner: String): ConcurrentVersionsTree[A, B] = {
_owner = owner
this
}
def withProjection(f: (A, B) => A): ConcurrentVersionsTree[A, B] = {
_projection = f
this
}
def withProjection(f: BiFunction[A, B, A]): ConcurrentVersionsTree[A, B] =
withProjection((a, b) => f.apply(a, b))
private[eventuate] def copy(): ConcurrentVersionsTree[A, B] =
new ConcurrentVersionsTree[A, B](root.copy()).withOwner(_owner).withProjection(_projection)
private[eventuate] def nodes: Seq[Node[A]] = foldLeft(root, Vector.empty[Node[A]]) {
case (acc, n) => acc :+ n
}
private[eventuate] def leaves: Seq[Node[A]] = foldLeft(root, Vector.empty[Node[A]]) {
case (leaves, n) => if (n.leaf) leaves :+ n else leaves
}
private[eventuate] def pred(timestamp: VectorTime): Node[A] = foldLeft(root, root) {
case (candidate, n) => if (timestamp > n.versioned.vectorTimestamp && n.versioned.vectorTimestamp > candidate.versioned.vectorTimestamp) n else candidate
}
// TODO: make tail recursive or create a trampolined version
private[eventuate] def foldLeft[C](node: Node[A], acc: C)(f: (C, Node[A]) => C): C = {
val acc2 = f(acc, node)
node.children match {
case Seq() => acc2
case ns => ns.foldLeft(acc2) {
case (acc, n) => foldLeft(n, acc)(f)
}
}
}
}
object ConcurrentVersionsTree {
/**
* Creates a new [[ConcurrentVersionsTree]] that uses projection function `f` to compute
* new (potentially concurrent) versions from a parent version.
*
* @param initial Value of the initial version.
* @param f Projection function for updates.
* @tparam A Versioned value type
* @tparam B Update type
*/
def apply[A, B](initial: A)(f: (A, B) => A): ConcurrentVersionsTree[A, B] =
new ConcurrentVersionsTree[A, B](new ConcurrentVersionsTree.Node(Versioned(initial, VectorTime.Zero))).withProjection(f)
/**
* Creates a new [[ConcurrentVersionsTree]] that uses projection function `f` to compute
* new (potentially concurrent) versions from a parent version.
*
* @param f Projection function for updates.
* @tparam A Versioned value type
* @tparam B Update type
*/
def apply[A, B](f: (A, B) => A): ConcurrentVersionsTree[A, B] =
apply(null.asInstanceOf[A] /* FIXME: use Monoid[A].zero */ )(f).withProjection(f)
/**
* Java API that creates a new [[ConcurrentVersionsTree]].
*
* The [[ConcurrentVersionsTree]] uses projection function `f` to compute
* new (potentially concurrent) versions from a parent version.
*
* @param initial Value of the initial version.
* @param f Projection function for updates.
* @tparam A Versioned value type
* @tparam B Update type
*/
def create[A, B](initial: A, f: BiFunction[A, B, A]): ConcurrentVersionsTree[A, B] =
apply(initial)((a, b) => f.apply(a, b))
/**
* Java API that creates a new [[ConcurrentVersionsTree]].
*
* The [[ConcurrentVersionsTree]] uses projection function `f` to compute
* new (potentially concurrent) versions from a parent version.
*
* @param f Projection function for updates.
* @tparam A Versioned value type
* @tparam B Update type
*/
def create[A, B](f: BiFunction[A, B, A]): ConcurrentVersionsTree[A, B] =
create(null.asInstanceOf[A] /* FIXME: use Monoid[A].zero */ , f)
private[eventuate] class Node[A](var versioned: Versioned[A]) extends Serializable {
var rejected: Boolean = false
var children: Vector[Node[A]] = Vector.empty
var parent: Node[A] = this
def leaf: Boolean = children.isEmpty
def root: Boolean = parent == this
def addChild(node: Node[A]): Unit = {
node.parent = this
children = children :+ node
}
def reject(): Unit = {
rejected = true
if (parent.children.size == 1) parent.reject()
}
def stamp(vt: VectorTime, st: Long): Unit = {
versioned = versioned.copy(vectorTimestamp = vt, systemTimestamp = st)
}
// TODO: make tail recursive or create a trampolined version
def copy(): Node[A] = {
val cn = new Node[A](versioned)
cn.rejected = rejected
cn.children = children.map(_.copy())
cn
}
}
}
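// Hedged usage sketch (not part of the original file): how a ConcurrentVersionsTree tracks two
// concurrent updates and how a conflict is resolved. The process ids "p1"/"p2" and the vector
// timestamps are illustrative only; it assumes VectorTime's varargs factory and compilation in
// the same package, so that VectorTime and Versioned are in scope.
object ConcurrentVersionsTreeExample {
  def run(): Seq[Versioned[String]] = {
    // versioned value is a String; updates are appended via the projection function
    val tree = ConcurrentVersionsTree[String, String]("")((s, u) => s + u)

    // two updates with concurrent vector timestamps produce two concurrent versions (a conflict)
    tree.update("a", VectorTime("p1" -> 1L))
    tree.update("b", VectorTime("p2" -> 1L))
    require(tree.all.size == 2)

    // pick the first version as the winner; the resolve timestamp must dominate both updates
    val winner = tree.all.head.vectorTimestamp
    tree.resolve(winner, VectorTime("p1" -> 2L, "p2" -> 1L))
    tree.all // now a single versioned value remains
  }
}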
|
RBMHTechnology/eventuate
|
eventuate-core/src/main/scala/com/rbmhtechnology/eventuate/Versioned.scala
|
Scala
|
apache-2.0
| 12,395 |
import org.apache.spark.sql.SparkSession
object Spark {
val spark = SparkSession
.builder()
.appName("BDD Example")
.master("local[8]")
    .config("spark.sql.shuffle.partitions", "8")
    .config("spark.default.parallelism", "8")
.getOrCreate()
}
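// Hedged usage sketch (not part of the original file): shows how the shared session above might be
// consumed from a test step; the word-count Dataset is illustrative only.
object SparkUsageExample {
  import Spark.spark.implicits._

  def wordCount(lines: Seq[String]) =
    lines.toDS()
      .flatMap(_.split("\\s+")) // split each line into words
      .groupBy("value")         // a Dataset[String] exposes its single column as "value"
      .count()
}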
|
DanteLore/bdd-spark
|
src/main/scala/Spark.scala
|
Scala
|
mit
| 316 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.jmx
import wvlet.airframe.surface.{ParameterBase, Surface}
sealed trait MBeanParameter {
def name: String
def description: String
def get(obj: AnyRef): AnyRef
def valueType: Surface
}
case class MBeanObjectParameter(name: String, description: String, param: ParameterBase) extends MBeanParameter {
def valueType = param.surface
override def get(obj: AnyRef): AnyRef = {
param.call(obj).asInstanceOf[AnyRef]
}
}
case class NestedMBeanParameter(
name: String,
description: String,
parentParam: ParameterBase,
nestedParam: ParameterBase
) extends MBeanParameter {
def valueType = nestedParam.surface
override def get(obj: AnyRef): AnyRef = {
nestedParam.call(parentParam.call(obj)).asInstanceOf[AnyRef]
}
}
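// Hedged usage sketch (not part of the original file): wrapping a case-class accessor as an MBean
// attribute. ServerStats and its description are illustrative; it assumes Surface.of[A].params from
// airframe-surface, whose elements extend ParameterBase.
object MBeanParameterExample {
  case class ServerStats(activeConnections: Int)

  def activeConnectionsParam: MBeanParameter = {
    val p = Surface.of[ServerStats].params.head
    MBeanObjectParameter(p.name, "number of active connections", p)
  }
}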
|
wvlet/airframe
|
airframe-jmx/src/main/scala/wvlet/airframe/jmx/MBeanParameter.scala
|
Scala
|
apache-2.0
| 1,336 |
/*
* Copyright 2011-2020 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mongodb
package record
import net.liftweb.common._
import java.util.prefs.BackingStoreException
import java.util.regex.Pattern
import scala.collection.JavaConverters._
import net.liftweb.mongodb.record.codecs.{RecordCodec, RecordTypedCodec}
import net.liftweb.mongodb.record.field._
import net.liftweb.record.{Field, MetaRecord, Record}
import net.liftweb.record.field._
import org.bson._
import org.bson.codecs.{BsonTypeClassMap, Codec, DecoderContext, EncoderContext}
import org.bson.codecs.configuration.{CodecRegistries, CodecRegistry}
import org.bson.conversions.Bson
import com.mongodb._
/** Specialized Record that can be encoded and decoded from BSON (DBObject) */
trait BsonRecord[MyType <: BsonRecord[MyType]] extends Record[MyType] {
self: MyType =>
/** Refines meta to require a BsonMetaRecord */
def meta: BsonMetaRecord[MyType]
/**
* Encode a record instance into a DBObject
*/
@deprecated("RecordCodec is now used instead.", "3.4.3")
def asDBObject: DBObject = meta.asDBObject(this)
@deprecated("RecordCodec is now used instead.", "3.4.3")
def asDocument: Document = meta.asDocument(this)
/**
* Set the fields of this record from the given DBObject
*/
@deprecated("RecordCodec is now used instead.", "3.4.3")
def setFieldsFromDBObject(dbo: DBObject): Unit = meta.setFieldsFromDBObject(this, dbo)
/**
* Save the instance and return the instance
*/
override def saveTheRecord(): Box[MyType] = throw new BackingStoreException("BSON Records don't save themselves")
/**
* Pattern.equals doesn't work properly so it needs a special check. If you use PatternField, be sure to override equals with this.
*/
@deprecated("PatternField now has a properly functioning `equals` method.", "3.4.1")
protected def equalsWithPatternCheck(other: Any): Boolean = {
other match {
case that: BsonRecord[MyType] =>
that.fields.corresponds(this.fields) { (a,b) =>
(a.name == b.name) && ((a.valueBox, b.valueBox) match {
case (Full(ap: Pattern), Full(bp: Pattern)) => ap.pattern == bp.pattern && ap.flags == bp.flags
case _ => a.valueBox == b.valueBox
})
}
case _ => false
}
}
}
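// Hedged usage sketch (not part of the original file): the minimal BsonRecord/BsonMetaRecord pair
// that the traits above and below are designed for. The record name and fields are illustrative;
// the field classes come from net.liftweb.record.field, which is already imported in this file.
class Address private () extends BsonRecord[Address] {
  def meta = Address

  object street extends StringField(this, 256)
  object zip    extends IntField(this)
}

object Address extends Address with BsonMetaRecord[Address]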
/** Specialized MetaRecord that deals with BsonRecords */
trait BsonMetaRecord[BaseRecord <: BsonRecord[BaseRecord]] extends MetaRecord[BaseRecord] with JsonFormats with MongoCodecs {
self: BaseRecord =>
def codecRegistry: CodecRegistry = MongoRecordRules.defaultCodecRegistry.vend
/**
* The `BsonTypeClassMap` to use with this record.
*/
def bsonTypeClassMap: BsonTypeClassMap = MongoRecordRules.defaultBsonTypeClassMap.vend
def bsonTransformer: Transformer = MongoRecordRules.defaultTransformer.vend
def codec: RecordTypedCodec[BaseRecord] =
RecordCodec(this, introspectedCodecRegistry, bsonTypeClassMap, bsonTransformer)
/**
* Check this record's fields and add any Codecs needed.
*/
protected lazy val introspectedCodecRegistry: CodecRegistry = {
val fields = metaFields()
val codecs: List[Codec[_]] = fields.map { field => field match {
case f: BsonRecordTypedField[BaseRecord, _] =>
f.valueMeta.codec :: Nil
case f: BsonRecordListField[BaseRecord, _] =>
f.valueMeta.codec :: Nil
case f: BsonRecordMapField[BaseRecord, _] =>
f.valueMeta.codec :: Nil
case _ =>
Nil
}}.flatten
CodecRegistries.fromRegistries(
CodecRegistries.fromCodecs(codecs.distinct.asJava),
codecRegistry
)
}
/**
* Create a BasicDBObject from the field names and values.
* - MongoFieldFlavor types (List) are converted to DBObjects
* using asDBObject
*/
@deprecated("RecordCodec is now used instead.", "3.4.3")
def asDBObject(inst: BaseRecord): DBObject = {
val dbo = BasicDBObjectBuilder.start // use this so regex patterns can be stored.
for {
field <- fields(inst)
dbValue <- fieldDbValue(field)
} { dbo.add(field.name, dbValue) }
dbo.get
}
@deprecated("RecordCodec is now used instead.", "3.4.3")
def asDocument(inst: BaseRecord): Document = {
val dbo = new Document()
for {
field <- fields(inst)
dbValue <- fieldDbValue(field)
} { dbo.append(field.name, dbValue) }
dbo
}
/**
* Return the value of a field suitable to be put in a DBObject
*/
@deprecated("RecordCodec is now used instead.", "3.4.3")
def fieldDbValue(f: Field[_, BaseRecord]): Box[Any] = {
import Meta.Reflection._
import field.MongoFieldFlavor
f match {
case field if (field.optional_? && field.valueBox.isEmpty) => Empty // don't add to DBObject
case field: EnumTypedField[_] =>
field.asInstanceOf[EnumTypedField[Enumeration]].valueBox map {
v => v.id
}
case field: EnumNameTypedField[_] =>
field.asInstanceOf[EnumNameTypedField[Enumeration]].valueBox map {
v => v.toString
}
case field: MongoFieldFlavor[_] =>
Full(field.asInstanceOf[MongoFieldFlavor[Any]].asDBObject)
case field => field.valueBox map (_.asInstanceOf[AnyRef] match {
case null => null
case x if primitive_?(x.getClass) => x
case x if mongotype_?(x.getClass) => x
case x if datetype_?(x.getClass) => datetype2dbovalue(x)
case x: BsonRecord[_] => x.asDBObject
case x: Array[Byte] => x
case o => o.toString
})
}
}
/**
* Creates a new record, then sets the fields with the given DBObject.
*
* @param dbo - the DBObject
* @return Box[BaseRecord]
*/
@deprecated("RecordCodec is now used instead.", "3.4.3")
def fromDBObject(dbo: DBObject): BaseRecord = {
val inst: BaseRecord = createRecord
setFieldsFromDBObject(inst, dbo)
inst
}
/**
* Populate the inst's fields with the values from a DBObject. Values are set
* using setFromAny passing it the DBObject returned from Mongo.
*
* @param inst - the record that will be populated
* @param dbo - The DBObject
* @return Unit
*/
@deprecated("RecordCodec is now used instead.", "3.4.3")
def setFieldsFromDBObject(inst: BaseRecord, dbo: DBObject): Unit = {
for (k <- dbo.keySet.asScala; field <- inst.fieldByName(k.toString)) {
field.setFromAny(dbo.get(k.toString))
}
inst.runSafe {
inst.fields.foreach(_.resetDirty)
}
}
def setFieldsFromDocument(inst: BaseRecord, doc: Document): Unit = {
for (k <- doc.keySet.asScala; field <- inst.fieldByName(k.toString)) {
field.setFromAny(doc.get(k.toString))
}
inst.runSafe {
inst.fields.foreach(_.resetDirty)
}
}
def fromDocument(doc: Document): BaseRecord = {
val inst: BaseRecord = createRecord
setFieldsFromDocument(inst, doc)
inst
}
def diff(inst: BaseRecord, other: BaseRecord): Seq[(String, Any, Any)] = {
fields(inst).flatMap(field => {
val otherValue = other.fieldByName(field.name).flatMap(_.valueBox)
if (otherValue != field.valueBox) {
Seq((field.name, field.valueBox, otherValue))
} else {
Seq.empty[(String, String, String)]
}
})
}
}
|
lift/framework
|
persistence/mongodb-record/src/main/scala/net/liftweb/mongodb/record/BsonRecord.scala
|
Scala
|
apache-2.0
| 7,825 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.containerpool
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Cancellable
import java.time.Instant
import akka.actor.Status.{Failure => FailureMessage}
import akka.actor.{FSM, Props, Stash}
import akka.event.Logging.InfoLevel
import akka.io.IO
import akka.io.Tcp
import akka.io.Tcp.Close
import akka.io.Tcp.CommandFailed
import akka.io.Tcp.Connect
import akka.io.Tcp.Connected
import akka.pattern.pipe
import pureconfig._
import pureconfig.generic.auto._
import akka.stream.ActorMaterializer
import java.net.InetSocketAddress
import java.net.SocketException
import org.apache.openwhisk.common.MetricEmitter
import org.apache.openwhisk.common.TransactionId.systemPrefix
import scala.collection.immutable
import spray.json.DefaultJsonProtocol._
import spray.json._
import org.apache.openwhisk.common.{AkkaLogging, Counter, LoggingMarkers, TransactionId}
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.connector.{
ActivationMessage,
CombinedCompletionAndResultMessage,
CompletionMessage,
ResultMessage
}
import org.apache.openwhisk.core.containerpool.logging.LogCollectingException
import org.apache.openwhisk.core.database.UserContext
import org.apache.openwhisk.core.entity.ExecManifest.ImageName
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.size._
import org.apache.openwhisk.core.invoker.InvokerReactive.{ActiveAck, LogsCollector}
import org.apache.openwhisk.http.Messages
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}
// States
sealed trait ContainerState
case object Uninitialized extends ContainerState
case object Starting extends ContainerState
case object Started extends ContainerState
case object Running extends ContainerState
case object Ready extends ContainerState
case object Pausing extends ContainerState
case object Paused extends ContainerState
case object Removing extends ContainerState
// Data
/** Base data type */
sealed abstract class ContainerData(val lastUsed: Instant, val memoryLimit: ByteSize, val activeActivationCount: Int) {
/** When ContainerProxy in this state is scheduled, it may result in a new state (ContainerData)*/
def nextRun(r: Run): ContainerData
/**
* Return Some(container) (for ContainerStarted instances) or None(for ContainerNotStarted instances)
* Useful for cases where all ContainerData instances are handled, vs cases where only ContainerStarted
* instances are handled */
def getContainer: Option[Container]
/** String to indicate the state of this container after scheduling */
val initingState: String
  /** Indicates whether this container can service additional activations */
def hasCapacity(): Boolean
}
/** abstract type to indicate an unstarted container */
sealed abstract class ContainerNotStarted(override val lastUsed: Instant,
override val memoryLimit: ByteSize,
override val activeActivationCount: Int)
extends ContainerData(lastUsed, memoryLimit, activeActivationCount) {
override def getContainer = None
override val initingState = "cold"
}
/** abstract type to indicate a started container */
sealed abstract class ContainerStarted(val container: Container,
override val lastUsed: Instant,
override val memoryLimit: ByteSize,
override val activeActivationCount: Int)
extends ContainerData(lastUsed, memoryLimit, activeActivationCount) {
override def getContainer = Some(container)
}
/** trait representing a container that is in use and (potentially) usable by subsequent or concurrent activations */
sealed abstract trait ContainerInUse {
val activeActivationCount: Int
val action: ExecutableWhiskAction
def hasCapacity() =
activeActivationCount < action.limits.concurrency.maxConcurrent
}
/** trait representing a container that is NOT in use and is usable by subsequent activation(s) */
sealed abstract trait ContainerNotInUse {
def hasCapacity() = true
}
/** type representing a cold (not running) container */
case class NoData(override val activeActivationCount: Int = 0)
extends ContainerNotStarted(Instant.EPOCH, 0.B, activeActivationCount)
with ContainerNotInUse {
override def nextRun(r: Run) = WarmingColdData(r.msg.user.namespace.name, r.action, Instant.now, 1)
}
/** type representing a cold (not running) container with specific memory allocation */
case class MemoryData(override val memoryLimit: ByteSize, override val activeActivationCount: Int = 0)
extends ContainerNotStarted(Instant.EPOCH, memoryLimit, activeActivationCount)
with ContainerNotInUse {
override def nextRun(r: Run) = WarmingColdData(r.msg.user.namespace.name, r.action, Instant.now, 1)
}
/** type representing a prewarmed (running, but unused) container (with a specific memory allocation) */
case class PreWarmedData(override val container: Container,
kind: String,
override val memoryLimit: ByteSize,
override val activeActivationCount: Int = 0)
extends ContainerStarted(container, Instant.EPOCH, memoryLimit, activeActivationCount)
with ContainerNotInUse {
override val initingState = "prewarmed"
override def nextRun(r: Run) =
WarmingData(container, r.msg.user.namespace.name, r.action, Instant.now, 1)
}
/** type representing a prewarm (running, but not used) container that is being initialized (for a specific action + invocation namespace) */
case class WarmingData(override val container: Container,
invocationNamespace: EntityName,
action: ExecutableWhiskAction,
override val lastUsed: Instant,
override val activeActivationCount: Int = 0)
extends ContainerStarted(container, lastUsed, action.limits.memory.megabytes.MB, activeActivationCount)
with ContainerInUse {
override val initingState = "warming"
override def nextRun(r: Run) = copy(lastUsed = Instant.now, activeActivationCount = activeActivationCount + 1)
}
/** type representing a cold (not yet running) container that is being initialized (for a specific action + invocation namespace) */
case class WarmingColdData(invocationNamespace: EntityName,
action: ExecutableWhiskAction,
override val lastUsed: Instant,
override val activeActivationCount: Int = 0)
extends ContainerNotStarted(lastUsed, action.limits.memory.megabytes.MB, activeActivationCount)
with ContainerInUse {
override val initingState = "warmingCold"
override def nextRun(r: Run) = copy(lastUsed = Instant.now, activeActivationCount = activeActivationCount + 1)
}
/** type representing a warm container that has already been in use (for a specific action + invocation namespace) */
case class WarmedData(override val container: Container,
invocationNamespace: EntityName,
action: ExecutableWhiskAction,
override val lastUsed: Instant,
override val activeActivationCount: Int = 0,
resumeRun: Option[Run] = None)
extends ContainerStarted(container, lastUsed, action.limits.memory.megabytes.MB, activeActivationCount)
with ContainerInUse {
override val initingState = "warmed"
override def nextRun(r: Run) = copy(lastUsed = Instant.now, activeActivationCount = activeActivationCount + 1)
//track the resuming run for easily referring to the action being resumed (it may fail and be resent)
def withoutResumeRun() = this.copy(resumeRun = None)
def withResumeRun(job: Run) = this.copy(resumeRun = Some(job))
}
// Events received by the actor
case class Start(exec: CodeExec[_], memoryLimit: ByteSize)
case class Run(action: ExecutableWhiskAction, msg: ActivationMessage, retryLogDeadline: Option[Deadline] = None)
case object Remove
case class HealthPingEnabled(enabled: Boolean)
// Events sent by the actor
case class NeedWork(data: ContainerData)
case object ContainerPaused
case object ContainerRemoved // when container is destroyed
case object RescheduleJob // job is sent back to parent and could not be processed because container is being destroyed
case class PreWarmCompleted(data: PreWarmedData)
case class InitCompleted(data: WarmedData)
case object RunCompleted
/**
* A proxy that wraps a Container. It is used to keep track of the lifecycle
* of a container and to guarantee a contract between the client of the container
* and the container itself.
*
* The contract is as follows:
* 1. If action.limits.concurrency.maxConcurrent == 1:
* Only one job is to be sent to the ContainerProxy at one time. ContainerProxy
* will delay all further jobs until a previous job has finished.
*
* 1a. The next job can be sent to the ContainerProxy after it indicates available
* capacity by sending NeedWork to its parent.
*
* 2. If action.limits.concurrency.maxConcurrent > 1:
* Parent must coordinate with ContainerProxy to attempt to send only data.action.limits.concurrency.maxConcurrent
* jobs for concurrent processing.
*
* Since the current job count is only periodically sent to parent, the number of jobs
* sent to ContainerProxy may exceed data.action.limits.concurrency.maxConcurrent,
* in which case jobs are buffered, so that only a max of action.limits.concurrency.maxConcurrent
* are ever sent into the container concurrently. Parent will NOT be signalled to send more jobs until
* buffered jobs are completed, but their order is not guaranteed.
*
* 2a. The next job can be sent to the ContainerProxy after ContainerProxy has "concurrent capacity",
* indicated by sending NeedWork to its parent.
*
* 3. A Remove message can be sent at any point in time. Like multiple jobs though,
* it will be delayed until the currently running job finishes.
*
* @constructor
* @param factory a function generating a Container
* @param sendActiveAck a function sending the activation via active ack
* @param storeActivation a function storing the activation in a persistent store
* @param unusedTimeout time after which the container is automatically thrown away
* @param pauseGrace time to wait for new work before pausing the container
*/
class ContainerProxy(factory: (TransactionId,
String,
ImageName,
Boolean,
ByteSize,
Int,
Option[ExecutableWhiskAction]) => Future[Container],
sendActiveAck: ActiveAck,
storeActivation: (TransactionId, WhiskActivation, UserContext) => Future[Any],
collectLogs: LogsCollector,
instance: InvokerInstanceId,
poolConfig: ContainerPoolConfig,
healtCheckConfig: ContainerProxyHealthCheckConfig,
unusedTimeout: FiniteDuration,
pauseGrace: FiniteDuration,
testTcp: Option[ActorRef])
extends FSM[ContainerState, ContainerData]
with Stash {
implicit val ec = context.system.dispatcher
implicit val logging = new AkkaLogging(context.system.log)
implicit val ac = context.system
implicit val materializer = ActorMaterializer()
var rescheduleJob = false // true iff actor receives a job but cannot process it because actor will destroy itself
var runBuffer = immutable.Queue.empty[Run] //does not retain order, but does manage jobs that would have pushed past action concurrency limit
//track buffer processing state to avoid extra transitions near end of buffer - this provides a pseudo-state between Running and Ready
var bufferProcessing = false
//keep a separate count to avoid confusion with ContainerState.activeActivationCount that is tracked/modified only in ContainerPool
var activeCount = 0;
var healthPingActor: Option[ActorRef] = None //setup after prewarm starts
  val tcp: ActorRef = testTcp.getOrElse(IO(Tcp)) //allows testing the interaction with the Tcp extension
startWith(Uninitialized, NoData())
when(Uninitialized) {
// pre warm a container (creates a stem cell container)
case Event(job: Start, _) =>
factory(
TransactionId.invokerWarmup,
ContainerProxy.containerName(instance, "prewarm", job.exec.kind),
job.exec.image,
job.exec.pull,
job.memoryLimit,
poolConfig.cpuShare(job.memoryLimit),
None)
.map(container => PreWarmCompleted(PreWarmedData(container, job.exec.kind, job.memoryLimit)))
.pipeTo(self)
goto(Starting)
// cold start (no container to reuse or available stem cell container)
case Event(job: Run, _) =>
implicit val transid = job.msg.transid
activeCount += 1
// create a new container
val container = factory(
job.msg.transid,
ContainerProxy.containerName(instance, job.msg.user.namespace.name.asString, job.action.name.asString),
job.action.exec.image,
job.action.exec.pull,
job.action.limits.memory.megabytes.MB,
poolConfig.cpuShare(job.action.limits.memory.megabytes.MB),
Some(job.action))
// container factory will either yield a new container ready to execute the action, or
// starting up the container failed; for the latter, it's either an internal error starting
// a container or a docker action that is not conforming to the required action API
container
.andThen {
case Success(container) =>
// the container is ready to accept an activation; register it as PreWarmed; this
// normalizes the life cycle for containers and their cleanup when activations fail
self ! PreWarmCompleted(
PreWarmedData(container, job.action.exec.kind, job.action.limits.memory.megabytes.MB, 1))
case Failure(t) =>
// the container did not come up cleanly, so disambiguate the failure mode and then cleanup
// the failure is either the system fault, or for docker actions, the application/developer fault
val response = t match {
case WhiskContainerStartupError(msg) => ActivationResponse.whiskError(msg)
case BlackboxStartupError(msg) => ActivationResponse.developerError(msg)
case _ => ActivationResponse.whiskError(Messages.resourceProvisionError)
}
val context = UserContext(job.msg.user)
// construct an appropriate activation and record it in the datastore,
// also update the feed and active ack; the container cleanup is queued
// implicitly via a FailureMessage which will be processed later when the state
// transitions to Running
val activation = ContainerProxy.constructWhiskActivation(job, None, Interval.zero, false, response)
sendActiveAck(
transid,
activation,
job.msg.blocking,
job.msg.rootControllerIndex,
job.msg.user.namespace.uuid,
CombinedCompletionAndResultMessage(transid, activation, instance))
storeActivation(transid, activation, context)
}
.flatMap { container =>
// now attempt to inject the user code and run the action
initializeAndRun(container, job)
.map(_ => RunCompleted)
}
.pipeTo(self)
goto(Running)
}
when(Starting) {
// container was successfully obtained
case Event(completed: PreWarmCompleted, _) =>
context.parent ! NeedWork(completed.data)
goto(Started) using completed.data
// container creation failed
case Event(_: FailureMessage, _) =>
context.parent ! ContainerRemoved
stop()
case _ => delay
}
when(Started) {
case Event(job: Run, data: PreWarmedData) =>
implicit val transid = job.msg.transid
activeCount += 1
initializeAndRun(data.container, job)
.map(_ => RunCompleted)
.pipeTo(self)
goto(Running) using PreWarmedData(data.container, data.kind, data.memoryLimit, 1)
case Event(Remove, data: PreWarmedData) => destroyContainer(data)
// prewarm container failed
case Event(_: FailureMessage, data: PreWarmedData) =>
MetricEmitter.emitCounterMetric(LoggingMarkers.INVOKER_CONTAINER_HEALTH_FAILED_PREWARM)
destroyContainer(data)
}
when(Running) {
// Intermediate state, we were able to start a container
// and we keep it in case we need to destroy it.
case Event(completed: PreWarmCompleted, _) => stay using completed.data
// Run during prewarm init (for concurrent > 1)
case Event(job: Run, data: PreWarmedData) =>
implicit val transid = job.msg.transid
logging.info(this, s"buffering for warming container ${data.container}; ${activeCount} activations in flight")
runBuffer = runBuffer.enqueue(job)
stay()
// Run during cold init (for concurrent > 1)
case Event(job: Run, _: NoData) =>
implicit val transid = job.msg.transid
      logging.info(this, s"buffering for cold warming container; ${activeCount} activations in flight")
runBuffer = runBuffer.enqueue(job)
stay()
// Init was successful
case Event(completed: InitCompleted, _: PreWarmedData) =>
processBuffer(completed.data.action, completed.data)
stay using completed.data
// Init was successful
case Event(data: WarmedData, _: PreWarmedData) =>
//in case concurrency supported, multiple runs can begin as soon as init is complete
context.parent ! NeedWork(data)
stay using data
// Run was successful
case Event(RunCompleted, data: WarmedData) =>
activeCount -= 1
val newData = data.withoutResumeRun()
//if there are items in runbuffer, process them if there is capacity, and stay; otherwise if we have any pending activations, also stay
if (requestWork(data) || activeCount > 0) {
stay using newData
} else {
goto(Ready) using newData
}
case Event(job: Run, data: WarmedData)
if activeCount >= data.action.limits.concurrency.maxConcurrent && !rescheduleJob => //if we are over concurrency limit, and not a failure on resume
implicit val transid = job.msg.transid
logging.warn(this, s"buffering for maxed warm container ${data.container}; ${activeCount} activations in flight")
runBuffer = runBuffer.enqueue(job)
stay()
case Event(job: Run, data: WarmedData)
if activeCount < data.action.limits.concurrency.maxConcurrent && !rescheduleJob => //if there was a delay, and not a failure on resume, skip the run
activeCount += 1
implicit val transid = job.msg.transid
bufferProcessing = false //reset buffer processing state
initializeAndRun(data.container, job)
.map(_ => RunCompleted)
.pipeTo(self)
stay() using data
//ContainerHealthError should cause rescheduling of the job
case Event(FailureMessage(e: ContainerHealthError), data: WarmedData) =>
implicit val tid = e.tid
MetricEmitter.emitCounterMetric(LoggingMarkers.INVOKER_CONTAINER_HEALTH_FAILED_WARM)
//resend to self will send to parent once we get to Removing state
val newData = data.resumeRun
.map { run =>
logging.warn(this, "Ready warm container unhealthy, will retry activation.")
self ! run
data.withoutResumeRun()
}
.getOrElse(data)
rescheduleJob = true
rejectBuffered()
destroyContainer(newData)
// Failed after /init (the first run failed)
case Event(_: FailureMessage, data: PreWarmedData) =>
activeCount -= 1
destroyContainer(data)
// Failed for a subsequent /run
case Event(_: FailureMessage, data: WarmedData) =>
activeCount -= 1
destroyContainer(data)
// Failed at getting a container for a cold-start run
case Event(_: FailureMessage, _) =>
activeCount -= 1
context.parent ! ContainerRemoved
rejectBuffered()
stop()
case _ => delay
}
when(Ready, stateTimeout = pauseGrace) {
case Event(job: Run, data: WarmedData) =>
implicit val transid = job.msg.transid
activeCount += 1
val newData = data.withResumeRun(job)
initializeAndRun(data.container, job, true)
.map(_ => RunCompleted)
.pipeTo(self)
goto(Running) using newData
// pause grace timed out
case Event(StateTimeout, data: WarmedData) =>
data.container.suspend()(TransactionId.invokerNanny).map(_ => ContainerPaused).pipeTo(self)
goto(Pausing)
case Event(Remove, data: WarmedData) => destroyContainer(data)
// warm container failed
case Event(_: FailureMessage, data: WarmedData) =>
destroyContainer(data)
}
when(Pausing) {
case Event(ContainerPaused, data: WarmedData) => goto(Paused)
case Event(_: FailureMessage, data: WarmedData) => destroyContainer(data)
case _ => delay
}
when(Paused, stateTimeout = unusedTimeout) {
case Event(job: Run, data: WarmedData) =>
implicit val transid = job.msg.transid
activeCount += 1
val newData = data.withResumeRun(job)
data.container
.resume()
.andThen {
// Sending the message to self on a failure will cause the message
// to ultimately be sent back to the parent (which will retry it)
// when container removal is done.
case Failure(_) =>
rescheduleJob = true
self ! job
}
.flatMap(_ => initializeAndRun(data.container, job, true))
.map(_ => RunCompleted)
.pipeTo(self)
goto(Running) using newData
// container is reclaimed by the pool or it has become too old
case Event(StateTimeout | Remove, data: WarmedData) =>
      rescheduleJob = true // to suppress sending a message to the pool and avoid double counting
destroyContainer(data)
}
when(Removing) {
case Event(job: Run, _) =>
// Send the job back to the pool to be rescheduled
context.parent ! job
stay
case Event(ContainerRemoved, _) => stop()
case Event(_: FailureMessage, _) => stop()
}
// Unstash all messages stashed while in intermediate state
onTransition {
case _ -> Started =>
if (healtCheckConfig.enabled) {
logging.debug(this, "enabling health ping on Started")
nextStateData.getContainer.foreach { c =>
enableHealthPing(c)
}
}
unstashAll()
case _ -> Running =>
if (healtCheckConfig.enabled && healthPingActor.isDefined) {
logging.debug(this, "disabling health ping on Running")
disableHealthPing()
}
case _ -> Ready =>
unstashAll()
case _ -> Paused =>
unstashAll()
case _ -> Removing =>
unstashAll()
}
initialize()
/** Either process runbuffer or signal parent to send work; return true if runbuffer is being processed */
def requestWork(newData: WarmedData): Boolean = {
//if there is concurrency capacity, process runbuffer, signal NeedWork, or both
if (activeCount < newData.action.limits.concurrency.maxConcurrent) {
if (runBuffer.nonEmpty) {
        //only request more work if the available capacity exceeds the number of buffered jobs
val available = newData.action.limits.concurrency.maxConcurrent - activeCount
val needWork: Boolean = available > runBuffer.size
processBuffer(newData.action, newData)
if (needWork) {
//after buffer processing, then send NeedWork
context.parent ! NeedWork(newData)
}
true
} else {
context.parent ! NeedWork(newData)
bufferProcessing //true in case buffer is still in process
}
} else {
false
}
}
/** Process buffered items up to the capacity of action concurrency config */
def processBuffer(action: ExecutableWhiskAction, newData: ContainerData) = {
//send as many buffered as possible
val available = action.limits.concurrency.maxConcurrent - activeCount
logging.info(this, s"resending up to ${available} from ${runBuffer.length} buffered jobs")
1 to available foreach { _ =>
runBuffer.dequeueOption match {
case Some((run, q)) =>
self ! run
bufferProcessing = true
runBuffer = q
case _ =>
}
}
}
/** Delays all incoming messages until unstashAll() is called */
def delay = {
stash()
stay
}
/**
* Destroys the container after unpausing it if needed. Can be used
* as a state progression as it goes to Removing.
*
* @param newData the ContainerStarted which container will be destroyed
*/
def destroyContainer(newData: ContainerStarted) = {
val container = newData.container
if (!rescheduleJob) {
context.parent ! ContainerRemoved
} else {
context.parent ! RescheduleJob
}
rejectBuffered()
val unpause = stateName match {
case Paused => container.resume()(TransactionId.invokerNanny)
case _ => Future.successful(())
}
unpause
.flatMap(_ => container.destroy()(TransactionId.invokerNanny))
.map(_ => ContainerRemoved)
.pipeTo(self)
goto(Removing) using newData
}
/**
* Return any buffered jobs to parent, in case buffer is not empty at removal/error time.
*/
def rejectBuffered() = {
//resend any buffered items on container removal
if (runBuffer.nonEmpty) {
logging.info(this, s"resending ${runBuffer.size} buffered jobs to parent on container removal")
runBuffer.foreach(context.parent ! _)
runBuffer = immutable.Queue.empty[Run]
}
}
private def enableHealthPing(c: Container) = {
val hpa = healthPingActor.getOrElse {
logging.info(this, s"creating health ping actor for ${c.addr.asString()}")
val hp = context.actorOf(
TCPPingClient
.props(tcp, c.toString(), healtCheckConfig, new InetSocketAddress(c.addr.host, c.addr.port)))
healthPingActor = Some(hp)
hp
}
hpa ! HealthPingEnabled(true)
}
private def disableHealthPing() = {
healthPingActor.foreach(_ ! HealthPingEnabled(false))
}
/**
* Runs the job, initialize first if necessary.
* Completes the job by:
* 1. sending an activate ack,
* 2. fetching the logs for the run,
* 3. indicating the resource is free to the parent pool,
* 4. recording the result to the data store
*
* @param container the container to run the job on
* @param job the job to run
* @return a future completing after logs have been collected and
* added to the WhiskActivation
*/
def initializeAndRun(container: Container, job: Run, reschedule: Boolean = false)(
implicit tid: TransactionId): Future[WhiskActivation] = {
val actionTimeout = job.action.limits.timeout.duration
val (env, parameters) = ContainerProxy.partitionArguments(job.msg.content, job.msg.initArgs)
val environment = Map(
"namespace" -> job.msg.user.namespace.name.toJson,
"action_name" -> job.msg.action.qualifiedNameWithLeadingSlash.toJson,
"action_version" -> job.msg.action.version.toJson,
"activation_id" -> job.msg.activationId.toString.toJson,
"transaction_id" -> job.msg.transid.id.toJson)
// if the action requests the api key to be injected into the action context, add it here;
// treat a missing annotation as requesting the api key for backward compatibility
val authEnvironment = {
if (job.action.annotations.isTruthy(Annotations.ProvideApiKeyAnnotationName, valueForNonExistent = true)) {
job.msg.user.authkey.toEnvironment.fields
} else Map.empty
}
// Only initialize iff we haven't yet warmed the container
val initialize = stateData match {
case data: WarmedData =>
Future.successful(None)
case _ =>
val owEnv = (authEnvironment ++ environment + ("deadline" -> (Instant.now.toEpochMilli + actionTimeout.toMillis).toString.toJson)) map {
case (key, value) => "__OW_" + key.toUpperCase -> value
}
container
.initialize(
job.action.containerInitializer(env ++ owEnv),
actionTimeout,
job.action.limits.concurrency.maxConcurrent)
.map(Some(_))
}
val activation: Future[WhiskActivation] = initialize
.flatMap { initInterval =>
//immediately setup warmedData for use (before first execution) so that concurrent actions can use it asap
if (initInterval.isDefined) {
self ! InitCompleted(WarmedData(container, job.msg.user.namespace.name, job.action, Instant.now, 1))
}
val env = authEnvironment ++ environment ++ Map(
// compute deadline on invoker side avoids discrepancies inside container
// but potentially under-estimates actual deadline
"deadline" -> (Instant.now.toEpochMilli + actionTimeout.toMillis).toString.toJson)
container
.run(
parameters,
env.toJson.asJsObject,
actionTimeout,
job.action.limits.concurrency.maxConcurrent,
reschedule)(job.msg.transid)
.map {
case (runInterval, response) =>
val initRunInterval = initInterval
.map(i => Interval(runInterval.start.minusMillis(i.duration.toMillis), runInterval.end))
.getOrElse(runInterval)
ContainerProxy.constructWhiskActivation(
job,
initInterval,
initRunInterval,
runInterval.duration >= actionTimeout,
response)
}
}
.recoverWith {
case h: ContainerHealthError =>
Future.failed(h)
case InitializationError(interval, response) =>
Future.successful(
ContainerProxy
.constructWhiskActivation(job, Some(interval), interval, interval.duration >= actionTimeout, response))
case t =>
// Actually, this should never happen - but we want to make sure to not miss a problem
logging.error(this, s"caught unexpected error while running activation: ${t}")
Future.successful(
ContainerProxy.constructWhiskActivation(
job,
None,
Interval.zero,
false,
ActivationResponse.whiskError(Messages.abnormalRun)))
}
val splitAckMessagesPendingLogCollection = collectLogs.logsToBeCollected(job.action)
// Sending an active ack is an asynchronous operation. The result is forwarded as soon as
// possible for blocking activations so that dependent activations can be scheduled. The
// completion message which frees a load balancer slot is sent after the active ack future
// completes to ensure proper ordering.
val sendResult = if (job.msg.blocking) {
activation.map { result =>
val msg =
if (splitAckMessagesPendingLogCollection) ResultMessage(tid, result)
else CombinedCompletionAndResultMessage(tid, result, instance)
sendActiveAck(tid, result, job.msg.blocking, job.msg.rootControllerIndex, job.msg.user.namespace.uuid, msg)
}
} else {
// For non-blocking request, do not forward the result.
if (splitAckMessagesPendingLogCollection) Future.successful(())
else
activation.map { result =>
val msg = CompletionMessage(tid, result, instance)
sendActiveAck(tid, result, job.msg.blocking, job.msg.rootControllerIndex, job.msg.user.namespace.uuid, msg)
}
}
val context = UserContext(job.msg.user)
// Adds logs to the raw activation.
val activationWithLogs: Future[Either[ActivationLogReadingError, WhiskActivation]] = activation
.flatMap { activation =>
// Skips log collection entirely, if the limit is set to 0
if (!splitAckMessagesPendingLogCollection) {
Future.successful(Right(activation))
} else {
val start = tid.started(this, LoggingMarkers.INVOKER_COLLECT_LOGS, logLevel = InfoLevel)
collectLogs(tid, job.msg.user, activation, container, job.action)
.andThen {
case Success(_) => tid.finished(this, start)
case Failure(t) => tid.failed(this, start, s"reading logs failed: $t")
}
.map(logs => Right(activation.withLogs(logs)))
.recover {
case LogCollectingException(logs) =>
Left(ActivationLogReadingError(activation.withLogs(logs)))
case _ =>
Left(ActivationLogReadingError(activation.withLogs(ActivationLogs(Vector(Messages.logFailure)))))
}
}
}
activationWithLogs
.map(_.fold(_.activation, identity))
.foreach { activation =>
// Sending the completion message to the controller after the active ack ensures proper ordering
// (result is received before the completion message for blocking invokes).
if (splitAckMessagesPendingLogCollection) {
sendResult.onComplete(
_ =>
sendActiveAck(
tid,
activation,
job.msg.blocking,
job.msg.rootControllerIndex,
job.msg.user.namespace.uuid,
CompletionMessage(tid, activation, instance)))
}
// Storing the record. Entirely asynchronous and not waited upon.
storeActivation(tid, activation, context)
}
// Disambiguate activation errors and transform the Either into a failed/successful Future respectively.
activationWithLogs.flatMap {
case Right(act) if !act.response.isSuccess && !act.response.isApplicationError =>
Future.failed(ActivationUnsuccessfulError(act))
case Left(error) => Future.failed(error)
case Right(act) => Future.successful(act)
}
}
}
final case class ContainerProxyTimeoutConfig(idleContainer: FiniteDuration, pauseGrace: FiniteDuration)
final case class ContainerProxyHealthCheckConfig(enabled: Boolean, checkPeriod: FiniteDuration, maxFails: Int)
object ContainerProxy {
def props(factory: (TransactionId,
String,
ImageName,
Boolean,
ByteSize,
Int,
Option[ExecutableWhiskAction]) => Future[Container],
ack: ActiveAck,
store: (TransactionId, WhiskActivation, UserContext) => Future[Any],
collectLogs: LogsCollector,
instance: InvokerInstanceId,
poolConfig: ContainerPoolConfig,
healthCheckConfig: ContainerProxyHealthCheckConfig =
loadConfigOrThrow[ContainerProxyHealthCheckConfig](ConfigKeys.containerProxyHealth),
unusedTimeout: FiniteDuration = timeouts.idleContainer,
pauseGrace: FiniteDuration = timeouts.pauseGrace,
tcp: Option[ActorRef] = None) =
Props(
new ContainerProxy(
factory,
ack,
store,
collectLogs,
instance,
poolConfig,
healthCheckConfig,
unusedTimeout,
pauseGrace,
tcp))
// Needs to be thread-safe as it's used by multiple proxies concurrently.
private val containerCount = new Counter
val timeouts = loadConfigOrThrow[ContainerProxyTimeoutConfig](ConfigKeys.containerProxyTimeouts)
/**
* Generates a unique container name.
*
* @param prefix the container name's prefix
* @param suffix the container name's suffix
* @return a unique container name
*/
def containerName(instance: InvokerInstanceId, prefix: String, suffix: String): String = {
def isAllowed(c: Char): Boolean = c.isLetterOrDigit || c == '_'
val sanitizedPrefix = prefix.filter(isAllowed)
val sanitizedSuffix = suffix.filter(isAllowed)
s"${ContainerFactory.containerNamePrefix(instance)}_${containerCount.next()}_${sanitizedPrefix}_${sanitizedSuffix}"
}
/**
* Creates a WhiskActivation ready to be sent via active ack.
*
* @param job the job that was executed
* @param interval the time it took to execute the job
* @param response the response to return to the user
* @return a WhiskActivation to be sent to the user
*/
def constructWhiskActivation(job: Run,
initInterval: Option[Interval],
totalInterval: Interval,
isTimeout: Boolean,
response: ActivationResponse) = {
val causedBy = Some {
if (job.msg.causedBySequence) {
Parameters(WhiskActivation.causedByAnnotation, JsString(Exec.SEQUENCE))
} else {
// emit the internal system hold time as the 'wait' time, but only for non-sequence
// actions, since the transid start time for a sequence does not correspond
// with a specific component of the activation but the entire sequence;
// it will require some work to generate a new transaction id for a sequence
// component - however, because the trace of activations is recorded in the parent
// sequence, a client can determine the queue time for sequences that way
val end = initInterval.map(_.start).getOrElse(totalInterval.start)
Parameters(
WhiskActivation.waitTimeAnnotation,
Interval(job.msg.transid.meta.start, end).duration.toMillis.toJson)
}
}
val initTime = {
initInterval.map(initTime => Parameters(WhiskActivation.initTimeAnnotation, initTime.duration.toMillis.toJson))
}
val binding =
job.msg.action.binding.map(f => Parameters(WhiskActivation.bindingAnnotation, JsString(f.asString)))
WhiskActivation(
activationId = job.msg.activationId,
namespace = job.msg.user.namespace.name.toPath,
subject = job.msg.user.subject,
cause = job.msg.cause,
name = job.action.name,
version = job.action.version,
start = totalInterval.start,
end = totalInterval.end,
duration = Some(totalInterval.duration.toMillis),
response = response,
annotations = {
Parameters(WhiskActivation.limitsAnnotation, job.action.limits.toJson) ++
Parameters(WhiskActivation.pathAnnotation, JsString(job.action.fullyQualifiedName(false).asString)) ++
Parameters(WhiskActivation.kindAnnotation, JsString(job.action.exec.kind)) ++
Parameters(WhiskActivation.timeoutAnnotation, JsBoolean(isTimeout)) ++
causedBy ++ initTime ++ binding
})
}
/**
   * Partitions the activation arguments into the initialization parameters, which the action runtime exports
   * to the environment, and the remaining arguments, which are passed to the action on each run.
*
* @param content the activation arguments
* @param initArgs set of parameters to treat as initialization arguments
* @return A partition of the arguments into an environment variables map and the JsObject argument to the action
*/
def partitionArguments(content: Option[JsObject], initArgs: Set[String]): (Map[String, JsValue], JsObject) = {
content match {
case None => (Map.empty, JsObject.empty)
case Some(js) if initArgs.isEmpty => (Map.empty, js)
case Some(js) =>
val (env, args) = js.fields.partition(k => initArgs.contains(k._1))
(env, JsObject(args))
}
}
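  // Hedged sketch (not part of the original file): illustrates the partitioning above. With
  // content = {"user": "alice", "debug": true} and initArgs = Set("debug"), the "debug" field
  // becomes an init-time environment entry while "user" remains a run-time argument.
  def partitionArgumentsExample(): (Map[String, JsValue], JsObject) = {
    val content = JsObject("user" -> JsString("alice"), "debug" -> JsBoolean(true))
    partitionArguments(Some(content), Set("debug"))
    // == (Map("debug" -> JsBoolean(true)), JsObject("user" -> JsString("alice")))
  }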
}
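// Hedged usage sketch (not part of the original file): wiring a ContainerProxy as described in the
// class-level contract comment. All collaborators (factory, ack, store, log collector, instance,
// pool config, the prewarm exec and the job) are placeholders supplied by a real invoker, and the
// default health-check/timeout arguments of props() require the invoker configuration to be
// loadable; only the message flow is the point here.
object ContainerProxyUsageSketch {
  import akka.actor.ActorSystem

  def wire(system: ActorSystem,
           factory: (TransactionId, String, ImageName, Boolean, ByteSize, Int,
             Option[ExecutableWhiskAction]) => Future[Container],
           ack: ActiveAck,
           store: (TransactionId, WhiskActivation, UserContext) => Future[Any],
           collectLogs: LogsCollector,
           instance: InvokerInstanceId,
           poolConfig: ContainerPoolConfig,
           prewarmExec: CodeExec[_],
           job: Run): Unit = {
    val proxy = system.actorOf(ContainerProxy.props(factory, ack, store, collectLogs, instance, poolConfig))
    proxy ! Start(prewarmExec, 256.MB) // prewarm a stem-cell container (Uninitialized -> Starting -> Started)
    proxy ! job                        // schedule an activation; the proxy replies with NeedWork when it has capacity
  }
}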
object TCPPingClient {
def props(tcp: ActorRef, containerId: String, config: ContainerProxyHealthCheckConfig, remote: InetSocketAddress) =
Props(new TCPPingClient(tcp, containerId, remote, config))
}
class TCPPingClient(tcp: ActorRef,
containerId: String,
remote: InetSocketAddress,
config: ContainerProxyHealthCheckConfig)
extends Actor {
implicit val logging = new AkkaLogging(context.system.log)
implicit val ec = context.system.dispatcher
implicit var healthPingTx = TransactionId.actionHealthPing
case object HealthPingSend
var scheduledPing: Option[Cancellable] = None
var failedCount = 0
val addressString = s"${remote.getHostString}:${remote.getPort}"
restartPing()
private def restartPing() = {
cancelPing() //just in case restart is called twice
scheduledPing = Some(
context.system.scheduler.schedule(config.checkPeriod, config.checkPeriod, self, HealthPingSend))
}
private def cancelPing() = {
scheduledPing.foreach(_.cancel())
}
def receive = {
case HealthPingEnabled(enabled) =>
if (enabled) {
restartPing()
} else {
cancelPing()
}
case HealthPingSend =>
healthPingTx = TransactionId(systemPrefix + "actionHealth") //reset the tx id each iteration
tcp ! Connect(remote)
case CommandFailed(_: Connect) =>
failedCount += 1
if (failedCount == config.maxFails) {
logging.error(
this,
s"Failed health connection to $containerId ($addressString) $failedCount times - exceeded max ${config.maxFails} failures")
//destroy this container since we cannot communicate with it
context.parent ! FailureMessage(
new SocketException(s"Health connection to $containerId ($addressString) failed $failedCount times"))
cancelPing()
context.stop(self)
} else {
logging.warn(this, s"Failed health connection to $containerId ($addressString) $failedCount times")
}
case Connected(_, _) =>
sender() ! Close
if (failedCount > 0) {
//reset in case of temp failure
logging.info(
this,
s"Succeeded health connection to $containerId ($addressString) after $failedCount previous failures")
failedCount = 0
} else {
logging.debug(this, s"Succeeded health connection to $containerId ($addressString)")
}
}
}
/** Indicates that something went wrong with an activation and the container should be removed */
trait ActivationError extends Exception {
val activation: WhiskActivation
}
/** Indicates an activation with a non-successful response */
case class ActivationUnsuccessfulError(activation: WhiskActivation) extends ActivationError
/** Indicates reading logs for an activation failed (terminally, truncated) */
case class ActivationLogReadingError(activation: WhiskActivation) extends ActivationError
|
rabbah/openwhisk
|
core/invoker/src/main/scala/org/apache/openwhisk/core/containerpool/ContainerProxy.scala
|
Scala
|
apache-2.0
| 43,785 |
/* sbt -- Simple Build Tool
* Copyright 2009 Mark Harrah
*/
package xsbt
package datatype
import java.io.File
import sbt.IO.readLines
import Function.tupled
import java.util.regex.Pattern
class DatatypeParser extends NotNull
{
	val WhitespacePattern = Pattern compile """\s*"""//(?>\#(.*))?"""
	val EnumPattern = Pattern compile """enum\s+(\S+)\s*:\s*(.+)"""
	val ClassPattern = Pattern compile """(\t*)(\S+)\s*"""
	val MemberPattern = Pattern compile """(\t*)(\S+)\s*:\s*([^\s*]+)([*]?)"""
def processWhitespaceLine(l: Array[String], line: Int) = new WhitespaceLine(l.mkString, line)
def processEnumLine(l: Array[String], line: Int) = new EnumLine(l(0), l(1).split(",").map(_.trim), line)
def processClassLine(l: Array[String], line: Int) = new ClassLine(l(1), l(0).length, line)
def processMemberLine(l: Array[String], line: Int) = new MemberLine(l(1), l(2), l(3).isEmpty, l(0).length, line)
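	// Hedged sketch (not part of the original file): the shape of input the patterns and handlers
	// above accept. Names are illustrative. Tab depth (the `(\t*)` group) determines nesting and a
	// trailing `*` marks a repeated member, e.g.:
	//
	//   enum Level : Debug, Info, Warn, Error
	//   Task
	//   	name : String
	//   	Compile
	//   		sources : File*
	//
	// parseFile turns this into an EnumDef("Level", ...), a top-level ClassDef("Task", ...) with a
	// `name` member, and a nested ClassDef("Compile", Some(Task), ...) with a repeated `sources` member.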
def error(l: Line, msg: String): Nothing = error(l.line, msg)
def error(line: Int, msg: String): Nothing = throw new RuntimeException("{line " + line + "} " + msg)
def parseFile(file: File): Seq[Definition] =
{
val (open, closed) = ( (Array[ClassDef](), List[Definition]()) /: parseLines(file) ) {
case ((open, defs), line) => processLine(open, defs, line)
}
open ++ closed
}
def parseLines(file: File): Seq[Line] = readLines(file).zipWithIndex.map(tupled(parseLine))
def parseLine(line: String, lineNumber: Int): Line =
matchPattern(WhitespacePattern -> processWhitespaceLine _, EnumPattern -> processEnumLine _,
ClassPattern -> processClassLine _, MemberPattern -> processMemberLine _)(line, lineNumber)
type Handler = (Array[String], Int) => Line
def matchPattern(patterns: (Pattern, Handler)*)(line: String, lineNumber: Int): Line =
patterns.flatMap { case (pattern, f) => matchPattern(pattern, f)(line, lineNumber) }.headOption.getOrElse {
error(lineNumber, "Invalid line, expected enum, class, or member definition")
}
def matchPattern(pattern: Pattern, f: Handler)(line: String, lineNumber: Int): Option[Line] =
{
val matcher = pattern.matcher(line)
if(matcher.matches)
{
val count = matcher.groupCount
val groups = (for(i <- 1 to count) yield matcher.group(i)).toArray[String]
Some( f(groups, lineNumber) )
}
else
None
}
def processLine(open: Array[ClassDef], definitions: List[Definition], line: Line): (Array[ClassDef], List[Definition]) =
{
line match
{
case w: WhitespaceLine => (open, definitions)
case e: EnumLine => (Array(), new EnumDef(e.name, e.members) :: open.toList ::: definitions)
case m: MemberLine =>
if(m.level == 0 || m.level > open.length) error(m, "Member must be declared in a class definition")
else withCurrent(open, definitions, m.level) { c => List( c + m) }
case c: ClassLine =>
if(c.level == 0) (Array( new ClassDef(c.name, None, Nil) ), open.toList ::: definitions)
else if(c.level > open.length) error(c, "Class must be declared as top level or as a subclass")
else withCurrent(open, definitions, c.level) { p => p :: new ClassDef(c.name, Some(p), Nil) :: Nil}
}
}
private def withCurrent(open: Array[ClassDef], definitions: List[Definition], level: Int)(onCurrent: ClassDef => Seq[ClassDef]): (Array[ClassDef], List[Definition]) =
{
require(0 < level && level <= open.length)
val closed = open.drop(level).toList
val newOpen = open.take(level - 1) ++ onCurrent(open(level - 1))
( newOpen.toArray, closed ::: definitions )
}
}
|
harrah/xsbt
|
util/datatype/src/main/scala/xsbt/datatype/DatatypeParser.scala
|
Scala
|
bsd-3-clause
| 3,479 |
/*
* Copyright (c) 2019. Yuriy Stul
*/
package com.stulsoft.kafka.admin
import java.time.Duration
import java.util.Properties
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.consumer.KafkaConsumer
/**
* @author Yuriy Stul
*/
class AdminConsumer(val servers: String) extends LazyLogging {
val props = new Properties
props.put("bootstrap.servers", servers)
props.put("group.id", "admin_consumer")
props.put("enable.auto.commit", "false")
props.put("auto.offset.reset", "earliest")
props.put("auto.commit.interval.ms", "1000")
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
  /** Counts the messages returned by a single 10-second poll of `topic`; returns -1 on failure. */
  def calculateNumberOfMessages(topic: String): Int = {
    var consumer: KafkaConsumer[String, String] = null
    try {
      consumer = new KafkaConsumer[String, String](props)
      consumer.subscribe(java.util.Collections.singleton(topic))
      val records = consumer.poll(Duration.ofMillis(10000))
      records.count()
    } catch {
      case e: Exception =>
        logger.error(s"Error: ${e.getMessage}")
        -1
    } finally {
      // close the consumer so its group connection and sockets are released
      if (consumer != null) consumer.close()
    }
  }
}
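// Hedged usage sketch (not part of the original file): the broker address and topic are placeholders.
object AdminConsumerExample extends App {
  val admin = new AdminConsumer("localhost:9092")
  val count = admin.calculateNumberOfMessages("test-topic")
  println(s"Messages seen in a single poll: $count")
}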
|
ysden123/poc
|
pkafka/kafka-admin/src/main/scala/com/stulsoft/kafka/admin/AdminConsumer.scala
|
Scala
|
mit
| 1,149 |
package org.jetbrains.plugins.scala
package lang.psi.light.scala
import com.intellij.psi.impl.light.LightElement
import com.intellij.psi.{PsiAnnotation, PsiElement}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.base.ScModifierList
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScAnnotation
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParamClause
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, TypeAliasSignature}
/**
* @author Alefas
* @since 04/04/14.
*/
class ScLightTypeAliasDefinition(s: TypeAliasSignature, val ta: ScTypeAliasDefinition)
extends LightElement(ta.getManager, ta.getLanguage) with ScTypeAliasDefinition {
setNavigationElement(ta)
override def nameId: PsiElement = ta.nameId
override def upperBound: TypeResult[ScType] = Success(s.upperBound, Some(this))
override def lowerBound: TypeResult[ScType] = Success(s.lowerBound, Some(this))
override def aliasedType: TypeResult[ScType] = Success(s.lowerBound, Some(this))
override def aliasedTypeElement: Option[ScTypeElement] = ta.aliasedTypeElement
override def getOriginalElement: PsiElement = super[ScTypeAliasDefinition].getOriginalElement
override def toString: String = ta.toString
override def setModifierProperty(name: String, value: Boolean): Unit = ta.setModifierProperty(name, value)
override def hasFinalModifier: Boolean = ta.hasFinalModifier
override def hasAbstractModifier: Boolean = ta.hasAbstractModifier
override def hasModifierPropertyScala(name: String): Boolean = ta.hasModifierPropertyScala(name)
override def getModifierList: ScModifierList = ta.getModifierList
override def getAnnotations: Array[PsiAnnotation] = ta.getAnnotations
override def getApplicableAnnotations: Array[PsiAnnotation] = ta.getApplicableAnnotations
override def findAnnotation(qualifiedName: String): PsiAnnotation = ta.findAnnotation(qualifiedName)
override def addAnnotation(qualifiedName: String): PsiAnnotation = ta.addAnnotation(qualifiedName)
override def hasAnnotation(qualifiedName: String): Boolean = ta.hasAnnotation(qualifiedName)
override def annotations: Seq[ScAnnotation] = ta.annotations
override def navigate(requestFocus: Boolean): Unit = ta.navigate(requestFocus)
override def canNavigate: Boolean = ta.canNavigate
override def canNavigateToSource: Boolean = ta.canNavigateToSource
override def typeParametersClause: Option[ScTypeParamClause] =
ta.typeParametersClause.map(new ScLightTypeParamClause(s.typeParams, _))
override protected def findChildrenByClassScala[T >: Null <: ScalaPsiElement](clazz: Class[T]): Array[T] =
throw new UnsupportedOperationException("Operation on light element")
override protected def findChildByClassScala[T >: Null <: ScalaPsiElement](clazz: Class[T]): T =
throw new UnsupportedOperationException("Operation on light element")
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/light/scala/ScLightTypeAliasDefinition.scala
|
Scala
|
apache-2.0
| 3,192 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util
import java.util.Locale
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.encoders.OuterScopes
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.SubExprUtils._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.optimizer.OptimizeUpdateFields
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.streaming.StreamingRelationV2
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.catalog.TableChange.{AddColumn, After, ColumnChange, ColumnPosition, DeleteColumn, RenameColumn, UpdateColumnComment, UpdateColumnNullability, UpdateColumnPosition, UpdateColumnType}
import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.{PartitionOverwriteMode, StoreAssignmentPolicy}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
/**
* A trivial [[Analyzer]] with a dummy [[SessionCatalog]] and [[EmptyFunctionRegistry]].
* Used for testing when all relations are already filled in and the analyzer needs only
* to resolve attribute references.
*/
object SimpleAnalyzer extends Analyzer(
new CatalogManager(
FakeV2SessionCatalog,
new SessionCatalog(
new InMemoryCatalog,
EmptyFunctionRegistry) {
override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {}
})) {
override def resolver: Resolver = caseSensitiveResolution
}
object FakeV2SessionCatalog extends TableCatalog {
private def fail() = throw new UnsupportedOperationException
override def listTables(namespace: Array[String]): Array[Identifier] = fail()
override def loadTable(ident: Identifier): Table = {
throw new NoSuchTableException(ident.toString)
}
override def createTable(
ident: Identifier,
schema: StructType,
partitions: Array[Transform],
properties: util.Map[String, String]): Table = fail()
override def alterTable(ident: Identifier, changes: TableChange*): Table = fail()
override def dropTable(ident: Identifier): Boolean = fail()
override def renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = fail()
override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = fail()
override def name(): String = CatalogManager.SESSION_CATALOG_NAME
}
/**
 * Provides a way to keep state during the analysis; this enables us to decouple the concerns
 * of the analysis environment from the catalog.
* The state that is kept here is per-query.
*
* Note this is thread local.
*
* @param catalogAndNamespace The catalog and namespace used in the view resolution. This overrides
* the current catalog and namespace when resolving relations inside
* views.
 * @param nestedViewDepth The nested depth in the view resolution; this enables us to limit the
* depth of nested views.
* @param relationCache A mapping from qualified table names to resolved relations. This can ensure
* that the table is resolved only once if a table is used multiple times
* in a query.
*/
case class AnalysisContext(
catalogAndNamespace: Seq[String] = Nil,
nestedViewDepth: Int = 0,
relationCache: mutable.Map[Seq[String], LogicalPlan] = mutable.Map.empty)
object AnalysisContext {
private val value = new ThreadLocal[AnalysisContext]() {
override def initialValue: AnalysisContext = AnalysisContext()
}
def get: AnalysisContext = value.get()
def reset(): Unit = value.remove()
private def set(context: AnalysisContext): Unit = value.set(context)
def withAnalysisContext[A](catalogAndNamespace: Seq[String])(f: => A): A = {
val originContext = value.get()
val context = AnalysisContext(
catalogAndNamespace, originContext.nestedViewDepth + 1, originContext.relationCache)
set(context)
try f finally { set(originContext) }
}
}
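// Illustrative sketch of how the per-query, thread-local context above is typically used when
// resolving a view; the catalog and namespace names below are only examples.
//
// {{{
//   AnalysisContext.withAnalysisContext(Seq("some_catalog", "some_db")) {
//     // Inside this block the view's catalog/namespace is visible and nestedViewDepth has been
//     // incremented; the previous context is restored afterwards.
//     assert(AnalysisContext.get.catalogAndNamespace == Seq("some_catalog", "some_db"))
//   }
// }}}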
/**
* Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and
* [[UnresolvedRelation]]s into fully typed objects using information in a [[SessionCatalog]].
*/
class Analyzer(override val catalogManager: CatalogManager)
extends RuleExecutor[LogicalPlan] with CheckAnalysis with LookupCatalog with SQLConfHelper {
private val v1SessionCatalog: SessionCatalog = catalogManager.v1SessionCatalog
override protected def isPlanIntegral(plan: LogicalPlan): Boolean = {
!Utils.isTesting || LogicalPlanIntegrity.checkIfExprIdsAreGloballyUnique(plan)
}
override def isView(nameParts: Seq[String]): Boolean = v1SessionCatalog.isView(nameParts)
// Only for tests.
def this(catalog: SessionCatalog) = {
this(new CatalogManager(FakeV2SessionCatalog, catalog))
}
def executeAndCheck(plan: LogicalPlan, tracker: QueryPlanningTracker): LogicalPlan = {
AnalysisHelper.markInAnalyzer {
val analyzed = executeAndTrack(plan, tracker)
try {
checkAnalysis(analyzed)
analyzed
} catch {
case e: AnalysisException =>
val ae = new AnalysisException(e.message, e.line, e.startPosition, Option(analyzed))
ae.setStackTrace(e.getStackTrace)
throw ae
}
}
}
override def execute(plan: LogicalPlan): LogicalPlan = {
AnalysisContext.reset()
try {
executeSameContext(plan)
} finally {
AnalysisContext.reset()
}
}
private def executeSameContext(plan: LogicalPlan): LogicalPlan = super.execute(plan)
def resolver: Resolver = conf.resolver
/**
   * If the plan cannot be resolved within maxIterations, the analyzer will throw an exception to
   * inform the user to increase the value of SQLConf.ANALYZER_MAX_ITERATIONS.
*/
protected def fixedPoint =
FixedPoint(
conf.analyzerMaxIterations,
errorOnExceed = true,
maxIterationsSetting = SQLConf.ANALYZER_MAX_ITERATIONS.key)
/**
* Override to provide additional rules for the "Resolution" batch.
*/
val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil
/**
* Override to provide rules to do post-hoc resolution. Note that these rules will be executed
* in an individual batch. This batch is to run right after the normal resolution batch and
* execute its rules in one pass.
*/
val postHocResolutionRules: Seq[Rule[LogicalPlan]] = Nil
override def batches: Seq[Batch] = Seq(
Batch("Substitution", fixedPoint,
      // This rule optimizes `UpdateFields` expression chains, so it looks more like an
      // optimization rule. However, when manipulating a deeply nested schema, the `UpdateFields`
      // expression tree could become very complex and make analysis impossible. Thus we need to
      // optimize `UpdateFields` early, at the beginning of analysis.
OptimizeUpdateFields,
CTESubstitution,
WindowsSubstitution,
EliminateUnions,
SubstituteUnresolvedOrdinals),
Batch("Disable Hints", Once,
new ResolveHints.DisableHints),
Batch("Hints", fixedPoint,
ResolveHints.ResolveJoinStrategyHints,
ResolveHints.ResolveCoalesceHints),
Batch("Simple Sanity Check", Once,
LookupFunctions),
Batch("Resolution", fixedPoint,
ResolveTableValuedFunctions ::
ResolveNamespace(catalogManager) ::
new ResolveCatalogs(catalogManager) ::
ResolveInsertInto ::
ResolveRelations ::
ResolveTables ::
ResolvePartitionSpec ::
AddMetadataColumns ::
ResolveReferences ::
ResolveCreateNamedStruct ::
ResolveDeserializer ::
ResolveNewInstance ::
ResolveUpCast ::
ResolveGroupingAnalytics ::
ResolvePivot ::
ResolveOrdinalInOrderByAndGroupBy ::
ResolveAggAliasInGroupBy ::
ResolveMissingReferences ::
ExtractGenerator ::
ResolveGenerate ::
ResolveFunctions ::
ResolveAliases ::
ResolveSubquery ::
ResolveSubqueryColumnAliases ::
ResolveWindowOrder ::
ResolveWindowFrame ::
ResolveNaturalAndUsingJoin ::
ResolveOutputRelation ::
ExtractWindowExpressions ::
GlobalAggregates ::
ResolveAggregateFunctions ::
TimeWindowing ::
ResolveInlineTables ::
ResolveHigherOrderFunctions(v1SessionCatalog) ::
ResolveLambdaVariables ::
ResolveTimeZone ::
ResolveRandomSeed ::
ResolveBinaryArithmetic ::
ResolveUnion ::
TypeCoercion.typeCoercionRules ++
extendedResolutionRules : _*),
Batch("Post-Hoc Resolution", Once,
Seq(ResolveNoopDropTable) ++
postHocResolutionRules: _*),
Batch("Normalize Alter Table", Once, ResolveAlterTableChanges),
Batch("Remove Unresolved Hints", Once,
new ResolveHints.RemoveAllHints),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("UDF", Once,
HandleNullInputsForUDF,
ResolveEncodersInUDF),
Batch("UpdateNullability", Once,
UpdateAttributeNullability),
Batch("Subquery", Once,
UpdateOuterReferences),
Batch("Cleanup", fixedPoint,
CleanupAliases)
)
/**
 * For [[Add]]:
 * 1. if both sides are intervals, stays the same;
 * 2. else if one side is a date and the other is an interval,
 *    turns it to [[DateAddInterval]];
 * 3. else if one side is an interval, turns it to [[TimeAdd]];
 * 4. else if one side is a date, turns it to [[DateAdd]];
 * 5. else stays the same.
 *
 * For [[Subtract]]:
 * 1. if both sides are intervals, stays the same;
 * 2. else if the left side is a date and the right side is an interval,
 *    turns it to [[DateAddInterval(l, -r)]];
 * 3. else if the right side is an interval, turns it to [[TimeAdd(l, -r)]];
 * 4. else if one side is a timestamp, turns it to [[SubtractTimestamps]];
 * 5. else if the right side is a date, turns it to [[DateDiff]]/[[SubtractDates]];
 * 6. else if the left side is a date, turns it to [[DateSub]];
 * 7. else stays the same.
*
* For [[Multiply]]:
* 1. If one side is interval, turns it to [[MultiplyInterval]];
* 2. otherwise, stays the same.
*
* For [[Divide]]:
* 1. If the left side is interval, turns it to [[DivideInterval]];
* 2. otherwise, stays the same.
*/
object ResolveBinaryArithmetic extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan => p.transformExpressionsUp {
case a @ Add(l, r, f) if a.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, CalendarIntervalType) => a
case (DateType, CalendarIntervalType) => DateAddInterval(l, r, ansiEnabled = f)
case (_, CalendarIntervalType) => Cast(TimeAdd(l, r), l.dataType)
case (CalendarIntervalType, DateType) => DateAddInterval(r, l, ansiEnabled = f)
case (CalendarIntervalType, _) => Cast(TimeAdd(r, l), r.dataType)
case (DateType, dt) if dt != StringType => DateAdd(l, r)
case (dt, DateType) if dt != StringType => DateAdd(r, l)
case _ => a
}
case s @ Subtract(l, r, f) if s.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, CalendarIntervalType) => s
case (DateType, CalendarIntervalType) =>
DatetimeSub(l, r, DateAddInterval(l, UnaryMinus(r, f), ansiEnabled = f))
case (_, CalendarIntervalType) =>
Cast(DatetimeSub(l, r, TimeAdd(l, UnaryMinus(r, f))), l.dataType)
case (TimestampType, _) => SubtractTimestamps(l, r)
case (_, TimestampType) => SubtractTimestamps(l, r)
case (_, DateType) => SubtractDates(l, r)
case (DateType, dt) if dt != StringType => DateSub(l, r)
case _ => s
}
case m @ Multiply(l, r, f) if m.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, _) => MultiplyInterval(l, r, f)
case (_, CalendarIntervalType) => MultiplyInterval(r, l, f)
case _ => m
}
case d @ Divide(l, r, f) if d.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, _) => DivideInterval(l, r, f)
case _ => d
}
}
}
}
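  // A few illustrative rewrites performed by the rule above (flags such as ansiEnabled /
  // failOnError are omitted here for brevity):
  // {{{
  //   date_col + interval_col   ==>  DateAddInterval(date_col, interval_col)
  //   ts_col + interval_col     ==>  Cast(TimeAdd(ts_col, interval_col), TimestampType)
  //   date_col - date_col       ==>  SubtractDates(date_col, date_col)
  //   interval_col * 2          ==>  MultiplyInterval(interval_col, 2)
  // }}}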
/**
* Substitute child plan with WindowSpecDefinitions.
*/
object WindowsSubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Lookup WindowSpecDefinitions. This rule works with unresolved children.
case WithWindowDefinition(windowDefinitions, child) => child.resolveExpressions {
case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) =>
val errorMessage =
s"Window specification $windowName is not defined in the WINDOW clause."
val windowSpecDefinition =
windowDefinitions.getOrElse(windowName, failAnalysis(errorMessage))
WindowExpression(c, windowSpecDefinition)
}
}
}
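  // Illustrative example: for a query roughly like
  // {{{
  //   SELECT sum(x) OVER w FROM t WINDOW w AS (PARTITION BY y)
  // }}}
  // the UnresolvedWindowExpression referring to "w" is replaced with a WindowExpression carrying
  // the WindowSpecDefinition registered for "w"; an undefined window name fails the analysis.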
/**
* Replaces [[UnresolvedAlias]]s with concrete aliases.
*/
object ResolveAliases extends Rule[LogicalPlan] {
private def assignAliases(exprs: Seq[NamedExpression]) = {
exprs.map(_.transformUp { case u @ UnresolvedAlias(child, optGenAliasFunc) =>
child match {
case ne: NamedExpression => ne
case go @ GeneratorOuter(g: Generator) if g.resolved => MultiAlias(go, Nil)
case e if !e.resolved => u
case g: Generator => MultiAlias(g, Nil)
case c @ Cast(ne: NamedExpression, _, _) => Alias(c, ne.name)()
case e: ExtractValue => Alias(e, toPrettySQL(e))()
case e if optGenAliasFunc.isDefined =>
Alias(child, optGenAliasFunc.get.apply(e))()
case e => Alias(e, toPrettySQL(e))()
}
}
).asInstanceOf[Seq[NamedExpression]]
}
private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) =
exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined)
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) =>
Aggregate(groups, assignAliases(aggs), child)
case g: GroupingSets if g.child.resolved && hasUnresolvedAlias(g.aggregations) =>
g.copy(aggregations = assignAliases(g.aggregations))
case Pivot(groupByOpt, pivotColumn, pivotValues, aggregates, child)
if child.resolved && groupByOpt.isDefined && hasUnresolvedAlias(groupByOpt.get) =>
Pivot(Some(assignAliases(groupByOpt.get)), pivotColumn, pivotValues, aggregates, child)
case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) =>
Project(assignAliases(projectList), child)
}
}
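  // Illustrative example: in a projection such as
  // {{{
  //   SELECT a + 1, CAST(b AS STRING) FROM t
  // }}}
  // the unnamed expressions receive concrete aliases, roughly `(a + 1)` (via toPrettySQL) and
  // `b` (the name of the cast's named child).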
object ResolveGroupingAnalytics extends Rule[LogicalPlan] {
/*
* GROUP BY a, b, c WITH ROLLUP
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ).
* Group Count: N + 1 (N is the number of group expressions)
*
     * We need to get all of its subsets for the rule described above; each subset is
     * represented as a sequence of expressions.
*/
def rollupExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.inits.toIndexedSeq
/*
* GROUP BY a, b, c WITH CUBE
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ).
* Group Count: 2 ^ N (N is the number of group expressions)
*
     * We need to get all of its subsets for a given GROUP BY expression; the subsets are
     * represented as sequences of expressions.
*/
def cubeExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = {
// `cubeExprs0` is recursive and returns a lazy Stream. Here we call `toIndexedSeq` to
// materialize it and avoid serialization problems later on.
cubeExprs0(exprs).toIndexedSeq
}
def cubeExprs0(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.toList match {
case x :: xs =>
val initial = cubeExprs0(xs)
initial.map(x +: _) ++ initial
case Nil =>
Seq(Seq.empty)
}
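    // Illustrative example: for grouping expressions Seq(a, b),
    //   rollupExprs(Seq(a, b)) == Seq(Seq(a, b), Seq(a), Seq())
    //   cubeExprs(Seq(a, b))   == Seq(Seq(a, b), Seq(a), Seq(b), Seq())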
private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
e.collectFirst {
case g: Grouping => g
case g: GroupingID => g
}.isDefined
}
private def replaceGroupingFunc(
expr: Expression,
groupByExprs: Seq[Expression],
gid: Expression): Expression = {
expr transform {
case e: GroupingID =>
if (e.groupByExprs.isEmpty ||
e.groupByExprs.map(_.canonicalized) == groupByExprs.map(_.canonicalized)) {
Alias(gid, toPrettySQL(e))()
} else {
throw QueryCompilationErrors.groupingIDMismatchError(e, groupByExprs)
}
case e @ Grouping(col: Expression) =>
val idx = groupByExprs.indexWhere(_.semanticEquals(col))
if (idx >= 0) {
Alias(Cast(BitwiseAnd(ShiftRight(gid, Literal(groupByExprs.length - 1 - idx)),
Literal(1L)), ByteType), toPrettySQL(e))()
} else {
throw QueryCompilationErrors.groupingColInvalidError(col, groupByExprs)
}
}
}
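    // Illustrative example: with GROUP BY a, b and grouping id `gid`, grouping(a) is replaced by
    // roughly Cast(BitwiseAnd(ShiftRight(gid, 1), 1L), ByteType), since `a` sits at index 0 of
    // the two grouping expressions (shift = length - 1 - idx).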
/*
* Create new alias for all group by expressions for `Expand` operator.
*/
private def constructGroupByAlias(groupByExprs: Seq[Expression]): Seq[Alias] = {
groupByExprs.map {
case e: NamedExpression => Alias(e, e.name)(qualifier = e.qualifier)
case other => Alias(other, other.toString)()
}
}
/*
* Construct [[Expand]] operator with grouping sets.
*/
private def constructExpand(
selectedGroupByExprs: Seq[Seq[Expression]],
child: LogicalPlan,
groupByAliases: Seq[Alias],
gid: Attribute): LogicalPlan = {
// Change the nullability of group by aliases if necessary. For example, if we have
// GROUPING SETS ((a,b), a), we do not need to change the nullability of a, but we
// should change the nullability of b to be TRUE.
// TODO: For Cube/Rollup just set nullability to be `true`.
val expandedAttributes = groupByAliases.map { alias =>
if (selectedGroupByExprs.exists(!_.contains(alias.child))) {
alias.toAttribute.withNullability(true)
} else {
alias.toAttribute
}
}
val groupingSetsAttributes = selectedGroupByExprs.map { groupingSetExprs =>
groupingSetExprs.map { expr =>
val alias = groupByAliases.find(_.child.semanticEquals(expr)).getOrElse(
failAnalysis(s"$expr doesn't show up in the GROUP BY list $groupByAliases"))
// Map alias to expanded attribute.
expandedAttributes.find(_.semanticEquals(alias.toAttribute)).getOrElse(
alias.toAttribute)
}
}
Expand(groupingSetsAttributes, groupByAliases, expandedAttributes, gid, child)
}
/*
* Construct new aggregate expressions by replacing grouping functions.
*/
private def constructAggregateExprs(
groupByExprs: Seq[Expression],
aggregations: Seq[NamedExpression],
groupByAliases: Seq[Alias],
groupingAttrs: Seq[Expression],
gid: Attribute): Seq[NamedExpression] = aggregations.map {
      // Collect all the AggregateExpressions found, so we can check whether an expression is
      // part of any AggregateExpression or not.
val aggsBuffer = ArrayBuffer[Expression]()
// Returns whether the expression belongs to any expressions in `aggsBuffer` or not.
def isPartOfAggregation(e: Expression): Boolean = {
aggsBuffer.exists(a => a.find(_ eq e).isDefined)
}
replaceGroupingFunc(_, groupByExprs, gid).transformDown {
// AggregateExpression should be computed on the unmodified value of its argument
// expressions, so we should not replace any references to grouping expression
// inside it.
case e: AggregateExpression =>
aggsBuffer += e
e
case e if isPartOfAggregation(e) => e
case e =>
// Replace expression by expand output attribute.
val index = groupByAliases.indexWhere(_.child.semanticEquals(e))
if (index == -1) {
e
} else {
groupingAttrs(index)
}
}.asInstanceOf[NamedExpression]
}
private def getFinalGroupByExpressions(
selectedGroupByExprs: Seq[Seq[Expression]],
groupByExprs: Seq[Expression]): Seq[Expression] = {
      // In case of the ANSI-SQL compliant syntax for GROUPING SETS, groupByExprs is optional and
      // can be empty. In such a case, we derive the groupByExprs from the user-supplied values
      // for the grouping sets.
if (groupByExprs == Nil) {
selectedGroupByExprs.flatten.foldLeft(Seq.empty[Expression]) { (result, currentExpr) =>
          // Only unique expressions are included in the group by expressions, and uniqueness is
          // determined based on semantic equality. For example, GROUPING SETS ((a * b), (b * a))
          // results in the grouping expression (a * b).
if (result.find(_.semanticEquals(currentExpr)).isDefined) {
result
} else {
result :+ currentExpr
}
}
} else {
groupByExprs
}
}
/*
* Construct [[Aggregate]] operator from Cube/Rollup/GroupingSets.
*/
private def constructAggregate(
selectedGroupByExprs: Seq[Seq[Expression]],
groupByExprs: Seq[Expression],
aggregationExprs: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
val finalGroupByExpressions = getFinalGroupByExpressions(selectedGroupByExprs, groupByExprs)
if (finalGroupByExpressions.size > GroupingID.dataType.defaultSize * 8) {
throw QueryCompilationErrors.groupingSizeTooLargeError(GroupingID.dataType.defaultSize * 8)
}
// Expand works by setting grouping expressions to null as determined by the
// `selectedGroupByExprs`. To prevent these null values from being used in an aggregate
// instead of the original value we need to create new aliases for all group by expressions
// that will only be used for the intended purpose.
val groupByAliases = constructGroupByAlias(finalGroupByExpressions)
val gid = AttributeReference(VirtualColumn.groupingIdName, GroupingID.dataType, false)()
val expand = constructExpand(selectedGroupByExprs, child, groupByAliases, gid)
val groupingAttrs = expand.output.drop(child.output.length)
val aggregations = constructAggregateExprs(
finalGroupByExpressions, aggregationExprs, groupByAliases, groupingAttrs, gid)
Aggregate(groupingAttrs, aggregations, expand)
}
private def findGroupingExprs(plan: LogicalPlan): Seq[Expression] = {
plan.collectFirst {
case a: Aggregate =>
// this Aggregate should have grouping id as the last grouping key.
val gid = a.groupingExpressions.last
if (!gid.isInstanceOf[AttributeReference]
|| gid.asInstanceOf[AttributeReference].name != VirtualColumn.groupingIdName) {
failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
}
a.groupingExpressions.take(a.groupingExpressions.length - 1)
}.getOrElse {
failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
}
}
private def tryResolveHavingCondition(h: UnresolvedHaving): LogicalPlan = {
val aggForResolving = h.child match {
// For CUBE/ROLLUP expressions, to avoid resolving repeatedly, here we delete them from
// groupingExpressions for condition resolving.
case a @ Aggregate(Seq(c @ Cube(groupByExprs)), _, _) =>
a.copy(groupingExpressions = groupByExprs)
case a @ Aggregate(Seq(r @ Rollup(groupByExprs)), _, _) =>
a.copy(groupingExpressions = groupByExprs)
case g: GroupingSets =>
Aggregate(
getFinalGroupByExpressions(g.selectedGroupByExprs, g.groupByExprs),
g.aggregations, g.child)
}
// Try resolving the condition of the filter as though it is in the aggregate clause
val resolvedInfo =
ResolveAggregateFunctions.resolveFilterCondInAggregate(h.havingCondition, aggForResolving)
// Push the aggregate expressions into the aggregate (if any).
if (resolvedInfo.nonEmpty) {
val (extraAggExprs, resolvedHavingCond) = resolvedInfo.get
val newChild = h.child match {
case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child) =>
constructAggregate(
cubeExprs(groupByExprs), groupByExprs, aggregateExpressions ++ extraAggExprs, child)
case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child) =>
constructAggregate(
rollupExprs(groupByExprs), groupByExprs, aggregateExpressions ++ extraAggExprs, child)
case x: GroupingSets =>
constructAggregate(
x.selectedGroupByExprs, x.groupByExprs, x.aggregations ++ extraAggExprs, x.child)
}
      // Since the exprId of extraAggExprs will be changed in the constructed aggregate while the
      // aggregateExpressions keep the input order, we build an exprMap here to resolve the
      // condition again.
val exprMap = extraAggExprs.zip(
newChild.asInstanceOf[Aggregate].aggregateExpressions.takeRight(
extraAggExprs.length)).toMap
val newCond = resolvedHavingCond.transform {
case ne: NamedExpression if exprMap.contains(ne) => exprMap(ne)
}
Project(newChild.output.dropRight(extraAggExprs.length),
Filter(newCond, newChild))
} else {
h
}
}
    // This requires transformDown to resolve the having condition when generating the aggregate
    // node for CUBE/ROLLUP/GROUPING SETS. This also replaces grouping()/grouping_id() in resolved
    // Filter/Sort.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsDown {
case h @ UnresolvedHaving(
_, agg @ Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, _))
if agg.childrenResolved && (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
tryResolveHavingCondition(h)
case h @ UnresolvedHaving(
_, agg @ Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, _))
if agg.childrenResolved && (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
tryResolveHavingCondition(h)
case h @ UnresolvedHaving(_, g: GroupingSets)
if g.childrenResolved && g.expressions.forall(_.resolved) =>
tryResolveHavingCondition(h)
case a if !a.childrenResolved => a // be sure all of the children are resolved.
// Ensure group by expressions and aggregate expressions have been resolved.
case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child)
if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
constructAggregate(cubeExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child)
if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
constructAggregate(rollupExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
// Ensure all the expressions have been resolved.
case x: GroupingSets if x.expressions.forall(_.resolved) =>
constructAggregate(x.selectedGroupByExprs, x.groupByExprs, x.aggregations, x.child)
// We should make sure all expressions in condition have been resolved.
case f @ Filter(cond, child) if hasGroupingFunction(cond) && cond.resolved =>
val groupingExprs = findGroupingExprs(child)
// The unresolved grouping id will be resolved by ResolveMissingReferences
val newCond = replaceGroupingFunc(cond, groupingExprs, VirtualColumn.groupingIdAttribute)
f.copy(condition = newCond)
// We should make sure all [[SortOrder]]s have been resolved.
case s @ Sort(order, _, child)
if order.exists(hasGroupingFunction) && order.forall(_.resolved) =>
val groupingExprs = findGroupingExprs(child)
val gid = VirtualColumn.groupingIdAttribute
// The unresolved grouping id will be resolved by ResolveMissingReferences
val newOrder = order.map(replaceGroupingFunc(_, groupingExprs, gid).asInstanceOf[SortOrder])
s.copy(order = newOrder)
}
}
object ResolvePivot extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p: Pivot if !p.childrenResolved || !p.aggregates.forall(_.resolved)
|| (p.groupByExprsOpt.isDefined && !p.groupByExprsOpt.get.forall(_.resolved))
|| !p.pivotColumn.resolved || !p.pivotValues.forall(_.resolved) => p
case Pivot(groupByExprsOpt, pivotColumn, pivotValues, aggregates, child) =>
if (!RowOrdering.isOrderable(pivotColumn.dataType)) {
throw QueryCompilationErrors.unorderablePivotColError(pivotColumn)
}
// Check all aggregate expressions.
aggregates.foreach(checkValidAggregateExpression)
// Check all pivot values are literal and match pivot column data type.
val evalPivotValues = pivotValues.map { value =>
val foldable = value match {
case Alias(v, _) => v.foldable
case _ => value.foldable
}
if (!foldable) {
throw QueryCompilationErrors.nonLiteralPivotValError(value)
}
if (!Cast.canCast(value.dataType, pivotColumn.dataType)) {
throw QueryCompilationErrors.pivotValDataTypeMismatchError(value, pivotColumn)
}
Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
}
// Group-by expressions coming from SQL are implicit and need to be deduced.
val groupByExprs = groupByExprsOpt.getOrElse {
val pivotColAndAggRefs = pivotColumn.references ++ AttributeSet(aggregates)
child.output.filterNot(pivotColAndAggRefs.contains)
}
val singleAgg = aggregates.size == 1
def outputName(value: Expression, aggregate: Expression): String = {
val stringValue = value match {
case n: NamedExpression => n.name
case _ =>
val utf8Value =
Cast(value, StringType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
Option(utf8Value).map(_.toString).getOrElse("null")
}
if (singleAgg) {
stringValue
} else {
val suffix = aggregate match {
case n: NamedExpression => n.name
case _ => toPrettySQL(aggregate)
}
stringValue + "_" + suffix
}
}
if (aggregates.forall(a => PivotFirst.supportsDataType(a.dataType))) {
        // Since evaluating |pivotValues| if-statements for each input row can get slow, this is
        // an alternate plan that instead uses two steps of aggregation.
val namedAggExps: Seq[NamedExpression] = aggregates.map(a => Alias(a, a.sql)())
val namedPivotCol = pivotColumn match {
case n: NamedExpression => n
case _ => Alias(pivotColumn, "__pivot_col")()
}
val bigGroup = groupByExprs :+ namedPivotCol
val firstAgg = Aggregate(bigGroup, bigGroup ++ namedAggExps, child)
val pivotAggs = namedAggExps.map { a =>
            Alias(PivotFirst(namedPivotCol.toAttribute, a.toAttribute, evalPivotValues)
              .toAggregateExpression(), "__pivot_" + a.sql)()
}
val groupByExprsAttr = groupByExprs.map(_.toAttribute)
val secondAgg = Aggregate(groupByExprsAttr, groupByExprsAttr ++ pivotAggs, firstAgg)
val pivotAggAttribute = pivotAggs.map(_.toAttribute)
val pivotOutputs = pivotValues.zipWithIndex.flatMap { case (value, i) =>
aggregates.zip(pivotAggAttribute).map { case (aggregate, pivotAtt) =>
Alias(ExtractValue(pivotAtt, Literal(i), resolver), outputName(value, aggregate))()
}
}
Project(groupByExprsAttr ++ pivotOutputs, secondAgg)
} else {
val pivotAggregates: Seq[NamedExpression] = pivotValues.flatMap { value =>
def ifExpr(e: Expression) = {
If(
EqualNullSafe(
pivotColumn,
Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone))),
e, Literal(null))
}
aggregates.map { aggregate =>
val filteredAggregate = aggregate.transformDown {
            // The assumption is that the aggregate function ignores nulls. This is true for all
            // current AggregateFunctions, with the exception of First and Last in their default
            // mode (which we handle), and possibly some Hive UDAFs.
case First(expr, _) =>
First(ifExpr(expr), true)
case Last(expr, _) =>
Last(ifExpr(expr), true)
case a: AggregateFunction =>
a.withNewChildren(a.children.map(ifExpr))
}.transform {
// We are duplicating aggregates that are now computing a different value for each
// pivot value.
// TODO: Don't construct the physical container until after analysis.
case ae: AggregateExpression => ae.copy(resultId = NamedExpression.newExprId)
}
Alias(filteredAggregate, outputName(value, aggregate))()
}
}
Aggregate(groupByExprs, groupByExprs ++ pivotAggregates, child)
}
}
// Support any aggregate expression that can appear in an Aggregate plan except Pandas UDF.
// TODO: Support Pandas UDF.
private def checkValidAggregateExpression(expr: Expression): Unit = expr match {
case _: AggregateExpression => // OK and leave the argument check to CheckAnalysis.
case expr: PythonUDF if PythonUDF.isGroupedAggPandasUDF(expr) =>
failAnalysis("Pandas UDF aggregate expressions are currently not supported in pivot.")
case e: Attribute =>
failAnalysis(
s"Aggregate expression required for pivot, but '${e.sql}' " +
s"did not appear in any aggregate function.")
case e => e.children.foreach(checkValidAggregateExpression)
}
}
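  // Illustrative example: a query roughly like
  // {{{
  //   SELECT * FROM t PIVOT (sum(v) FOR k IN ('a', 'b'))
  // }}}
  // is rewritten either into the two-step Aggregate plan built around PivotFirst (when every
  // aggregate's result type is supported) or into a single Aggregate whose aggregates are
  // wrapped in If(k <=> value, ..., null), duplicated once per pivot value.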
case class ResolveNamespace(catalogManager: CatalogManager)
extends Rule[LogicalPlan] with LookupCatalog {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case s @ ShowTables(UnresolvedNamespace(Seq()), _) =>
s.copy(namespace = ResolvedNamespace(currentCatalog, catalogManager.currentNamespace))
case s @ ShowViews(UnresolvedNamespace(Seq()), _) =>
s.copy(namespace = ResolvedNamespace(currentCatalog, catalogManager.currentNamespace))
case UnresolvedNamespace(Seq()) =>
ResolvedNamespace(currentCatalog, Seq.empty[String])
case UnresolvedNamespace(CatalogAndNamespace(catalog, ns)) =>
ResolvedNamespace(catalog, ns)
}
}
private def isResolvingView: Boolean = AnalysisContext.get.catalogAndNamespace.nonEmpty
/**
* Resolve relations to temp views. This is not an actual rule, and is called by
* [[ResolveTables]] and [[ResolveRelations]].
*/
object ResolveTempViews extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case u @ UnresolvedRelation(ident, _, isStreaming) =>
lookupTempView(ident, isStreaming).getOrElse(u)
case i @ InsertIntoStatement(UnresolvedRelation(ident, _, false), _, _, _, _) =>
lookupTempView(ident)
.map(view => i.copy(table = view))
.getOrElse(i)
// TODO (SPARK-27484): handle streaming write commands when we have them.
case write: V2WriteCommand =>
write.table match {
case UnresolvedRelation(ident, _, false) =>
lookupTempView(ident).map(EliminateSubqueryAliases(_)).map {
case r: DataSourceV2Relation => write.withNewTable(r)
case _ => throw new AnalysisException("Cannot write into temp view " +
s"${ident.quoted} as it's not a data source v2 relation.")
}.getOrElse(write)
case _ => write
}
case u @ UnresolvedTable(ident) =>
lookupTempView(ident).foreach { _ =>
          u.failAnalysis(s"${ident.quoted} is a temp view, not a table.")
}
u
case u @ UnresolvedTableOrView(ident, allowTempView) =>
lookupTempView(ident)
.map { _ =>
if (!allowTempView) {
              u.failAnalysis(s"${ident.quoted} is a temp view, not a table or permanent view.")
}
ResolvedView(ident.asIdentifier, isTemp = true)
}
.getOrElse(u)
}
def lookupTempView(
identifier: Seq[String], isStreaming: Boolean = false): Option[LogicalPlan] = {
      // A permanent view can't refer to temp views, so there is no need to look them up at all.
if (isResolvingView) return None
val tmpView = identifier match {
case Seq(part1) => v1SessionCatalog.lookupTempView(part1)
case Seq(part1, part2) => v1SessionCatalog.lookupGlobalTempView(part1, part2)
case _ => None
}
if (isStreaming && tmpView.nonEmpty && !tmpView.get.isStreaming) {
        throw new AnalysisException(s"${identifier.quoted} is not a temp view over a streaming " +
          s"logical plan; please use a batch API such as `DataFrameReader.table` to read it.")
}
tmpView
}
}
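  // Illustrative lookups performed by lookupTempView above (the global temp database name is
  // configurable; "global_temp" is only the common default):
  // {{{
  //   lookupTempView(Seq("v"))                  // local temp view "v"
  //   lookupTempView(Seq("global_temp", "v"))   // global temp view "v" in the global temp db
  // }}}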
  // If we are resolving relations inside views, we need to expand single-part relation names with
  // the catalog and namespace that were current when the view was created.
private def expandRelationName(nameParts: Seq[String]): Seq[String] = {
if (!isResolvingView) return nameParts
if (nameParts.length == 1) {
AnalysisContext.get.catalogAndNamespace :+ nameParts.head
} else if (catalogManager.isCatalogRegistered(nameParts.head)) {
nameParts
} else {
AnalysisContext.get.catalogAndNamespace.head +: nameParts
}
}
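  // Illustrative expansion while resolving a view created under catalog "cat" and namespace "db"
  // (i.e. AnalysisContext.get.catalogAndNamespace == Seq("cat", "db")):
  // {{{
  //   expandRelationName(Seq("t"))         // => Seq("cat", "db", "t")
  //   expandRelationName(Seq("db2", "t"))  // => Seq("cat", "db2", "t"), unless "db2" is a
  //                                        //    registered catalog, in which case it is kept
  // }}}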
/**
* Adds metadata columns to output for child relations when nodes are missing resolved attributes.
*
* References to metadata columns are resolved using columns from [[LogicalPlan.metadataOutput]],
* but the relation's output does not include the metadata columns until the relation is replaced
* using [[DataSourceV2Relation.withMetadataColumns()]]. Unless this rule adds metadata to the
* relation's output, the analyzer will detect that nothing produces the columns.
*
* This rule only adds metadata columns when a node is resolved but is missing input from its
* children. This ensures that metadata columns are not added to the plan unless they are used. By
* checking only resolved nodes, this ensures that * expansion is already done so that metadata
* columns are not accidentally selected by *.
*/
object AddMetadataColumns extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp {
case node if node.resolved && node.children.nonEmpty && node.missingInput.nonEmpty =>
node resolveOperatorsUp {
case rel: DataSourceV2Relation =>
rel.withMetadataColumns()
}
}
}
/**
* Resolve table relations with concrete relations from v2 catalog.
*
* [[ResolveRelations]] still resolves v1 tables.
*/
object ResolveTables extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = ResolveTempViews(plan).resolveOperatorsUp {
case u: UnresolvedRelation =>
lookupV2Relation(u.multipartIdentifier, u.options, u.isStreaming)
.map { relation =>
val (catalog, ident) = relation match {
case ds: DataSourceV2Relation => (ds.catalog, ds.identifier.get)
case s: StreamingRelationV2 => (s.catalog, s.identifier.get)
}
SubqueryAlias(catalog.get.name +: ident.namespace :+ ident.name, relation)
}.getOrElse(u)
case u @ UnresolvedTable(NonSessionCatalogAndIdentifier(catalog, ident)) =>
CatalogV2Util.loadTable(catalog, ident)
.map(ResolvedTable(catalog.asTableCatalog, ident, _))
.getOrElse(u)
case u @ UnresolvedTableOrView(NonSessionCatalogAndIdentifier(catalog, ident), _) =>
CatalogV2Util.loadTable(catalog, ident)
.map(ResolvedTable(catalog.asTableCatalog, ident, _))
.getOrElse(u)
case i @ InsertIntoStatement(u @ UnresolvedRelation(_, _, false), _, _, _, _)
if i.query.resolved =>
lookupV2Relation(u.multipartIdentifier, u.options, false)
.map(v2Relation => i.copy(table = v2Relation))
.getOrElse(i)
// TODO (SPARK-27484): handle streaming write commands when we have them.
case write: V2WriteCommand =>
write.table match {
case u: UnresolvedRelation if !u.isStreaming =>
lookupV2Relation(u.multipartIdentifier, u.options, false).map {
case r: DataSourceV2Relation => write.withNewTable(r)
case other => throw new IllegalStateException(
"[BUG] unexpected plan returned by `lookupV2Relation`: " + other)
}.getOrElse(write)
case _ => write
}
case alter @ AlterTable(_, _, u: UnresolvedV2Relation, _) =>
CatalogV2Util.loadRelation(u.catalog, u.tableName)
.map(rel => alter.copy(table = rel))
.getOrElse(alter)
case u: UnresolvedV2Relation =>
CatalogV2Util.loadRelation(u.catalog, u.tableName).getOrElse(u)
}
/**
* Performs the lookup of DataSourceV2 Tables from v2 catalog.
*/
private def lookupV2Relation(
identifier: Seq[String],
options: CaseInsensitiveStringMap,
isStreaming: Boolean): Option[LogicalPlan] =
expandRelationName(identifier) match {
case NonSessionCatalogAndIdentifier(catalog, ident) =>
CatalogV2Util.loadTable(catalog, ident) match {
case Some(table) =>
if (isStreaming) {
Some(StreamingRelationV2(None, table.name, table, options,
table.schema.toAttributes, Some(catalog), Some(ident), None))
} else {
Some(DataSourceV2Relation.create(table, Some(catalog), Some(ident), options))
}
case None => None
}
case _ => None
}
}
/**
* Replaces [[UnresolvedRelation]]s with concrete relations from the catalog.
*/
object ResolveRelations extends Rule[LogicalPlan] {
// The current catalog and namespace may be different from when the view was created, we must
// resolve the view logical plan here, with the catalog and namespace stored in view metadata.
// This is done by keeping the catalog and namespace in `AnalysisContext`, and analyzer will
// look at `AnalysisContext.catalogAndNamespace` when resolving relations with single-part name.
// If `AnalysisContext.catalogAndNamespace` is non-empty, analyzer will expand single-part names
// with it, instead of current catalog and namespace.
private def resolveViews(plan: LogicalPlan): LogicalPlan = plan match {
    // The view's child should be a logical plan parsed from `desc.viewText`; the variable
    // `viewText` should be defined, or else we throw an error when generating the View
    // operator.
case view @ View(desc, _, child) if !child.resolved =>
// Resolve all the UnresolvedRelations and Views in the child.
val newChild = AnalysisContext.withAnalysisContext(desc.viewCatalogAndNamespace) {
if (AnalysisContext.get.nestedViewDepth > conf.maxNestedViewDepth) {
view.failAnalysis(s"The depth of view ${desc.identifier} exceeds the maximum " +
s"view resolution depth (${conf.maxNestedViewDepth}). Analysis is aborted to " +
s"avoid errors. Increase the value of ${SQLConf.MAX_NESTED_VIEW_DEPTH.key} to work " +
"around this.")
}
executeSameContext(child)
}
view.copy(child = newChild)
case p @ SubqueryAlias(_, view: View) =>
p.copy(child = resolveViews(view))
case _ => plan
}
def apply(plan: LogicalPlan): LogicalPlan = ResolveTempViews(plan).resolveOperatorsUp {
case i @ InsertIntoStatement(table, _, _, _, _) if i.query.resolved =>
val relation = table match {
case u @ UnresolvedRelation(_, _, false) =>
lookupRelation(u.multipartIdentifier, u.options, false).getOrElse(u)
case other => other
}
EliminateSubqueryAliases(relation) match {
case v: View =>
table.failAnalysis(s"Inserting into a view is not allowed. View: ${v.desc.identifier}.")
case other => i.copy(table = other)
}
// TODO (SPARK-27484): handle streaming write commands when we have them.
case write: V2WriteCommand =>
write.table match {
case u: UnresolvedRelation if !u.isStreaming =>
lookupRelation(u.multipartIdentifier, u.options, false)
.map(EliminateSubqueryAliases(_))
.map {
case v: View => write.failAnalysis(
s"Writing into a view is not allowed. View: ${v.desc.identifier}.")
case u: UnresolvedCatalogRelation => write.failAnalysis(
"Cannot write into v1 table: " + u.tableMeta.identifier)
case r: DataSourceV2Relation => write.withNewTable(r)
case other => throw new IllegalStateException(
"[BUG] unexpected plan returned by `lookupRelation`: " + other)
}.getOrElse(write)
case _ => write
}
case u: UnresolvedRelation =>
lookupRelation(u.multipartIdentifier, u.options, u.isStreaming)
.map(resolveViews).getOrElse(u)
case u @ UnresolvedTable(identifier) =>
lookupTableOrView(identifier).map {
case v: ResolvedView =>
val viewStr = if (v.isTemp) "temp view" else "view"
            u.failAnalysis(s"${v.identifier.quoted} is a $viewStr, not a table.")
case table => table
}.getOrElse(u)
case u @ UnresolvedTableOrView(identifier, _) =>
lookupTableOrView(identifier).getOrElse(u)
}
private def lookupTableOrView(identifier: Seq[String]): Option[LogicalPlan] = {
expandRelationName(identifier) match {
case SessionCatalogAndIdentifier(catalog, ident) =>
CatalogV2Util.loadTable(catalog, ident).map {
case v1Table: V1Table if v1Table.v1Table.tableType == CatalogTableType.VIEW =>
ResolvedView(ident, isTemp = false)
case table =>
ResolvedTable(catalog.asTableCatalog, ident, table)
}
case _ => None
}
}
// Look up a relation from the session catalog with the following logic:
// 1) If the resolved catalog is not session catalog, return None.
// 2) If a relation is not found in the catalog, return None.
// 3) If a v1 table is found, create a v1 relation. Otherwise, create a v2 relation.
private def lookupRelation(
identifier: Seq[String],
options: CaseInsensitiveStringMap,
isStreaming: Boolean): Option[LogicalPlan] = {
expandRelationName(identifier) match {
case SessionCatalogAndIdentifier(catalog, ident) =>
lazy val loaded = CatalogV2Util.loadTable(catalog, ident).map {
case v1Table: V1Table =>
if (isStreaming) {
if (v1Table.v1Table.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(s"${identifier.quoted} is a permanent view, " +
"which is not supported by streaming reading API such as " +
"`DataStreamReader.table` yet.")
}
SubqueryAlias(
catalog.name +: ident.asMultipartIdentifier,
UnresolvedCatalogRelation(v1Table.v1Table, options, isStreaming = true))
} else {
v1SessionCatalog.getRelation(v1Table.v1Table, options)
}
case table =>
if (isStreaming) {
val v1Fallback = table match {
case withFallback: V2TableWithV1Fallback =>
Some(UnresolvedCatalogRelation(withFallback.v1Table, isStreaming = true))
case _ => None
}
SubqueryAlias(
catalog.name +: ident.asMultipartIdentifier,
StreamingRelationV2(None, table.name, table, options, table.schema.toAttributes,
Some(catalog), Some(ident), v1Fallback))
} else {
SubqueryAlias(
catalog.name +: ident.asMultipartIdentifier,
DataSourceV2Relation.create(table, Some(catalog), Some(ident), options))
}
}
val key = catalog.name +: ident.namespace :+ ident.name
AnalysisContext.get.relationCache.get(key).map(_.transform {
case multi: MultiInstanceRelation => multi.newInstance()
}).orElse {
loaded.foreach(AnalysisContext.get.relationCache.update(key, _))
loaded
}
case _ => None
}
}
}
object ResolveInsertInto extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case i @ InsertIntoStatement(r: DataSourceV2Relation, _, _, _, _) if i.query.resolved =>
        // `ifPartitionNotExists` means append with validation, but validation is not supported
if (i.ifPartitionNotExists) {
throw QueryCompilationErrors.unsupportedIfNotExistsError(r.table.name)
}
val partCols = partitionColumnNames(r.table)
validatePartitionSpec(partCols, i.partitionSpec)
val staticPartitions = i.partitionSpec.filter(_._2.isDefined).mapValues(_.get).toMap
val query = addStaticPartitionColumns(r, i.query, staticPartitions)
if (!i.overwrite) {
AppendData.byPosition(r, query)
} else if (conf.partitionOverwriteMode == PartitionOverwriteMode.DYNAMIC) {
OverwritePartitionsDynamic.byPosition(r, query)
} else {
OverwriteByExpression.byPosition(r, query, staticDeleteExpression(r, staticPartitions))
}
}
private def partitionColumnNames(table: Table): Seq[String] = {
// get partition column names. in v2, partition columns are columns that are stored using an
// identity partition transform because the partition values and the column values are
// identical. otherwise, partition values are produced by transforming one or more source
// columns and cannot be set directly in a query's PARTITION clause.
table.partitioning.flatMap {
case IdentityTransform(FieldReference(Seq(name))) => Some(name)
case _ => None
}
}
private def validatePartitionSpec(
partitionColumnNames: Seq[String],
partitionSpec: Map[String, Option[String]]): Unit = {
// check that each partition name is a partition column. otherwise, it is not valid
partitionSpec.keySet.foreach { partitionName =>
partitionColumnNames.find(name => conf.resolver(name, partitionName)) match {
case Some(_) =>
case None =>
throw QueryCompilationErrors.nonPartitionColError(partitionName)
}
}
}
private def addStaticPartitionColumns(
relation: DataSourceV2Relation,
query: LogicalPlan,
staticPartitions: Map[String, String]): LogicalPlan = {
if (staticPartitions.isEmpty) {
query
} else {
// add any static value as a literal column
val withStaticPartitionValues = {
// for each static name, find the column name it will replace and check for unknowns.
val outputNameToStaticName = staticPartitions.keySet.map(staticName =>
relation.output.find(col => conf.resolver(col.name, staticName)) match {
case Some(attr) =>
attr.name -> staticName
case _ =>
throw QueryCompilationErrors.addStaticValToUnknownColError(staticName)
}).toMap
val queryColumns = query.output.iterator
// for each output column, add the static value as a literal, or use the next input
// column. this does not fail if input columns are exhausted and adds remaining columns
// at the end. both cases will be caught by ResolveOutputRelation and will fail the
// query with a helpful error message.
relation.output.flatMap { col =>
outputNameToStaticName.get(col.name).flatMap(staticPartitions.get) match {
case Some(staticValue) =>
Some(Alias(Cast(Literal(staticValue), col.dataType), col.name)())
case _ if queryColumns.hasNext =>
Some(queryColumns.next)
case _ =>
None
}
} ++ queryColumns
}
Project(withStaticPartitionValues, query)
}
}
private def staticDeleteExpression(
relation: DataSourceV2Relation,
staticPartitions: Map[String, String]): Expression = {
if (staticPartitions.isEmpty) {
Literal(true)
} else {
staticPartitions.map { case (name, value) =>
relation.output.find(col => conf.resolver(col.name, name)) match {
case Some(attr) =>
// the delete expression must reference the table's column names, but these attributes
// are not available when CheckAnalysis runs because the relation is not a child of
// the logical operation. instead, expressions are resolved after
// ResolveOutputRelation runs, using the query's column names that will match the
// table names at that point. because resolution happens after a future rule, create
// an UnresolvedAttribute.
EqualTo(UnresolvedAttribute(attr.name), Cast(Literal(value), attr.dataType))
case None =>
throw QueryCompilationErrors.unknownStaticPartitionColError(name)
}
}.reduce(And)
}
}
}
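  // Illustrative example: an insert roughly like
  // {{{
  //   INSERT OVERWRITE TABLE t PARTITION (p = '1') SELECT ...
  // }}}
  // has the static value '1' added as a literal column for `p` by addStaticPartitionColumns, and
  // (under the static partition overwrite mode) the overwrite condition becomes roughly
  // EqualTo(UnresolvedAttribute("p"), Cast(Literal("1"), p.dataType)).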
/**
* Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from
* a logical plan node's children.
*/
object ResolveReferences extends Rule[LogicalPlan] {
/**
* Generate a new logical plan for the right child with different expression IDs
* for all conflicting attributes.
*/
    private def dedupRight(left: LogicalPlan, right: LogicalPlan): LogicalPlan = {
val conflictingAttributes = left.outputSet.intersect(right.outputSet)
logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " +
s"between $left and $right")
/**
       * For a LogicalPlan like MultiInstanceRelation, Project, Aggregate, etc., whose output
       * doesn't inherit directly from its children, we can just stop collecting on it, because we
       * can always replace all the lower conflicting attributes with the new attributes from the
       * new plan. Theoretically, we should collect recursively for Generate and Window, but we
       * leave it to the next batch to reduce possible overhead, because this should be a corner
       * case.
*/
def collectConflictPlans(plan: LogicalPlan): Seq[(LogicalPlan, LogicalPlan)] = plan match {
// Handle base relations that might appear more than once.
case oldVersion: MultiInstanceRelation
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
val newVersion = oldVersion.newInstance()
Seq((oldVersion, newVersion))
case oldVersion: SerializeFromObject
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(
serializer = oldVersion.serializer.map(_.newInstance()))))
// Handle projects that create conflicting aliases.
case oldVersion @ Project(projectList, _)
if findAliases(projectList).intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(projectList = newAliases(projectList))))
        // We don't need to search the child plan recursively if the projectList of a Project
        // is composed only of Aliases and doesn't contain any conflicting attributes, because
        // even if the child plan has some conflicting attributes, they will be aliased to
        // non-conflicting attributes by the Project at the end.
case _ @ Project(projectList, _)
if findAliases(projectList).size == projectList.size =>
Nil
case oldVersion @ Aggregate(_, aggregateExpressions, _)
if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(
aggregateExpressions = newAliases(aggregateExpressions))))
// We don't search the child plan recursively for the same reason as the above Project.
case _ @ Aggregate(_, aggregateExpressions, _)
if findAliases(aggregateExpressions).size == aggregateExpressions.size =>
Nil
case oldVersion @ FlatMapGroupsInPandas(_, _, output, _)
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(output = output.map(_.newInstance()))))
case oldVersion: Generate
if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
val newOutput = oldVersion.generatorOutput.map(_.newInstance())
Seq((oldVersion, oldVersion.copy(generatorOutput = newOutput)))
case oldVersion: Expand
if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
val producedAttributes = oldVersion.producedAttributes
val newOutput = oldVersion.output.map { attr =>
if (producedAttributes.contains(attr)) {
attr.newInstance()
} else {
attr
}
}
Seq((oldVersion, oldVersion.copy(output = newOutput)))
case oldVersion @ Window(windowExpressions, _, _, child)
if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes)
.nonEmpty =>
Seq((oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions))))
case _ => plan.children.flatMap(collectConflictPlans)
}
val conflictPlans = collectConflictPlans(right)
/*
* Note that it's possible `conflictPlans` can be empty which implies that there
* is a logical plan node that produces new references that this rule cannot handle.
* When that is the case, there must be another rule that resolves these conflicts.
* Otherwise, the analysis will fail.
*/
if (conflictPlans.isEmpty) {
right
} else {
val planMapping = conflictPlans.toMap
right.transformUpWithNewOutput {
case oldPlan =>
val newPlanOpt = planMapping.get(oldPlan)
newPlanOpt.map { newPlan =>
newPlan -> oldPlan.output.zip(newPlan.output)
}.getOrElse(oldPlan -> Nil)
}
}
}
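    // Illustrative example: in a self-join such as
    // {{{
    //   df.join(df, df("id") === df("id"))
    // }}}
    // both sides expose the same attribute ids, so the right-hand MultiInstanceRelation is
    // re-instantiated with fresh expression ids before resolution continues.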
/**
     * Resolves the attribute and extract-value expression(s) by traversing the
     * input expression in a top-down manner. The traversal is done top-down as
     * we need to skip over unbound lambda function expressions. The lambda expressions are
     * resolved in a different rule, [[ResolveLambdaVariables]].
*
* Example :
* SELECT transform(array(1, 2, 3), (x, i) -> x + i)"
*
* In the case above, x and i are resolved as lambda variables in [[ResolveLambdaVariables]]
*
* Note : In this routine, the unresolved attributes are resolved from the input plan's
* children attributes.
*
     * @param e The expression that needs to be resolved.
* @param q The LogicalPlan whose children are used to resolve expression's attribute.
* @param trimAlias When true, trim unnecessary alias of `GetStructField`. Note that,
* we cannot trim the alias of top-level `GetStructField`, as we should
* resolve `UnresolvedAttribute` to a named expression. The caller side
* can trim the alias of top-level `GetStructField` if it's safe to do so.
* @return resolved Expression.
*/
private def resolveExpressionTopDown(
e: Expression,
q: LogicalPlan,
trimAlias: Boolean = false): Expression = {
def innerResolve(e: Expression, isTopLevel: Boolean): Expression = {
if (e.resolved) return e
e match {
case f: LambdaFunction if !f.bound => f
case u @ UnresolvedAttribute(nameParts) =>
          // Leave it unchanged if resolution fails; hopefully it will be resolved in the next
          // round.
val resolved =
withPosition(u) {
q.resolveChildren(nameParts, resolver)
.orElse(resolveLiteralFunction(nameParts, u, q))
.getOrElse(u)
}
val result = resolved match {
            // As the comment on `resolveExpressionTopDown`'s param `trimAlias` says, when
            // trimAlias = true we trim unnecessary aliases of `GetStructField` but do not trim
            // the alias of a top-level `GetStructField`. Since we will call CleanupAliases later
            // in the Analyzer, trimming non-top-level unnecessary aliases of `GetStructField`
            // here is safe.
case Alias(s: GetStructField, _) if trimAlias && !isTopLevel => s
case others => others
}
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldExpr) if child.resolved =>
ExtractValue(child, fieldExpr, resolver)
case _ => e.mapChildren(innerResolve(_, isTopLevel = false))
}
}
innerResolve(e, isTopLevel = true)
}
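    // Illustrative example: in
    // {{{
    //   SELECT transform(array(1, 2, 3), x -> x + 1) FROM t
    // }}}
    // the unbound LambdaFunction is skipped here and left to ResolveLambdaVariables, while plain
    // UnresolvedAttributes are resolved top-down against the plan's children.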
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan if !p.childrenResolved => p
// If the projection list contains Stars, expand it.
case p: Project if containsStar(p.projectList) =>
p.copy(projectList = buildExpandedProjectList(p.projectList, p.child))
// If the aggregate function argument contains Stars, expand it.
case a: Aggregate if containsStar(a.aggregateExpressions) =>
if (a.groupingExpressions.exists(_.isInstanceOf[UnresolvedOrdinal])) {
failAnalysis(
"Star (*) is not allowed in select list when GROUP BY ordinal position is used")
} else {
a.copy(aggregateExpressions = buildExpandedProjectList(a.aggregateExpressions, a.child))
}
// If the script transformation input contains Stars, expand it.
case t: ScriptTransformation if containsStar(t.input) =>
t.copy(
input = t.input.flatMap {
case s: Star => s.expand(t.child, resolver)
case o => o :: Nil
}
)
case g: Generate if containsStar(g.generator.children) =>
failAnalysis("Invalid usage of '*' in explode/json_tuple/UDTF")
// To resolve duplicate expression IDs for Join and Intersect
case j @ Join(left, right, _, _, _) if !j.duplicateResolved =>
j.copy(right = dedupRight(left, right))
case f @ FlatMapCoGroupsInPandas(leftAttributes, rightAttributes, _, _, left, right) =>
val leftRes = leftAttributes
.map(x => resolveExpressionBottomUp(x, left).asInstanceOf[Attribute])
val rightRes = rightAttributes
.map(x => resolveExpressionBottomUp(x, right).asInstanceOf[Attribute])
f.copy(leftAttributes = leftRes, rightAttributes = rightRes)
      // Intersect/except will be rewritten to a join at the beginning of the optimizer. Here we
      // need to deduplicate the right-side plan, so that we won't produce an invalid self-join
      // later.
case i @ Intersect(left, right, _) if !i.duplicateResolved =>
i.copy(right = dedupRight(left, right))
case e @ Except(left, right, _) if !e.duplicateResolved =>
e.copy(right = dedupRight(left, right))
// Only after we finish by-name resolution for Union
case u: Union if !u.byName && !u.duplicateResolved =>
// Use projection-based de-duplication for Union to avoid breaking the checkpoint sharing
// feature in streaming.
val newChildren = u.children.foldRight(Seq.empty[LogicalPlan]) { (head, tail) =>
head +: tail.map {
case child if head.outputSet.intersect(child.outputSet).isEmpty =>
child
case child =>
val projectList = child.output.map { attr =>
Alias(attr, attr.name)()
}
Project(projectList, child)
}
}
u.copy(children = newChildren)
      // When resolving `SortOrder`s in Sort based on the child, don't report errors, as
      // we still have a chance to resolve them based on its descendants.
case s @ Sort(ordering, global, child) if child.resolved && !s.resolved =>
val newOrdering =
ordering.map(order => resolveExpressionBottomUp(order, child).asInstanceOf[SortOrder])
Sort(newOrdering, global, child)
// A special case for Generate, because the output of Generate should not be resolved by
// ResolveReferences. Attributes in the output will be resolved by ResolveGenerate.
case g @ Generate(generator, _, _, _, _, _) if generator.resolved => g
case g @ Generate(generator, join, outer, qualifier, output, child) =>
val newG = resolveExpressionBottomUp(generator, child, throws = true)
if (newG.fastEquals(generator)) {
g
} else {
Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child)
}
// Skips plan which contains deserializer expressions, as they should be resolved by another
// rule: ResolveDeserializer.
case plan if containsDeserializer(plan.expressions) => plan
// SPARK-31670: Struct fields resolved in groupByExpressions and aggregateExpressions
// with CUBE/ROLLUP are wrapped with aliases like Alias(GetStructField, name) that carry
// different ExprIds. This prevents aggregateExpressions from being replaced by the
// expanded groupByExpressions in `ResolveGroupingAnalytics.constructAggregateExprs()`,
// so we trim the unnecessary aliases of GetStructField here.
case a: Aggregate =>
val planForResolve = a.child match {
// SPARK-25942: Resolves aggregate expressions with `AppendColumns`'s children, instead of
// `AppendColumns`, because `AppendColumns`'s serializer might produce conflicting
// attribute names, leading to an ambiguous reference exception.
case appendColumns: AppendColumns => appendColumns
case _ => a
}
val resolvedGroupingExprs = a.groupingExpressions
.map(resolveExpressionTopDown(_, planForResolve, trimAlias = true))
.map(trimTopLevelGetStructFieldAlias)
val resolvedAggExprs = a.aggregateExpressions
.map(resolveExpressionTopDown(_, planForResolve, trimAlias = true))
.map(_.asInstanceOf[NamedExpression])
a.copy(resolvedGroupingExprs, resolvedAggExprs, a.child)
// SPARK-31670: Struct fields resolved in selectedGroupByExprs/groupByExprs and aggregations
// are wrapped with aliases like Alias(GetStructField, name) that carry different ExprIds.
// This prevents aggregateExpressions from being replaced by the expanded groupByExpressions
// in `ResolveGroupingAnalytics.constructAggregateExprs()`, so we trim the unnecessary
// aliases of GetStructField here.
case g: GroupingSets =>
val resolvedSelectedExprs = g.selectedGroupByExprs
.map(_.map(resolveExpressionTopDown(_, g, trimAlias = true))
.map(trimTopLevelGetStructFieldAlias))
val resolvedGroupingExprs = g.groupByExprs
.map(resolveExpressionTopDown(_, g, trimAlias = true))
.map(trimTopLevelGetStructFieldAlias)
val resolvedAggExprs = g.aggregations
.map(resolveExpressionTopDown(_, g, trimAlias = true))
.map(_.asInstanceOf[NamedExpression])
g.copy(resolvedSelectedExprs, resolvedGroupingExprs, g.child, resolvedAggExprs)
case o: OverwriteByExpression if o.table.resolved =>
// The delete condition of `OverwriteByExpression` will be passed to the table
// implementation and should be resolved based on the table schema.
o.copy(deleteExpr = resolveExpressionBottomUp(o.deleteExpr, o.table))
case m @ MergeIntoTable(targetTable, sourceTable, _, _, _)
if !m.resolved && targetTable.resolved && sourceTable.resolved =>
EliminateSubqueryAliases(targetTable) match {
case r: NamedRelation if r.skipSchemaResolution =>
// Do not resolve the expression if the target table accepts any schema.
// This allows data sources to customize their own resolution logic using
// custom resolution rules.
m
case _ =>
val newMatchedActions = m.matchedActions.map {
case DeleteAction(deleteCondition) =>
val resolvedDeleteCondition = deleteCondition.map(resolveExpressionTopDown(_, m))
DeleteAction(resolvedDeleteCondition)
case UpdateAction(updateCondition, assignments) =>
val resolvedUpdateCondition = updateCondition.map(resolveExpressionTopDown(_, m))
// The update value can access columns from both target and source tables.
UpdateAction(
resolvedUpdateCondition,
resolveAssignments(assignments, m, resolveValuesWithSourceOnly = false))
case o => o
}
val newNotMatchedActions = m.notMatchedActions.map {
case InsertAction(insertCondition, assignments) =>
// The insert action is used when not matched, so its condition and value can only
// access columns from the source table.
val resolvedInsertCondition =
insertCondition.map(resolveExpressionTopDown(_, Project(Nil, m.sourceTable)))
InsertAction(
resolvedInsertCondition,
resolveAssignments(assignments, m, resolveValuesWithSourceOnly = true))
case o => o
}
val resolvedMergeCondition = resolveExpressionTopDown(m.mergeCondition, m)
m.copy(mergeCondition = resolvedMergeCondition,
matchedActions = newMatchedActions,
notMatchedActions = newNotMatchedActions)
}
// Skip the having clause here, this will be handled in ResolveAggregateFunctions.
case h: UnresolvedHaving => h
case q: LogicalPlan =>
logTrace(s"Attempting to resolve ${q.simpleString(SQLConf.get.maxToStringFields)}")
q.mapExpressions(resolveExpressionTopDown(_, q))
}
def resolveAssignments(
assignments: Seq[Assignment],
mergeInto: MergeIntoTable,
resolveValuesWithSourceOnly: Boolean): Seq[Assignment] = {
if (assignments.isEmpty) {
val expandedColumns = mergeInto.targetTable.output
val expandedValues = mergeInto.sourceTable.output
expandedColumns.zip(expandedValues).map(kv => Assignment(kv._1, kv._2))
} else {
assignments.map { assign =>
val resolvedKey = assign.key match {
case c if !c.resolved =>
resolveExpressionTopDown(c, Project(Nil, mergeInto.targetTable))
case o => o
}
val resolvedValue = assign.value match {
// The update values may contain target and/or source references.
case c if !c.resolved =>
if (resolveValuesWithSourceOnly) {
resolveExpressionTopDown(c, Project(Nil, mergeInto.sourceTable))
} else {
resolveExpressionTopDown(c, mergeInto)
}
case o => o
}
Assignment(resolvedKey, resolvedValue)
}
}
}
def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = {
expressions.map {
case a: Alias => Alias(a.child, a.name)()
case other => other
}
}
def findAliases(projectList: Seq[NamedExpression]): AttributeSet = {
AttributeSet(projectList.collect { case a: Alias => a.toAttribute })
}
// This method trims the top-level GetStructField aliases of
// groupByExpressions/selectedGroupByExprs. Since these expressions are not originally
// NamedExpressions, it is safe to trim their top-level GetStructField aliases.
def trimTopLevelGetStructFieldAlias(e: Expression): Expression = {
e match {
case Alias(s: GetStructField, _) => s
case other => other
}
}
/**
* Build a project list for Project/Aggregate and expand the star if possible
*/
private def buildExpandedProjectList(
exprs: Seq[NamedExpression],
child: LogicalPlan): Seq[NamedExpression] = {
exprs.flatMap {
// Using Dataframe/Dataset API: testData2.groupBy($"a", $"b").agg($"*")
case s: Star => s.expand(child, resolver)
// Using SQL API without running ResolveAlias: SELECT * FROM testData2 group by a, b
case UnresolvedAlias(s: Star, _) => s.expand(child, resolver)
case o if containsStar(o :: Nil) => expandStarExpression(o, child) :: Nil
case o => o :: Nil
}.map(_.asInstanceOf[NamedExpression])
}
/**
* Returns true if `exprs` contains a [[Star]].
*/
def containsStar(exprs: Seq[Expression]): Boolean =
exprs.exists(_.collect { case _: Star => true }.nonEmpty)
/**
* Expands the matching attribute.*'s in `child`'s output.
*/
def expandStarExpression(expr: Expression, child: LogicalPlan): Expression = {
expr.transformUp {
case f1: UnresolvedFunction if containsStar(f1.arguments) =>
f1.copy(arguments = f1.arguments.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case c: CreateNamedStruct if containsStar(c.valExprs) =>
val newChildren = c.children.grouped(2).flatMap {
case Seq(k, s : Star) => CreateStruct(s.expand(child, resolver)).children
case kv => kv
}
c.copy(children = newChildren.toList )
case c: CreateArray if containsStar(c.children) =>
c.copy(children = c.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case p: Murmur3Hash if containsStar(p.children) =>
p.copy(children = p.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case p: XxHash64 if containsStar(p.children) =>
p.copy(children = p.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
// count(*) has been replaced by count(1)
case o if containsStar(o.children) =>
failAnalysis(s"Invalid usage of '*' in expression '${o.prettyName}'")
}
}
}
private def containsDeserializer(exprs: Seq[Expression]): Boolean = {
exprs.exists(_.find(_.isInstanceOf[UnresolvedDeserializer]).isDefined)
}
/**
* Literal functions do not require the user to specify parentheses when calling them.
* When an attribute is not resolvable, we try to resolve it as a literal function.
*/
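// Illustrative sketch (hypothetical table `t`): in a query such as
//   SELECT current_date FROM t
// `current_date` is first treated as an unresolved attribute; if `t` has no such column,
// this method resolves it to the CurrentDate() literal function, wrapped in an Alias when
// it appears in a Project/Aggregate/Window list.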
private def resolveLiteralFunction(
nameParts: Seq[String],
attribute: UnresolvedAttribute,
plan: LogicalPlan): Option[Expression] = {
if (nameParts.length != 1) return None
val isNamedExpression = plan match {
case Aggregate(_, aggregateExpressions, _) => aggregateExpressions.contains(attribute)
case Project(projectList, _) => projectList.contains(attribute)
case Window(windowExpressions, _, _, _) => windowExpressions.contains(attribute)
case _ => false
}
val wrapper: Expression => Expression =
if (isNamedExpression) f => Alias(f, toPrettySQL(f))() else identity
// support CURRENT_DATE and CURRENT_TIMESTAMP
val literalFunctions = Seq(CurrentDate(), CurrentTimestamp())
val name = nameParts.head
val func = literalFunctions.find(e => caseInsensitiveResolution(e.prettyName, name))
func.map(wrapper)
}
/**
* Resolves the attribute, column value and extract value expression(s) by traversing the
* input expression in bottom-up manner. In order to resolve the nested complex type fields
* correctly, this function makes use of `throws` parameter to control when to raise an
* AnalysisException.
*
* Example :
* SELECT a.b FROM t ORDER BY b[0].d
*
* In the above example, b needs to be resolved before d can be resolved. Given we are
* doing a bottom up traversal, it will first attempt to resolve d and fail as b has not
* been resolved yet. If `throws` is false, this function will handle the exception by
* returning the original attribute. In this case `d` will be resolved in subsequent passes
* after `b` is resolved.
*/
protected[sql] def resolveExpressionBottomUp(
expr: Expression,
plan: LogicalPlan,
throws: Boolean = false): Expression = {
if (expr.resolved) return expr
// Resolve the expression in one round.
// If resolution fails (e.g. trying to resolve `a.b` when `a` doesn't exist) and
// throws == false, swallow the exception and return the original expression.
// Otherwise, let the exception propagate.
try {
expr transformUp {
case GetColumnByOrdinal(ordinal, _) => plan.output(ordinal)
case u @ UnresolvedAttribute(nameParts) =>
val result =
withPosition(u) {
plan.resolve(nameParts, resolver)
.orElse(resolveLiteralFunction(nameParts, u, plan))
.getOrElse(u)
}
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
} catch {
case a: AnalysisException if !throws => expr
}
}
/**
* In many dialects of SQL it is valid to use ordinal positions in order/sort by and group by
* clauses. This rule converts ordinal positions to the corresponding expressions in the
* select list. This support was introduced in Spark 2.0.
*
* - When the sort references or group by expressions are not integers but foldable expressions,
* just ignore them.
* - When spark.sql.orderByOrdinal/spark.sql.groupByOrdinal is set to false, ignore the position
* numbers too.
*
* Before the release of Spark 2.0, the literals in order/sort by and group by clauses
* had no effect on the results.
*/
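// Illustrative example (hypothetical table `t(a, b)`), assuming spark.sql.orderByOrdinal
// and spark.sql.groupByOrdinal are enabled:
//   SELECT a, count(b) FROM t GROUP BY 1 ORDER BY 2
// is rewritten by this rule as if it had been written
//   SELECT a, count(b) FROM t GROUP BY a ORDER BY count(b)
// i.e. each UnresolvedOrdinal is replaced by the corresponding select-list expression.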
object ResolveOrdinalInOrderByAndGroupBy extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
// Replace the index with the corresponding attribute for ORDER BY;
// the index is a 1-based position into the projection list.
case Sort(orders, global, child)
if orders.exists(_.child.isInstanceOf[UnresolvedOrdinal]) =>
val newOrders = orders map {
case s @ SortOrder(UnresolvedOrdinal(index), direction, nullOrdering, _) =>
if (index > 0 && index <= child.output.size) {
SortOrder(child.output(index - 1), direction, nullOrdering, Set.empty)
} else {
s.failAnalysis(
s"ORDER BY position $index is not in select list " +
s"(valid range is [1, ${child.output.size}])")
}
case o => o
}
Sort(newOrders, global, child)
// Replace the index with the corresponding expression in aggregateExpressions. The index is
// a 1-based position into aggregateExpressions, i.e. the output columns (select expressions).
case Aggregate(groups, aggs, child) if aggs.forall(_.resolved) &&
groups.exists(_.isInstanceOf[UnresolvedOrdinal]) =>
val newGroups = groups.map {
case u @ UnresolvedOrdinal(index) if index > 0 && index <= aggs.size =>
aggs(index - 1)
case ordinal @ UnresolvedOrdinal(index) =>
ordinal.failAnalysis(
s"GROUP BY position $index is not in select list " +
s"(valid range is [1, ${aggs.size}])")
case o => o
}
Aggregate(newGroups, aggs, child)
}
}
/**
* Replace unresolved expressions in grouping keys with resolved ones in SELECT clauses.
* This rule is expected to run after [[ResolveReferences]] is applied.
*/
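// Illustrative example (hypothetical table `t(a, b)`), assuming spark.sql.groupByAliases
// is enabled:
//   SELECT a + b AS k, count(*) FROM t GROUP BY k
// `k` is not resolvable from `t`'s output, so this rule replaces it in the grouping
// expressions with the aliased select-list expression `a + b AS k`.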
object ResolveAggAliasInGroupBy extends Rule[LogicalPlan] {
// This is a strict check: we apply the rule only if the expression is not
// resolvable by the child.
private def notResolvableByChild(attrName: String, child: LogicalPlan): Boolean = {
!child.output.exists(a => resolver(a.name, attrName))
}
private def mayResolveAttrByAggregateExprs(
exprs: Seq[Expression], aggs: Seq[NamedExpression], child: LogicalPlan): Seq[Expression] = {
exprs.map { _.transform {
case u: UnresolvedAttribute if notResolvableByChild(u.name, child) =>
aggs.find(ne => resolver(ne.name, u.name)).getOrElse(u)
}}
}
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case agg @ Aggregate(groups, aggs, child)
if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
groups.exists(!_.resolved) =>
agg.copy(groupingExpressions = mayResolveAttrByAggregateExprs(groups, aggs, child))
case gs @ GroupingSets(selectedGroups, groups, child, aggs)
if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
groups.exists(_.isInstanceOf[UnresolvedAttribute]) =>
gs.copy(
selectedGroupByExprs = selectedGroups.map(mayResolveAttrByAggregateExprs(_, aggs, child)),
groupByExprs = mayResolveAttrByAggregateExprs(groups, aggs, child))
}
}
/**
* In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT
* clause. This rule detects such queries and adds the required attributes to the original
* projection, so that they will be available during sorting. Another projection is added to
* remove these attributes after sorting.
*
* The HAVING clause can also use grouping columns that are not present in the SELECT clause.
*/
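// Illustrative example (hypothetical table `t(a, b)`):
//   SELECT a FROM t ORDER BY b
// `b` is missing from the projection, so the plan is rewritten, roughly, from
//   Sort(b, Project(a, t))
// to
//   Project(a, Sort(b, Project(a, b, t)))
// i.e. `b` is added for sorting and projected away afterwards.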
object ResolveMissingReferences extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Skip sort with aggregate. This will be handled in ResolveAggregateFunctions
case sa @ Sort(_, _, child: Aggregate) => sa
case s @ Sort(order, _, child)
if (!s.resolved || s.missingInput.nonEmpty) && child.resolved =>
val (newOrder, newChild) = resolveExprsAndAddMissingAttrs(order, child)
val ordering = newOrder.map(_.asInstanceOf[SortOrder])
if (child.output == newChild.output) {
s.copy(order = ordering)
} else {
// Add missing attributes and then project them away.
val newSort = s.copy(order = ordering, child = newChild)
Project(child.output, newSort)
}
case f @ Filter(cond, child) if (!f.resolved || f.missingInput.nonEmpty) && child.resolved =>
val (newCond, newChild) = resolveExprsAndAddMissingAttrs(Seq(cond), child)
if (child.output == newChild.output) {
f.copy(condition = newCond.head)
} else {
// Add missing attributes and then project them away.
val newFilter = Filter(newCond.head, newChild)
Project(child.output, newFilter)
}
}
/**
* This method tries to resolve expressions and find missing attributes recursively.
* Specifically, when the expressions used in `Sort` or `Filter` contain unresolved attributes,
* or resolved attributes that are missing from the child's output, this method finds those
* missing attributes and adds them to the projection.
*/
private def resolveExprsAndAddMissingAttrs(
exprs: Seq[Expression], plan: LogicalPlan): (Seq[Expression], LogicalPlan) = {
// Missing attributes can be unresolved attributes or resolved attributes which are not in
// the output attributes of the plan.
if (exprs.forall(e => e.resolved && e.references.subsetOf(plan.outputSet))) {
(exprs, plan)
} else {
plan match {
case p: Project =>
// Resolving expressions against current plan.
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, p))
// Recursively resolving expressions on the child of current plan.
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, p.child)
// If some attributes used by expressions are resolvable only on the rewritten child
// plan, we need to add them into original projection.
val missingAttrs = (AttributeSet(newExprs) -- p.outputSet).intersect(newChild.outputSet)
(newExprs, Project(p.projectList ++ missingAttrs, newChild))
case a @ Aggregate(groupExprs, aggExprs, child) =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, a))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, child)
val missingAttrs = (AttributeSet(newExprs) -- a.outputSet).intersect(newChild.outputSet)
if (missingAttrs.forall(attr => groupExprs.exists(_.semanticEquals(attr)))) {
// All the missing attributes are grouping expressions, valid case.
(newExprs, a.copy(aggregateExpressions = aggExprs ++ missingAttrs, child = newChild))
} else {
// Need to add non-grouping attributes, invalid case.
(exprs, a)
}
case g: Generate =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, g))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, g.child)
(newExprs, g.copy(unrequiredChildIndex = Nil, child = newChild))
// For `Distinct` and `SubqueryAlias`, we can't recursively resolve and add attributes
// via its children.
case u: UnaryNode if !u.isInstanceOf[Distinct] && !u.isInstanceOf[SubqueryAlias] =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, u))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, u.child)
(newExprs, u.withNewChildren(Seq(newChild)))
// For other operators, we can't recursively resolve and add attributes via its children.
case other =>
(exprs.map(resolveExpressionBottomUp(_, other)), other)
}
}
}
}
/**
* Checks whether a function identifier referenced by an [[UnresolvedFunction]] is defined in the
* function registry. Note that this rule doesn't try to resolve the [[UnresolvedFunction]]. It
* only performs a simple existence check based on the function identifier to quickly identify
* undefined functions without triggering relation resolution, which may incur potentially
* expensive partition/schema discovery process in some cases.
* To avoid duplicate lookups of external functions, the external function identifiers are
* stored in the local hash set externalFunctionNameSet.
* @see [[ResolveFunctions]]
* @see https://issues.apache.org/jira/browse/SPARK-19737
*/
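// Illustrative sketch (hypothetical function name and database): with case-insensitive
// analysis and current database `default`, normalizeFuncName(FunctionIdentifier("MyUDF"))
// yields FunctionIdentifier("myudf", Some("default")), so repeated references to the same
// persistent function are answered from externalFunctionNameSet instead of hitting the
// catalog again.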
object LookupFunctions extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = {
val externalFunctionNameSet = new mutable.HashSet[FunctionIdentifier]()
plan.resolveExpressions {
case f: UnresolvedFunction
if externalFunctionNameSet.contains(normalizeFuncName(f.name)) => f
case f: UnresolvedFunction if v1SessionCatalog.isRegisteredFunction(f.name) => f
case f: UnresolvedFunction if v1SessionCatalog.isPersistentFunction(f.name) =>
externalFunctionNameSet.add(normalizeFuncName(f.name))
f
case f: UnresolvedFunction =>
withPosition(f) {
throw new NoSuchFunctionException(
f.name.database.getOrElse(v1SessionCatalog.getCurrentDatabase),
f.name.funcName)
}
}
}
def normalizeFuncName(name: FunctionIdentifier): FunctionIdentifier = {
val funcName = if (conf.caseSensitiveAnalysis) {
name.funcName
} else {
name.funcName.toLowerCase(Locale.ROOT)
}
val databaseName = name.database match {
case Some(a) => formatDatabaseName(a)
case None => v1SessionCatalog.getCurrentDatabase
}
FunctionIdentifier(funcName, Some(databaseName))
}
protected def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
}
/**
* Replaces [[UnresolvedFunc]]s with concrete [[LogicalPlan]]s.
* Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s.
*/
object ResolveFunctions extends Rule[LogicalPlan] {
val trimWarningEnabled = new AtomicBoolean(true)
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Resolve functions with concrete relations from v2 catalog.
case UnresolvedFunc(multipartIdent) =>
val funcIdent = parseSessionCatalogFunctionIdentifier(multipartIdent)
ResolvedFunc(Identifier.of(funcIdent.database.toArray, funcIdent.funcName))
case q: LogicalPlan =>
q transformExpressions {
case u if !u.childrenResolved => u // Skip until children are resolved.
case u: UnresolvedAttribute if resolver(u.name, VirtualColumn.hiveGroupingIdName) =>
withPosition(u) {
Alias(GroupingID(Nil), VirtualColumn.hiveGroupingIdName)()
}
case u @ UnresolvedGenerator(name, children) =>
withPosition(u) {
v1SessionCatalog.lookupFunction(name, children) match {
case generator: Generator => generator
case other =>
failAnalysis(s"$name is expected to be a generator. However, " +
s"its class is ${other.getClass.getCanonicalName}, which is not a generator.")
}
}
case u @ UnresolvedFunction(funcId, arguments, isDistinct, filter) =>
withPosition(u) {
v1SessionCatalog.lookupFunction(funcId, arguments) match {
// AggregateWindowFunctions are AggregateFunctions that can only be evaluated within
// the context of a Window clause. They do not need to be wrapped in an
// AggregateExpression.
case wf: AggregateWindowFunction =>
if (isDistinct || filter.isDefined) {
failAnalysis("DISTINCT or FILTER specified, " +
s"but ${wf.prettyName} is not an aggregate function")
} else {
wf
}
// We get an aggregate function, we need to wrap it in an AggregateExpression.
case agg: AggregateFunction =>
if (filter.isDefined && !filter.get.deterministic) {
failAnalysis("FILTER expression is non-deterministic, " +
"it cannot be used in aggregate functions")
}
AggregateExpression(agg, Complete, isDistinct, filter)
// This function is not an aggregate function, just return the resolved one.
case other if (isDistinct || filter.isDefined) =>
failAnalysis("DISTINCT or FILTER specified, " +
s"but ${other.prettyName} is not an aggregate function")
case e: String2TrimExpression if arguments.size == 2 =>
if (trimWarningEnabled.get) {
log.warn("Two-parameter TRIM/LTRIM/RTRIM function signatures are deprecated." +
" Use SQL syntax `TRIM((BOTH | LEADING | TRAILING)? trimStr FROM str)`" +
" instead.")
trimWarningEnabled.set(false)
}
e
case other =>
other
}
}
}
}
}
/**
* This rule resolves and rewrites subqueries inside expressions.
*
* Note: CTEs are handled in CTESubstitution.
*/
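// Illustrative example (hypothetical tables `t1(a)` and `t2(b)`):
//   SELECT * FROM t1 WHERE a = (SELECT max(b) FROM t2 WHERE t2.b = t1.a)
// `t1.a` cannot be resolved from the subquery plan alone, so it is resolved against the
// outer plan and wrapped as OuterReference(a), which is then recorded as a child of the
// ScalarSubquery expression.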
object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper {
/**
* Resolve the correlated expressions in a subquery by using the outer plans' references. All
* resolved outer references are wrapped in an [[OuterReference]].
*/
private def resolveOuterReferences(plan: LogicalPlan, outer: LogicalPlan): LogicalPlan = {
plan resolveOperatorsDown {
case q: LogicalPlan if q.childrenResolved && !q.resolved =>
q transformExpressions {
case u @ UnresolvedAttribute(nameParts) =>
withPosition(u) {
try {
outer.resolve(nameParts, resolver) match {
case Some(outerAttr) => OuterReference(outerAttr)
case None => u
}
} catch {
case _: AnalysisException => u
}
}
}
}
}
/**
* Resolves the subquery plan that is referenced in a subquery expression. The normal
* attribute references are resolved using the regular analyzer, and the outer references are
* resolved from the outer plans using the resolveOuterReferences method.
*
* Outer references from the correlated predicates are updated as children of
* Subquery expression.
*/
private def resolveSubQuery(
e: SubqueryExpression,
plans: Seq[LogicalPlan])(
f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = {
// Step 1: Resolve the outer expressions.
var previous: LogicalPlan = null
var current = e.plan
do {
// Try to resolve the subquery plan using the regular analyzer.
previous = current
current = executeSameContext(current)
// Use the outer references to resolve the subquery plan if it isn't resolved yet.
val i = plans.iterator
val afterResolve = current
while (!current.resolved && current.fastEquals(afterResolve) && i.hasNext) {
current = resolveOuterReferences(current, i.next())
}
} while (!current.resolved && !current.fastEquals(previous))
// Step 2: If the subquery plan is fully resolved, pull the outer references and record
// them as children of SubqueryExpression.
if (current.resolved) {
// Record the outer references as children of subquery expression.
f(current, SubExprUtils.getOuterReferences(current))
} else {
e.withNewPlan(current)
}
}
/**
* Resolves the subquery. Apart from resolving the subquery and outer references (if any)
* in the subquery plan, the children of subquery expression are updated to record the
* outer references. This is needed to make sure
* (1) The column(s) referred from the outer query are not pruned from the plan during
* optimization.
* (2) Any aggregate expression(s) that reference outer attributes are pushed down to
* outer plan to get evaluated.
*/
private def resolveSubQueries(plan: LogicalPlan, plans: Seq[LogicalPlan]): LogicalPlan = {
plan transformExpressions {
case s @ ScalarSubquery(sub, _, exprId) if !sub.resolved =>
resolveSubQuery(s, plans)(ScalarSubquery(_, _, exprId))
case e @ Exists(sub, _, exprId) if !sub.resolved =>
resolveSubQuery(e, plans)(Exists(_, _, exprId))
case InSubquery(values, l @ ListQuery(_, _, exprId, _))
if values.forall(_.resolved) && !l.resolved =>
val expr = resolveSubQuery(l, plans)((plan, exprs) => {
ListQuery(plan, exprs, exprId, plan.output)
})
InSubquery(values, expr.asInstanceOf[ListQuery])
}
}
/**
* Resolve and rewrite all subqueries in an operator tree.
*/
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// In case of HAVING (a filter after an aggregate) we use both the aggregate and
// its child for resolution.
case f @ Filter(_, a: Aggregate) if f.childrenResolved =>
resolveSubQueries(f, Seq(a, a.child))
// Only a few unary nodes (Project/Filter/Aggregate) can contain subqueries.
case q: UnaryNode if q.childrenResolved =>
resolveSubQueries(q, q.children)
case j: Join if j.childrenResolved =>
resolveSubQueries(j, Seq(j, j.left, j.right))
case s: SupportsSubquery if s.childrenResolved =>
resolveSubQueries(s, s.children)
}
}
/**
* Replaces unresolved column aliases for a subquery with projections.
*/
object ResolveSubqueryColumnAliases extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case u @ UnresolvedSubqueryColumnAliases(columnNames, child) if child.resolved =>
// Resolves output attributes if a query has alias names in its subquery:
// e.g., SELECT * FROM (SELECT 1 AS a, 1 AS b) t(col1, col2)
val outputAttrs = child.output
// Checks that the number of aliases equals the number of output columns
// in the subquery.
if (columnNames.size != outputAttrs.size) {
u.failAnalysis("Number of column aliases does not match number of columns. " +
s"Number of column aliases: ${columnNames.size}; " +
s"number of columns: ${outputAttrs.size}.")
}
val aliases = outputAttrs.zip(columnNames).map { case (attr, aliasName) =>
Alias(attr, aliasName)()
}
Project(aliases, child)
}
}
/**
* Turns projections that contain aggregate expressions into aggregations.
*/
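// Illustrative example (hypothetical table `t(a)`):
//   SELECT max(a) FROM t
// initially becomes a Project whose project list contains an aggregate expression; this
// rule turns it into, roughly, Aggregate(Nil, [max(a)], t).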
object GlobalAggregates extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
case Project(projectList, child) if containsAggregates(projectList) =>
Aggregate(Nil, projectList, child)
}
def containsAggregates(exprs: Seq[Expression]): Boolean = {
// Collect all Windowed Aggregate Expressions.
val windowedAggExprs: Set[Expression] = exprs.flatMap { expr =>
expr.collect {
case WindowExpression(ae: AggregateExpression, _) => ae
case WindowExpression(e: PythonUDF, _) if PythonUDF.isGroupedAggPandasUDF(e) => e
}
}.toSet
// Find the first Aggregate Expression that is not Windowed.
exprs.exists(_.collectFirst {
case ae: AggregateExpression if !windowedAggExprs.contains(ae) => ae
case e: PythonUDF if PythonUDF.isGroupedAggPandasUDF(e) &&
!windowedAggExprs.contains(e) => e
}.isDefined)
}
}
/**
* This rule finds aggregate expressions that are not in an aggregate operator. For example,
* those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
* underlying aggregate operator and then projected away after the original operator.
*/
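// Illustrative example (hypothetical table `t(k, v)`):
//   SELECT k FROM t GROUP BY k HAVING max(v) > 0
// `max(v)` is not in the aggregate list, so it is pushed down into the Aggregate and the
// plan becomes, roughly,
//   Project([k], Filter(max(v) > 0, Aggregate([k], [k, max(v) AS max(v)], t)))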
object ResolveAggregateFunctions extends Rule[LogicalPlan] with AliasHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Resolve an aggregate with a HAVING clause to Filter(..., Aggregate()). Note: to avoid
// wrongly resolving the HAVING condition expression, we skip resolving it in
// ResolveReferences and transform it to a Filter after the aggregate is resolved.
// See SPARK-31519 for more details.
case UnresolvedHaving(cond, agg: Aggregate) if agg.resolved =>
resolveHaving(Filter(cond, agg), agg)
case f @ Filter(_, agg: Aggregate) if agg.resolved =>
resolveHaving(f, agg)
case sort @ Sort(sortOrder, global, aggregate: Aggregate) if aggregate.resolved =>
// Try resolving the ordering as though it is in the aggregate clause.
try {
// If a sort order is unresolved, contains references not in the aggregate, or contains an
// `AggregateExpression`, we need to push it down to the underlying aggregate operator.
val unresolvedSortOrders = sortOrder.filter { s =>
!s.resolved || !s.references.subsetOf(aggregate.outputSet) || containsAggregate(s)
}
val aliasedOrdering =
unresolvedSortOrders.map(o => Alias(o.child, "aggOrder")())
val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering)
val resolvedAggregate: Aggregate =
executeSameContext(aggregatedOrdering).asInstanceOf[Aggregate]
val resolvedAliasedOrdering: Seq[Alias] =
resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]]
// If we pass the analysis check, then the ordering expressions should only reference
// aggregate expressions or grouping expressions, and it's safe to push them down to
// Aggregate.
checkAnalysis(resolvedAggregate)
val originalAggExprs = aggregate.aggregateExpressions.map(trimNonTopLevelAliases)
// If the ordering expression is the same as the original aggregate expression, we don't
// need to push down this ordering expression and can reference the original aggregate
// expression instead.
val needsPushDown = ArrayBuffer.empty[NamedExpression]
val evaluatedOrderings = resolvedAliasedOrdering.zip(unresolvedSortOrders).map {
case (evaluated, order) =>
val index = originalAggExprs.indexWhere {
case Alias(child, _) => child semanticEquals evaluated.child
case other => other semanticEquals evaluated.child
}
if (index == -1) {
needsPushDown += evaluated
order.copy(child = evaluated.toAttribute)
} else {
order.copy(child = originalAggExprs(index).toAttribute)
}
}
val sortOrdersMap = unresolvedSortOrders
.map(new TreeNodeRef(_))
.zip(evaluatedOrderings)
.toMap
val finalSortOrders = sortOrder.map(s => sortOrdersMap.getOrElse(new TreeNodeRef(s), s))
// Since we don't rely on sort.resolved as the stop condition for this rule,
// we need to check this and prevent applying this rule multiple times.
if (sortOrder == finalSortOrders) {
sort
} else {
Project(aggregate.output,
Sort(finalSortOrders, global,
aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown)))
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
case ae: AnalysisException => sort
}
}
def containsAggregate(condition: Expression): Boolean = {
condition.find(_.isInstanceOf[AggregateExpression]).isDefined
}
def resolveFilterCondInAggregate(
filterCond: Expression, agg: Aggregate): Option[(Seq[NamedExpression], Expression)] = {
try {
val aggregatedCondition =
Aggregate(
agg.groupingExpressions,
Alias(filterCond, "havingCondition")() :: Nil,
agg.child)
val resolvedOperator = executeSameContext(aggregatedCondition)
def resolvedAggregateFilter =
resolvedOperator
.asInstanceOf[Aggregate]
.aggregateExpressions.head
// If resolution was successful and we see the filter has an aggregate in it, add it to
// the original aggregate operator.
if (resolvedOperator.resolved) {
// Try to replace all aggregate expressions in the filter by an alias.
val aggregateExpressions = ArrayBuffer.empty[NamedExpression]
val transformedAggregateFilter = resolvedAggregateFilter.transform {
case ae: AggregateExpression =>
val alias = Alias(ae, ae.toString)()
aggregateExpressions += alias
alias.toAttribute
// Grouping functions are handled in the rule [[ResolveGroupingAnalytics]].
case e: Expression if agg.groupingExpressions.exists(_.semanticEquals(e)) &&
!ResolveGroupingAnalytics.hasGroupingFunction(e) &&
!agg.output.exists(_.semanticEquals(e)) =>
e match {
case ne: NamedExpression =>
aggregateExpressions += ne
ne.toAttribute
case _ =>
val alias = Alias(e, e.toString)()
aggregateExpressions += alias
alias.toAttribute
}
}
if (aggregateExpressions.nonEmpty) {
Some(aggregateExpressions.toSeq, transformedAggregateFilter)
} else {
None
}
} else {
None
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return None and the caller side will return the original plan.
case ae: AnalysisException => None
}
}
def resolveHaving(filter: Filter, agg: Aggregate): LogicalPlan = {
// Try resolving the condition of the filter as though it is in the aggregate clause
val resolvedInfo = resolveFilterCondInAggregate(filter.condition, agg)
// Push the aggregate expressions into the aggregate (if any).
if (resolvedInfo.nonEmpty) {
val (aggregateExpressions, resolvedHavingCond) = resolvedInfo.get
Project(agg.output,
Filter(resolvedHavingCond,
agg.copy(aggregateExpressions = agg.aggregateExpressions ++ aggregateExpressions)))
} else {
filter
}
}
}
/**
* Extracts [[Generator]] from the projectList of a [[Project]] operator and creates [[Generate]]
* operator under [[Project]].
*
* This rule will throw [[AnalysisException]] for following cases:
* 1. [[Generator]] is nested in expressions, e.g. `SELECT explode(list) + 1 FROM tbl`
* 2. more than one [[Generator]] is found in projectList,
* e.g. `SELECT explode(list), explode(list) FROM tbl`
* 3. [[Generator]] is found in other operators that are not [[Project]] or [[Generate]],
* e.g. `SELECT * FROM tbl SORT BY explode(list)`
*/
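// Illustrative example (hypothetical table `t(a, arr)`):
//   SELECT a, explode(arr) AS e FROM t
// is rewritten from Project([a, explode(arr) AS e], t) into, roughly,
//   Project([a, e], Generate(explode(arr), ..., t))
// whereas `SELECT explode(arr) + 1 FROM t`, or two generators in one select list, fail
// analysis as described above.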
object ExtractGenerator extends Rule[LogicalPlan] {
private def hasGenerator(expr: Expression): Boolean = {
expr.find(_.isInstanceOf[Generator]).isDefined
}
private def hasNestedGenerator(expr: NamedExpression): Boolean = {
def hasInnerGenerator(g: Generator): Boolean = g match {
// Since `GeneratorOuter` is just a wrapper of generators, we skip it here
case go: GeneratorOuter =>
hasInnerGenerator(go.child)
case _ =>
g.children.exists { _.find {
case _: Generator => true
case _ => false
}.isDefined }
}
trimNonTopLevelAliases(expr) match {
case UnresolvedAlias(g: Generator, _) => hasInnerGenerator(g)
case Alias(g: Generator, _) => hasInnerGenerator(g)
case MultiAlias(g: Generator, _) => hasInnerGenerator(g)
case other => hasGenerator(other)
}
}
private def hasAggFunctionInGenerator(ne: Seq[NamedExpression]): Boolean = {
ne.exists(_.find {
case g: Generator =>
g.children.exists(_.find(_.isInstanceOf[AggregateFunction]).isDefined)
case _ =>
false
}.nonEmpty)
}
private def trimAlias(expr: NamedExpression): Expression = expr match {
case UnresolvedAlias(child, _) => child
case Alias(child, _) => child
case MultiAlias(child, _) => child
case _ => expr
}
private object AliasedGenerator {
/**
* Extracts a [[Generator]] expression, any names assigned by aliases to the outputs
* and the outer flag. The outer flag is used when joining the generator output.
* @param e the [[Expression]]
* @return (the [[Generator]], seq of output names, outer flag)
*/
def unapply(e: Expression): Option[(Generator, Seq[String], Boolean)] = e match {
case Alias(GeneratorOuter(g: Generator), name) if g.resolved => Some((g, name :: Nil, true))
case MultiAlias(GeneratorOuter(g: Generator), names) if g.resolved => Some((g, names, true))
case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil, false))
case MultiAlias(g: Generator, names) if g.resolved => Some((g, names, false))
case _ => None
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Project(projectList, _) if projectList.exists(hasNestedGenerator) =>
val nestedGenerator = projectList.find(hasNestedGenerator).get
throw QueryCompilationErrors.nestedGeneratorError(trimAlias(nestedGenerator))
case Project(projectList, _) if projectList.count(hasGenerator) > 1 =>
val generators = projectList.filter(hasGenerator).map(trimAlias)
throw QueryCompilationErrors.moreThanOneGeneratorError(generators, "select")
case Aggregate(_, aggList, _) if aggList.exists(hasNestedGenerator) =>
val nestedGenerator = aggList.find(hasNestedGenerator).get
throw QueryCompilationErrors.nestedGeneratorError(trimAlias(nestedGenerator))
case Aggregate(_, aggList, _) if aggList.count(hasGenerator) > 1 =>
val generators = aggList.filter(hasGenerator).map(trimAlias)
throw QueryCompilationErrors.moreThanOneGeneratorError(generators, "aggregate")
case agg @ Aggregate(groupList, aggList, child) if aggList.forall {
case AliasedGenerator(_, _, _) => true
case other => other.resolved
} && aggList.exists(hasGenerator) =>
// This boolean flag is set to true once a generator in the aggregate list has been visited.
var generatorVisited = false
val projectExprs = Array.ofDim[NamedExpression](aggList.length)
val newAggList = aggList
.map(trimNonTopLevelAliases)
.zipWithIndex
.flatMap {
case (AliasedGenerator(generator, names, outer), idx) =>
// This is a sanity check; it should not happen, as the previous case would have thrown
// an exception earlier.
assert(!generatorVisited, "More than one generator found in aggregate.")
generatorVisited = true
val newGenChildren: Seq[Expression] = generator.children.zipWithIndex.map {
case (e, idx) => if (e.foldable) e else Alias(e, s"_gen_input_${idx}")()
}
val newGenerator = {
val g = generator.withNewChildren(newGenChildren.map { e =>
if (e.foldable) e else e.asInstanceOf[Alias].toAttribute
}).asInstanceOf[Generator]
if (outer) GeneratorOuter(g) else g
}
val newAliasedGenerator = if (names.length == 1) {
Alias(newGenerator, names(0))()
} else {
MultiAlias(newGenerator, names)
}
projectExprs(idx) = newAliasedGenerator
newGenChildren.filter(!_.foldable).asInstanceOf[Seq[NamedExpression]]
case (other, idx) =>
projectExprs(idx) = other.toAttribute
other :: Nil
}
val newAgg = Aggregate(groupList, newAggList, child)
Project(projectExprs.toList, newAgg)
case p @ Project(projectList, _) if hasAggFunctionInGenerator(projectList) =>
// If a generator has any aggregate function, we need to apply the `GlobalAggregates` rule
// first for replacing `Project` with `Aggregate`.
p
case p @ Project(projectList, child) =>
// Holds the resolved generator, if one exists in the project list.
var resolvedGenerator: Generate = null
val newProjectList = projectList
.map(trimNonTopLevelAliases)
.flatMap {
case AliasedGenerator(generator, names, outer) if generator.childrenResolved =>
// This is a sanity check; it should not happen, as the previous case would have thrown
// an exception earlier.
assert(resolvedGenerator == null, "More than one generator found in SELECT.")
resolvedGenerator =
Generate(
generator,
unrequiredChildIndex = Nil,
outer = outer,
qualifier = None,
generatorOutput = ResolveGenerate.makeGeneratorOutput(generator, names),
child)
resolvedGenerator.generatorOutput
case other => other :: Nil
}
if (resolvedGenerator != null) {
Project(newProjectList, resolvedGenerator)
} else {
p
}
case g: Generate => g
case p if p.expressions.exists(hasGenerator) =>
throw QueryCompilationErrors.generatorOutsideSelectError(p)
}
}
/**
* Rewrites table-generating expressions that need one or more of the following in order
* to be resolved:
* - concrete attribute references for their output.
* - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]].
*
* Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions
* that wrap the [[Generator]].
*/
object ResolveGenerate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case g: Generate if !g.child.resolved || !g.generator.resolved => g
case g: Generate if !g.resolved =>
g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name)))
}
/**
* Construct the output attributes for a [[Generator]], given a list of names. If the list of
* names is empty, names are taken from the field names of the generator's element schema.
*/
private[analysis] def makeGeneratorOutput(
generator: Generator,
names: Seq[String]): Seq[Attribute] = {
val elementAttrs = generator.elementSchema.toAttributes
if (names.length == elementAttrs.length) {
names.zip(elementAttrs).map {
case (name, attr) => attr.withName(name)
}
} else if (names.isEmpty) {
elementAttrs
} else {
failAnalysis(
"The number of aliases supplied in the AS clause does not match the number of columns " +
s"output by the UDTF expected ${elementAttrs.size} aliases but got " +
s"${names.mkString(",")} ")
}
}
}
/**
* Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and
* aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]]
* operators for every distinct [[WindowSpecDefinition]].
*
* This rule handles three cases:
* - A [[Project]] having [[WindowExpression]]s in its projectList;
* - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions.
* - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING
* clause and the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions.
* Note: If there is a GROUP BY clause in the query, aggregations and corresponding
* filters (expressions in the HAVING clause) should be evaluated before any
* [[WindowExpression]]. If a query has SELECT DISTINCT, the DISTINCT part should be
* evaluated after all [[WindowExpression]]s.
*
* For every case, the transformation works as follows:
* 1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions
* it into two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for
* all regular expressions.
* 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s
* and [[WindowFunctionType]]s.
* 3. For every distinct [[WindowSpecDefinition]] and [[WindowFunctionType]], creates a
* [[Window]] operator and inserts it into the plan tree.
*/
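// Illustrative example (hypothetical table `t(a, b, c)`):
//   SELECT a, sum(b) OVER (PARTITION BY c) AS s FROM t
// is rewritten into, roughly,
//   Project([a, s], Window([sum(b) OVER (...) AS s], [c], [], Project([a, b, c], t)))
// with one Window operator created per distinct (partition spec, order spec, function type).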
object ExtractWindowExpressions extends Rule[LogicalPlan] {
type Spec = (Seq[Expression], Seq[SortOrder], WindowFunctionType)
private def hasWindowFunction(exprs: Seq[Expression]): Boolean =
exprs.exists(hasWindowFunction)
private def hasWindowFunction(expr: Expression): Boolean = {
expr.find {
case window: WindowExpression => true
case _ => false
}.isDefined
}
/**
* From a Seq of [[NamedExpression]]s, extract expressions containing window expressions and
* other regular expressions that do not contain any window expression. For example, for
* `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract
* `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in
* the window expression as attribute references. So, the first returned value will be
* `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be
* [col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2].
*
* @return (seq of expressions containing at least one window expression,
* seq of non-window expressions)
*/
private def extract(
expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = {
// First, we partition the input expressions into two parts. Every expression in the
// first part contains at least one WindowExpression; expressions in the second part
// do not contain any WindowExpression.
val (expressionsWithWindowFunctions, regularExpressions) =
expressions.partition(hasWindowFunction)
// Then, we need to extract those regular expressions used in the WindowExpression.
// For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5),
// we need to make sure that col1 to col5 are all projected from the child of the Window
// operator.
val extractedExprBuffer = new ArrayBuffer[NamedExpression]()
def extractExpr(expr: Expression): Expression = expr match {
case ne: NamedExpression =>
// If a named expression is not in regularExpressions, add it to
// extractedExprBuffer and replace it with an AttributeReference.
val missingExpr =
AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer)
if (missingExpr.nonEmpty) {
extractedExprBuffer += ne
}
// alias will be cleaned in the rule CleanupAliases
ne
case e: Expression if e.foldable =>
e // No need to create an attribute reference if it will be evaluated as a Literal.
case e: Expression =>
// For other expressions, we extract it and replace it with an AttributeReference (with
// an internal column name, e.g. "_w0").
val withName = Alias(e, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
}
// Now, we extract regular expressions from expressionsWithWindowFunctions
// by using extractExpr.
val seenWindowAggregates = new ArrayBuffer[AggregateExpression]
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
_.transform {
// Extracts children expressions of a WindowFunction (input parameters of
// a WindowFunction).
case wf: WindowFunction =>
val newChildren = wf.children.map(extractExpr)
wf.withNewChildren(newChildren)
// Extracts expressions from the partition spec and order spec.
case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) =>
val newPartitionSpec = partitionSpec.map(extractExpr)
val newOrderSpec = orderSpec.map { so =>
val newChild = extractExpr(so.child)
so.copy(child = newChild)
}
wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec)
case WindowExpression(ae: AggregateExpression, _) if ae.filter.isDefined =>
failAnalysis(
"window aggregate function with filter predicate is not supported yet.")
// Extract Windowed AggregateExpression
case we @ WindowExpression(
ae @ AggregateExpression(function, _, _, _, _),
spec: WindowSpecDefinition) =>
val newChildren = function.children.map(extractExpr)
val newFunction = function.withNewChildren(newChildren).asInstanceOf[AggregateFunction]
val newAgg = ae.copy(aggregateFunction = newFunction)
seenWindowAggregates += newAgg
WindowExpression(newAgg, spec)
case AggregateExpression(aggFunc, _, _, _, _) if hasWindowFunction(aggFunc.children) =>
failAnalysis("It is not allowed to use a window function inside an aggregate " +
"function. Please use the inner window function in a sub-query.")
// Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...),
// we need to extract SUM(x).
case agg: AggregateExpression if !seenWindowAggregates.contains(agg) =>
val withName = Alias(agg, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
// Extracts other attributes
case attr: Attribute => extractExpr(attr)
}.asInstanceOf[NamedExpression]
}
(newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer)
} // end of extract
/**
* Adds operators for Window Expressions. Every Window operator handles a single Window Spec.
*/
private def addWindow(
expressionsWithWindowFunctions: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
// First, we need to extract all WindowExpressions from expressionsWithWindowFunctions
// and put those extracted WindowExpressions to extractedWindowExprBuffer.
// This step is needed because it is possible that an expression contains multiple
// WindowExpressions with different Window Specs.
// After extracting WindowExpressions, we need to construct a project list to generate
// expressionsWithWindowFunctions based on extractedWindowExprBuffer.
// For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract
// "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to
// "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)".
// Then, the projectList will be [_we0/_we1].
val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]()
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
// We need to use transformDown because we want to trigger
// "case alias @ Alias(window: WindowExpression, _)" first.
_.transformDown {
case alias @ Alias(window: WindowExpression, _) =>
// If a WindowExpression has an assigned alias, just use it.
extractedWindowExprBuffer += alias
alias.toAttribute
case window: WindowExpression =>
// If there is no alias assigned to the WindowExpression, we create an
// internal column.
val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")()
extractedWindowExprBuffer += withName
withName.toAttribute
}.asInstanceOf[NamedExpression]
}
// SPARK-32616: Use a linked hash map to maintain the insertion order of the Window
// operators, so that a query with multiple Window operators gets a deterministic plan.
val groupedWindowExpressions = mutable.LinkedHashMap.empty[Spec, ArrayBuffer[NamedExpression]]
// Second, we group extractedWindowExprBuffer based on their Partition and Order Specs.
extractedWindowExprBuffer.foreach { expr =>
val distinctWindowSpec = expr.collect {
case window: WindowExpression => window.windowSpec
}.distinct
// We do a final check to verify that only a single Window Spec is defined in each
// expression.
if (distinctWindowSpec.isEmpty) {
failAnalysis(s"$expr does not have any WindowExpression.")
} else if (distinctWindowSpec.length > 1) {
// newExpressionsWithWindowFunctions only have expressions with a single
// WindowExpression. If we reach here, we have a bug.
failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec)." +
s"Please file a bug report with this error message, stack trace, and the query.")
} else {
val spec = distinctWindowSpec.head
val specKey = (spec.partitionSpec, spec.orderSpec, WindowFunctionType.functionType(expr))
val windowExprs = groupedWindowExpressions
.getOrElseUpdate(specKey, new ArrayBuffer[NamedExpression])
windowExprs += expr
}
}
// Third, we build one Window operator per Window Spec, chaining them so that each
// Window operator becomes the child of the next one.
val windowOps =
groupedWindowExpressions.foldLeft(child) {
case (last, ((partitionSpec, orderSpec, _), windowExpressions)) =>
Window(windowExpressions.toSeq, partitionSpec, orderSpec, last)
}
// Finally, we create a Project that outputs windowOps's output plus
// newExpressionsWithWindowFunctions.
Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps)
} // end of addWindow
// We have to use transformDown here to make sure the
// "Aggregate with Having clause" case is triggered first.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsDown {
case Filter(condition, _) if hasWindowFunction(condition) =>
failAnalysis("It is not allowed to use window functions inside WHERE clause")
case UnresolvedHaving(condition, _) if hasWindowFunction(condition) =>
failAnalysis("It is not allowed to use window functions inside HAVING clause")
// Aggregate with Having clause. This rule works with an unresolved Aggregate because
// a resolved Aggregate will not have Window Functions.
case f @ UnresolvedHaving(condition, a @ Aggregate(groupingExprs, aggregateExprs, child))
if child.resolved &&
hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add a Filter operator for conditions in the Having clause.
val withFilter = Filter(condition, withAggregate)
val withWindow = addWindow(windowExpressions, withFilter)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map(_.toAttribute)
Project(finalProjectList, withWindow)
case p: LogicalPlan if !p.childrenResolved => p
// Aggregate without Having clause.
case a @ Aggregate(groupingExprs, aggregateExprs, child)
if hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withAggregate)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map(_.toAttribute)
Project(finalProjectList, withWindow)
// We only extract Window Expressions after all expressions of the Project
// have been resolved.
case p @ Project(projectList, child)
if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) =>
val (windowExpressions, regularExpressions) = extract(projectList)
// We add a project to get all needed expressions for window expressions from the child
// of the original Project operator.
val withProject = Project(regularExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withProject)
// Finally, generate output columns according to the original projectList.
val finalProjectList = projectList.map(_.toAttribute)
Project(finalProjectList, withWindow)
}
}
/**
* Pulls out nondeterministic expressions from a LogicalPlan that is not a Project or Filter,
* puts them into an inner Project, and finally projects them away in an outer Project.
*/
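// Illustrative sketch (hypothetical table `t(a)`): an aggregate that groups by a
// nondeterministic expression, e.g.
//   Aggregate([a + rand()], [count(1)], t)
// is rewritten so rand() is evaluated once in an inner Project:
//   Aggregate([a + _nondeterministic], [count(1)],
//     Project([a, rand() AS _nondeterministic], t))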
object PullOutNondeterministic extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p: Project => p
case f: Filter => f
case a: Aggregate if a.groupingExpressions.exists(!_.deterministic) =>
val nondeterToAttr = getNondeterToAttr(a.groupingExpressions)
val newChild = Project(a.child.output ++ nondeterToAttr.values, a.child)
a.transformExpressions { case e =>
nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
}.copy(child = newChild)
// Don't touch collect metrics. Top-level metrics are not supported (check analysis will fail)
// and we want to retain them inside the aggregate functions.
case m: CollectMetrics => m
// TODO: It's hard to write a general rule to pull out nondeterministic expressions
// from a LogicalPlan; currently we only do it for a UnaryNode that has the same output
// schema as its child.
case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) =>
val nondeterToAttr = getNondeterToAttr(p.expressions)
val newPlan = p.transformExpressions { case e =>
nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
}
val newChild = Project(p.child.output ++ nondeterToAttr.values, p.child)
Project(p.output, newPlan.withNewChildren(newChild :: Nil))
}
private def getNondeterToAttr(exprs: Seq[Expression]): Map[Expression, NamedExpression] = {
exprs.filterNot(_.deterministic).flatMap { expr =>
val leafNondeterministic = expr.collect { case n: Nondeterministic => n }
leafNondeterministic.distinct.map { e =>
val ne = e match {
case n: NamedExpression => n
case _ => Alias(e, "_nondeterministic")()
}
e -> ne
}
}.toMap
}
}
/**
* Set the seed for random number generation.
*/
object ResolveRandomSeed extends Rule[LogicalPlan] {
private lazy val random = new Random()
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if p.resolved => p
case p => p transformExpressionsUp {
case Uuid(None) => Uuid(Some(random.nextLong()))
case Shuffle(child, None) => Shuffle(child, Some(random.nextLong()))
}
}
}
/**
* Correctly handles null primitive inputs for UDFs by adding an extra [[If]] expression to do
* the null check. When a user defines a UDF with primitive parameters, there is no way to tell
* whether a primitive parameter is null or not, so here we assume the primitive input is
* null-propagating and return null if the input is null.
*/
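// Illustrative sketch (hypothetical UDF): for val f = udf((x: Int) => x + 1) applied to a
// nullable int column `a`, the expression f(a) is rewritten to roughly
//   If(IsNull(a), null, f(knownnotnull(a)))
// so the UDF body never sees a null primitive, and the KnownNotNull wrapper prevents this
// rule from firing again on the already-handled input.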
object HandleNullInputsForUDF extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p => p transformExpressionsUp {
case udf: ScalaUDF if udf.inputPrimitives.contains(true) =>
          // At least one input is a primitive that can't accept null, so add special null
          // handling for those fields. The result of such an operation, when passed null,
          // is generally to return null.
assert(udf.inputPrimitives.length == udf.children.length)
val inputPrimitivesPair = udf.inputPrimitives.zip(udf.children)
val inputNullCheck = inputPrimitivesPair.collect {
case (isPrimitive, input) if isPrimitive && input.nullable =>
IsNull(input)
}.reduceLeftOption[Expression](Or)
if (inputNullCheck.isDefined) {
// Once we add an `If` check above the udf, it is safe to mark those checked inputs
// as null-safe (i.e., wrap with `KnownNotNull`), because the null-returning
// branch of `If` will be called if any of these checked inputs is null. Thus we can
// prevent this rule from being applied repeatedly.
val newInputs = inputPrimitivesPair.map {
case (isPrimitive, input) =>
if (isPrimitive && input.nullable) {
KnownNotNull(input)
} else {
input
}
}
val newUDF = udf.copy(children = newInputs)
If(inputNullCheck.get, Literal.create(null, udf.dataType), newUDF)
} else {
udf
}
}
}
}
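  // A minimal sketch of the null handling added by HandleNullInputsForUDF above, assuming a
  // hypothetical UDF `f: Int => Int` applied to a nullable integer column `a`:
  //   ScalaUDF(f, IntegerType, Seq(a), ...)
  // becomes, conceptually,
  //   If(IsNull(a), Literal(null, IntegerType), ScalaUDF(f, IntegerType, Seq(KnownNotNull(a)), ...))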
/**
* Resolve the encoders for the UDF by explicitly given the attributes. We give the
* attributes explicitly in order to handle the case where the data type of the input
* value is not the same with the internal schema of the encoder, which could cause
* data loss. For example, the encoder should not cast the input value to Decimal(38, 18)
* if the actual data type is Decimal(30, 0).
*
* The resolved encoders then will be used to deserialize the internal row to Scala value.
*/
object ResolveEncodersInUDF extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p => p transformExpressionsUp {
case udf: ScalaUDF if udf.inputEncoders.nonEmpty =>
val boundEncoders = udf.inputEncoders.zipWithIndex.map { case (encOpt, i) =>
val dataType = udf.children(i).dataType
encOpt.map { enc =>
val attrs = if (enc.isSerializedAsStructForTopLevel) {
dataType.asInstanceOf[StructType].toAttributes
} else {
// the field name doesn't matter here, so we use
// a simple literal to avoid any overhead
new StructType().add("input", dataType).toAttributes
}
enc.resolveAndBind(attrs)
}
}
udf.copy(inputEncoders = boundEncoders)
}
}
}
/**
* Check and add proper window frames for all window functions.
*/
object ResolveWindowFrame extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case WindowExpression(wf: FrameLessOffsetWindowFunction,
WindowSpecDefinition(_, _, f: SpecifiedWindowFrame)) if wf.frame != f =>
failAnalysis(s"Cannot specify window frame for ${wf.prettyName} function")
case WindowExpression(wf: WindowFunction, WindowSpecDefinition(_, _, f: SpecifiedWindowFrame))
if wf.frame != UnspecifiedFrame && wf.frame != f =>
failAnalysis(s"Window Frame $f must match the required frame ${wf.frame}")
case WindowExpression(wf: WindowFunction, s @ WindowSpecDefinition(_, _, UnspecifiedFrame))
if wf.frame != UnspecifiedFrame =>
WindowExpression(wf, s.copy(frameSpecification = wf.frame))
case we @ WindowExpression(e, s @ WindowSpecDefinition(_, o, UnspecifiedFrame))
if e.resolved =>
val frame = if (o.nonEmpty) {
SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow)
} else {
SpecifiedWindowFrame(RowFrame, UnboundedPreceding, UnboundedFollowing)
}
we.copy(windowSpec = s.copy(frameSpecification = frame))
}
}
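  // A minimal sketch of the default frames chosen by ResolveWindowFrame above (SQL shown only
  // for illustration):
  //   sum(x) OVER (PARTITION BY p ORDER BY y)
  //     gets RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
  //   sum(x) OVER (PARTITION BY p)
  //     gets ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING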
/**
* Check and add order to [[AggregateWindowFunction]]s.
*/
object ResolveWindowOrder extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case WindowExpression(wf: WindowFunction, spec) if spec.orderSpec.isEmpty =>
failAnalysis(s"Window function $wf requires window to be ordered, please add ORDER BY " +
s"clause. For example SELECT $wf(value_expr) OVER (PARTITION BY window_partition " +
s"ORDER BY window_ordering) from table")
case WindowExpression(rank: RankLike, spec) if spec.resolved =>
val order = spec.orderSpec.map(_.child)
WindowExpression(rank.withOrder(order), spec)
}
}
/**
   * Removes natural and using joins by calculating the output columns based on the outputs of the
   * two sides, then applies a Project on a normal Join to eliminate the natural or using join.
*/
object ResolveNaturalAndUsingJoin extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case j @ Join(left, right, UsingJoin(joinType, usingCols), _, hint)
if left.resolved && right.resolved && j.duplicateResolved =>
commonNaturalJoinProcessing(left, right, joinType, usingCols, None, hint)
case j @ Join(left, right, NaturalJoin(joinType), condition, hint)
if j.resolvedExceptNatural =>
// find common column names from both sides
val joinNames = left.output.map(_.name).intersect(right.output.map(_.name))
commonNaturalJoinProcessing(left, right, joinType, joinNames, condition, hint)
}
}
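  // A minimal sketch of the rewrite above, assuming hypothetical tables t1(a, b) and t2(b, c):
  //   t1 NATURAL JOIN t2   (equivalently, t1 JOIN t2 USING (b))
  // becomes, conceptually,
  //   Project(Seq(b, a, c), Join(t1, t2, Inner, Some(t1.b === t2.b), hint))
  // with the project list adjusted for outer joins in commonNaturalJoinProcessing later in this
  // class.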
/**
* Resolves columns of an output table from the data in a logical plan. This rule will:
*
* - Reorder columns when the write is by name
* - Insert casts when data types do not match
* - Insert aliases when column names do not match
* - Detect plans that are not compatible with the output table and throw AnalysisException
*/
object ResolveOutputRelation extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
case v2Write: V2WriteCommand
if v2Write.table.resolved && v2Write.query.resolved && !v2Write.outputResolved =>
validateStoreAssignmentPolicy()
val projection = TableOutputResolver.resolveOutputColumns(
v2Write.table.name, v2Write.table.output, v2Write.query, v2Write.isByName, conf)
if (projection != v2Write.query) {
v2Write.withNewQuery(projection)
} else {
v2Write
}
}
}
private def validateStoreAssignmentPolicy(): Unit = {
// SPARK-28730: LEGACY store assignment policy is disallowed in data source v2.
if (conf.storeAssignmentPolicy == StoreAssignmentPolicy.LEGACY) {
throw QueryCompilationErrors.legacyStoreAssignmentPolicyError()
}
}
private def commonNaturalJoinProcessing(
left: LogicalPlan,
right: LogicalPlan,
joinType: JoinType,
joinNames: Seq[String],
condition: Option[Expression],
hint: JoinHint) = {
val leftKeys = joinNames.map { keyName =>
left.output.find(attr => resolver(attr.name, keyName)).getOrElse {
throw QueryCompilationErrors.unresolvedUsingColForJoinError(keyName, left, "left")
}
}
val rightKeys = joinNames.map { keyName =>
right.output.find(attr => resolver(attr.name, keyName)).getOrElse {
throw QueryCompilationErrors.unresolvedUsingColForJoinError(keyName, right, "right")
}
}
val joinPairs = leftKeys.zip(rightKeys)
val newCondition = (condition ++ joinPairs.map(EqualTo.tupled)).reduceOption(And)
// columns not in joinPairs
val lUniqueOutput = left.output.filterNot(att => leftKeys.contains(att))
val rUniqueOutput = right.output.filterNot(att => rightKeys.contains(att))
// the output list looks like: join keys, columns from left, columns from right
val projectList = joinType match {
case LeftOuter =>
leftKeys ++ lUniqueOutput ++ rUniqueOutput.map(_.withNullability(true))
case LeftExistence(_) =>
leftKeys ++ lUniqueOutput
case RightOuter =>
rightKeys ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput
case FullOuter =>
        // In a full outer join, a join column should be non-null if either side is non-null,
        // hence the Coalesce over the left/right keys.
val joinedCols = joinPairs.map { case (l, r) => Alias(Coalesce(Seq(l, r)), l.name)() }
joinedCols ++
lUniqueOutput.map(_.withNullability(true)) ++
rUniqueOutput.map(_.withNullability(true))
case _ : InnerLike =>
leftKeys ++ lUniqueOutput ++ rUniqueOutput
case _ =>
sys.error("Unsupported natural join type " + joinType)
}
// use Project to trim unnecessary fields
Project(projectList, Join(left, right, joinType, newCondition, hint))
}
/**
* Replaces [[UnresolvedDeserializer]] with the deserialization expression that has been resolved
* to the given input attributes.
*/
object ResolveDeserializer extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case UnresolvedDeserializer(deserializer, inputAttributes) =>
val inputs = if (inputAttributes.isEmpty) {
p.children.flatMap(_.output)
} else {
inputAttributes
}
validateTopLevelTupleFields(deserializer, inputs)
val resolved = resolveExpressionBottomUp(
deserializer, LocalRelation(inputs), throws = true)
val result = resolved transformDown {
case UnresolvedMapObjects(func, inputData, cls) if inputData.resolved =>
inputData.dataType match {
case ArrayType(et, cn) =>
MapObjects(func, inputData, et, cn, cls) transformUp {
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
case other =>
throw QueryCompilationErrors.dataTypeMismatchForDeserializerError(other,
"array")
}
case u: UnresolvedCatalystToExternalMap if u.child.resolved =>
u.child.dataType match {
case _: MapType =>
CatalystToExternalMap(u) transformUp {
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
case other =>
throw QueryCompilationErrors.dataTypeMismatchForDeserializerError(other, "map")
}
}
validateNestedTupleFields(result)
result
}
}
private def fail(schema: StructType, maxOrdinal: Int): Unit = {
throw QueryCompilationErrors.fieldNumberMismatchForDeserializerError(schema, maxOrdinal)
}
/**
* For each top-level Tuple field, we use [[GetColumnByOrdinal]] to get its corresponding column
* by position. However, the actual number of columns may be different from the number of Tuple
* fields. This method is used to check the number of columns and fields, and throw an
* exception if they do not match.
*/
private def validateTopLevelTupleFields(
deserializer: Expression, inputs: Seq[Attribute]): Unit = {
val ordinals = deserializer.collect {
case GetColumnByOrdinal(ordinal, _) => ordinal
}.distinct.sorted
if (ordinals.nonEmpty && ordinals != inputs.indices) {
fail(inputs.toStructType, ordinals.last)
}
}
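    // For example, deserializing a three-column input (a, b, c) into a hypothetical
    // Dataset[(Int, Int)] collects GetColumnByOrdinal ordinals Seq(0, 1), which differs from
    // inputs.indices Seq(0, 1, 2), so fail(inputs.toStructType, maxOrdinal = 1) is invoked.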
/**
* For each nested Tuple field, we use [[GetStructField]] to get its corresponding struct field
* by position. However, the actual number of struct fields may be different from the number
* of nested Tuple fields. This method is used to check the number of struct fields and nested
* Tuple fields, and throw an exception if they do not match.
*/
private def validateNestedTupleFields(deserializer: Expression): Unit = {
val structChildToOrdinals = deserializer
// There are 2 kinds of `GetStructField`:
// 1. resolved from `UnresolvedExtractValue`, and it will have a `name` property.
// 2. created when we build deserializer expression for nested tuple, no `name` property.
// Here we want to validate the ordinals of nested tuple, so we should only catch
// `GetStructField` without the name property.
.collect { case g: GetStructField if g.name.isEmpty => g }
.groupBy(_.child)
.mapValues(_.map(_.ordinal).distinct.sorted)
structChildToOrdinals.foreach { case (expr, ordinals) =>
val schema = expr.dataType.asInstanceOf[StructType]
if (ordinals != schema.indices) {
fail(schema, ordinals.last)
}
}
}
}
/**
* Resolves [[NewInstance]] by finding and adding the outer scope to it if the object being
* constructed is an inner class.
*/
object ResolveNewInstance extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case n: NewInstance if n.childrenResolved && !n.resolved =>
val outer = OuterScopes.getOuterScope(n.cls)
if (outer == null) {
throw QueryCompilationErrors.outerScopeFailureForNewInstanceError(n.cls.getName)
}
n.copy(outerPointer = Some(outer))
}
}
}
/**
* Replace the [[UpCast]] expression by [[Cast]], and throw exceptions if the cast may truncate.
*/
object ResolveUpCast extends Rule[LogicalPlan] {
private def fail(from: Expression, to: DataType, walkedTypePath: Seq[String]) = {
val fromStr = from match {
case l: LambdaVariable => "array element"
case e => e.sql
}
throw QueryCompilationErrors.upCastFailureError(fromStr, from, to, walkedTypePath)
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case u @ UpCast(child, _, _) if !child.resolved => u
case UpCast(_, target, _) if target != DecimalType && !target.isInstanceOf[DataType] =>
throw QueryCompilationErrors.unsupportedAbstractDataTypeForUpCastError(target)
case UpCast(child, target, walkedTypePath) if target == DecimalType
&& child.dataType.isInstanceOf[DecimalType] =>
assert(walkedTypePath.nonEmpty,
"object DecimalType should only be used inside ExpressionEncoder")
// SPARK-31750: if we want to upcast to the general decimal type, and the `child` is
// already decimal type, we can remove the `Upcast` and accept any precision/scale.
// This can happen for cases like `spark.read.parquet("/tmp/file").as[BigDecimal]`.
child
case UpCast(child, target: AtomicType, _)
if SQLConf.get.getConf(SQLConf.LEGACY_LOOSE_UPCAST) &&
child.dataType == StringType =>
Cast(child, target.asNullable)
case u @ UpCast(child, _, walkedTypePath) if !Cast.canUpCast(child.dataType, u.dataType) =>
fail(child, u.dataType, walkedTypePath)
case u @ UpCast(child, _, _) => Cast(child, u.dataType.asNullable)
}
}
}
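  // A minimal sketch of how ResolveUpCast above treats Dataset element types (the Dataset usage
  // is illustrative only):
  //   - reading an IntegerType column as a Dataset[Long] resolves UpCast(col, LongType, path)
  //     into Cast(col, LongType), since Int -> Long cannot lose information;
  //   - reading a LongType column as a Dataset[Int] fails analysis via
  //     QueryCompilationErrors.upCastFailureError, since the cast may truncate.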
  /** Rule that resolves (where possible), normalizes and rewrites column names in ALTER TABLE
   * changes based on case sensitivity; columns that cannot be resolved are left for CheckAnalysis. */
object ResolveAlterTableChanges extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case a @ AlterTable(_, _, t: NamedRelation, changes) if t.resolved =>
// 'colsToAdd' keeps track of new columns being added. It stores a mapping from a
// normalized parent name of fields to field names that belong to the parent.
// For example, if we add columns "a.b.c", "a.b.d", and "a.c", 'colsToAdd' will become
// Map(Seq("a", "b") -> Seq("c", "d"), Seq("a") -> Seq("c")).
val colsToAdd = mutable.Map.empty[Seq[String], Seq[String]]
val schema = t.schema
val normalizedChanges = changes.flatMap {
case add: AddColumn =>
def addColumn(
parentSchema: StructType,
parentName: String,
normalizedParentName: Seq[String]): TableChange = {
val fieldsAdded = colsToAdd.getOrElse(normalizedParentName, Nil)
val pos = findColumnPosition(add.position(), parentName, parentSchema, fieldsAdded)
val field = add.fieldNames().last
colsToAdd(normalizedParentName) = fieldsAdded :+ field
TableChange.addColumn(
(normalizedParentName :+ field).toArray,
add.dataType(),
add.isNullable,
add.comment,
pos)
}
val parent = add.fieldNames().init
if (parent.nonEmpty) {
// Adding a nested field, need to normalize the parent column and position
val target = schema.findNestedField(parent, includeCollections = true, conf.resolver)
if (target.isEmpty) {
// Leave unresolved. Throws error in CheckAnalysis
Some(add)
} else {
val (normalizedName, sf) = target.get
sf.dataType match {
case struct: StructType =>
Some(addColumn(struct, parent.quoted, normalizedName :+ sf.name))
case other =>
Some(add)
}
}
} else {
// Adding to the root. Just need to normalize position
Some(addColumn(schema, "root", Nil))
}
case typeChange: UpdateColumnType =>
// Hive style syntax provides the column type, even if it may not have changed
val fieldOpt = schema.findNestedField(
typeChange.fieldNames(), includeCollections = true, conf.resolver)
if (fieldOpt.isEmpty) {
// We couldn't resolve the field. Leave it to CheckAnalysis
Some(typeChange)
} else {
val (fieldNames, field) = fieldOpt.get
if (field.dataType == typeChange.newDataType()) {
// The user didn't want the field to change, so remove this change
None
} else {
Some(TableChange.updateColumnType(
(fieldNames :+ field.name).toArray, typeChange.newDataType()))
}
}
case n: UpdateColumnNullability =>
// Need to resolve column
resolveFieldNames(
schema,
n.fieldNames(),
TableChange.updateColumnNullability(_, n.nullable())).orElse(Some(n))
case position: UpdateColumnPosition =>
position.position() match {
case after: After =>
// Need to resolve column as well as position reference
val fieldOpt = schema.findNestedField(
position.fieldNames(), includeCollections = true, conf.resolver)
if (fieldOpt.isEmpty) {
Some(position)
} else {
val (normalizedPath, field) = fieldOpt.get
val targetCol = schema.findNestedField(
normalizedPath :+ after.column(), includeCollections = true, conf.resolver)
if (targetCol.isEmpty) {
// Leave unchanged to CheckAnalysis
Some(position)
} else {
Some(TableChange.updateColumnPosition(
(normalizedPath :+ field.name).toArray,
ColumnPosition.after(targetCol.get._2.name)))
}
}
case _ =>
// Need to resolve column
resolveFieldNames(
schema,
position.fieldNames(),
TableChange.updateColumnPosition(_, position.position())).orElse(Some(position))
}
case comment: UpdateColumnComment =>
resolveFieldNames(
schema,
comment.fieldNames(),
TableChange.updateColumnComment(_, comment.newComment())).orElse(Some(comment))
case rename: RenameColumn =>
resolveFieldNames(
schema,
rename.fieldNames(),
TableChange.renameColumn(_, rename.newName())).orElse(Some(rename))
case delete: DeleteColumn =>
resolveFieldNames(schema, delete.fieldNames(), TableChange.deleteColumn)
.orElse(Some(delete))
case column: ColumnChange =>
// This is informational for future developers
throw new UnsupportedOperationException(
"Please add an implementation for a column change here")
case other => Some(other)
}
a.copy(changes = normalizedChanges)
}
/**
* Returns the table change if the field can be resolved, returns None if the column is not
* found. An error will be thrown in CheckAnalysis for columns that can't be resolved.
*/
private def resolveFieldNames(
schema: StructType,
fieldNames: Array[String],
copy: Array[String] => TableChange): Option[TableChange] = {
val fieldOpt = schema.findNestedField(
fieldNames, includeCollections = true, conf.resolver)
fieldOpt.map { case (path, field) => copy((path :+ field.name).toArray) }
}
private def findColumnPosition(
position: ColumnPosition,
parentName: String,
struct: StructType,
fieldsAdded: Seq[String]): ColumnPosition = {
position match {
case null => null
case after: After =>
(struct.fieldNames ++ fieldsAdded).find(n => conf.resolver(n, after.column())) match {
case Some(colName) =>
ColumnPosition.after(colName)
case None =>
throw QueryCompilationErrors.referenceColNotFoundForAlterTableChangesError(after,
parentName)
}
case other => other
}
}
}
}
/**
* Removes [[SubqueryAlias]] operators from the plan. Subqueries are only required to provide
* scoping information for attributes and can be removed once analysis is complete.
*/
object EliminateSubqueryAliases extends Rule[LogicalPlan] {
// This is also called in the beginning of the optimization phase, and as a result
// is using transformUp rather than resolveOperators.
def apply(plan: LogicalPlan): LogicalPlan = AnalysisHelper.allowInvokingTransformsInAnalyzer {
plan transformUp {
case SubqueryAlias(_, child) => child
}
}
}
/**
 * Removes [[Union]] operators from the plan if they have just one child.
*/
object EliminateUnions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case u: Union if u.children.size == 1 => u.children.head
}
}
/**
* Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level
* expression in Project(project list) or Aggregate(aggregate expressions) or
* Window(window expressions). Notice that if an expression has other expression parameters which
* are not in its `children`, e.g. `RuntimeReplaceable`, the transformation for Aliases in this
* rule can't work for those parameters.
*/
object CleanupAliases extends Rule[LogicalPlan] with AliasHelper {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Project(projectList, child) =>
val cleanedProjectList = projectList.map(trimNonTopLevelAliases)
Project(cleanedProjectList, child)
case Aggregate(grouping, aggs, child) =>
val cleanedAggs = aggs.map(trimNonTopLevelAliases)
Aggregate(grouping.map(trimAliases), cleanedAggs, child)
case Window(windowExprs, partitionSpec, orderSpec, child) =>
val cleanedWindowExprs = windowExprs.map(trimNonTopLevelAliases)
Window(cleanedWindowExprs, partitionSpec.map(trimAliases),
orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child)
case CollectMetrics(name, metrics, child) =>
val cleanedMetrics = metrics.map(trimNonTopLevelAliases)
CollectMetrics(name, cleanedMetrics, child)
// Operators that operate on objects should only have expressions from encoders, which should
// never have extra aliases.
case o: ObjectConsumer => o
case o: ObjectProducer => o
case a: AppendColumns => a
case other =>
other transformExpressionsDown {
case Alias(child, _) => child
}
}
}
/**
 * Ignores the event-time watermark in batch queries, since watermarks are only supported in
 * Structured Streaming.
 * TODO: add this rule into the analyzer rule list.
*/
object EliminateEventTimeWatermark extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case EventTimeWatermark(_, _, child) if !child.isStreaming => child
}
}
/**
* Maps a time column to multiple time windows using the Expand operator. Since it's non-trivial to
* figure out how many windows a time column can map to, we over-estimate the number of windows and
* filter out the rows where the time column is not inside the time window.
*/
object TimeWindowing extends Rule[LogicalPlan] {
import org.apache.spark.sql.catalyst.dsl.expressions._
private final val WINDOW_COL_NAME = "window"
private final val WINDOW_START = "start"
private final val WINDOW_END = "end"
/**
* Generates the logical plan for generating window ranges on a timestamp column. Without
* knowing what the timestamp value is, it's non-trivial to figure out deterministically how many
* window ranges a timestamp will map to given all possible combinations of a window duration,
 * slide duration and start time (offset). Therefore, we over-estimate the number of windows
 * there may be, and filter out the invalid ones. We use a final Project operator to group
 * the window columns into a struct so they can be accessed as `window.start` and `window.end`.
*
* The windows are calculated as below:
* maxNumOverlapping <- ceil(windowDuration / slideDuration)
* for (i <- 0 until maxNumOverlapping)
* windowId <- ceil((timestamp - startTime) / slideDuration)
* windowStart <- windowId * slideDuration + (i - maxNumOverlapping) * slideDuration + startTime
* windowEnd <- windowStart + windowDuration
* return windowStart, windowEnd
*
 * This behaves as follows for the given parameters and a timestamp of 12:05. The valid windows
 * are marked with a +, and invalid ones are marked with an x. The invalid ones are filtered out
 * by the Filter operator.
* window: 12m, slide: 5m, start: 0m :: window: 12m, slide: 5m, start: 2m
* 11:55 - 12:07 + 11:52 - 12:04 x
* 12:00 - 12:12 + 11:57 - 12:09 +
* 12:05 - 12:17 + 12:02 - 12:14 +
*
* @param plan The logical plan
* @return the logical plan that will generate the time windows using the Expand operator, with
* the Filter operator for correctness and Project for usability.
*/
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan if p.children.size == 1 =>
val child = p.children.head
val windowExpressions =
p.expressions.flatMap(_.collect { case t: TimeWindow => t }).toSet
val numWindowExpr = windowExpressions.size
// Only support a single window expression for now
if (numWindowExpr == 1 &&
windowExpressions.head.timeColumn.resolved &&
windowExpressions.head.checkInputDataTypes().isSuccess) {
val window = windowExpressions.head
val metadata = window.timeColumn match {
case a: Attribute => a.metadata
case _ => Metadata.empty
}
def getWindow(i: Int, overlappingWindows: Int): Expression = {
val division = (PreciseTimestampConversion(
window.timeColumn, TimestampType, LongType) - window.startTime) / window.slideDuration
val ceil = Ceil(division)
// if the division is equal to the ceiling, our record is the start of a window
val windowId = CaseWhen(Seq((ceil === division, ceil + 1)), Some(ceil))
val windowStart = (windowId + i - overlappingWindows) *
window.slideDuration + window.startTime
val windowEnd = windowStart + window.windowDuration
CreateNamedStruct(
Literal(WINDOW_START) ::
PreciseTimestampConversion(windowStart, LongType, TimestampType) ::
Literal(WINDOW_END) ::
PreciseTimestampConversion(windowEnd, LongType, TimestampType) ::
Nil)
}
val windowAttr = AttributeReference(
WINDOW_COL_NAME, window.dataType, metadata = metadata)()
if (window.windowDuration == window.slideDuration) {
val windowStruct = Alias(getWindow(0, 1), WINDOW_COL_NAME)(
exprId = windowAttr.exprId, explicitMetadata = Some(metadata))
val replacedPlan = p transformExpressions {
case t: TimeWindow => windowAttr
}
          // For backwards compatibility we add a filter to remove rows whose time column is null
val filterExpr = IsNotNull(window.timeColumn)
replacedPlan.withNewChildren(
Filter(filterExpr,
Project(windowStruct +: child.output, child)) :: Nil)
} else {
val overlappingWindows =
math.ceil(window.windowDuration * 1.0 / window.slideDuration).toInt
val windows =
Seq.tabulate(overlappingWindows)(i => getWindow(i, overlappingWindows))
val projections = windows.map(_ +: child.output)
val filterExpr =
window.timeColumn >= windowAttr.getField(WINDOW_START) &&
window.timeColumn < windowAttr.getField(WINDOW_END)
val substitutedPlan = Filter(filterExpr,
Expand(projections, windowAttr +: child.output, child))
val renamedPlan = p transformExpressions {
case t: TimeWindow => windowAttr
}
renamedPlan.withNewChildren(substitutedPlan :: Nil)
}
} else if (numWindowExpr > 1) {
p.failAnalysis("Multiple time window expressions would result in a cartesian product " +
"of rows, therefore they are currently not supported.")
} else {
p // Return unchanged. Analyzer will throw exception later
}
}
}
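// A worked instance of the window formulas documented above (illustrative numbers only), for the
// 12:05 record with window = 12m, slide = 5m, start = 0m, measuring time in minutes (t = 725):
//   maxNumOverlapping = ceil(12 / 5) = 3
//   windowId          = ceil((725 - 0) / 5) = 145; the division is exact, so windowId becomes 146
//   i = 0: windowStart = (146 + 0 - 3) * 5 = 715 -> 11:55, windowEnd = 12:07
//   i = 1: windowStart = (146 + 1 - 3) * 5 = 720 -> 12:00, windowEnd = 12:12
//   i = 2: windowStart = (146 + 2 - 3) * 5 = 725 -> 12:05, windowEnd = 12:17
// which matches the three valid windows listed in the TimeWindowing scaladoc.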
/**
* Resolve a [[CreateNamedStruct]] if it contains [[NamePlaceholder]]s.
*/
object ResolveCreateNamedStruct extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveExpressions {
case e: CreateNamedStruct if !e.resolved =>
val children = e.children.grouped(2).flatMap {
case Seq(NamePlaceholder, e: NamedExpression) if e.resolved =>
Seq(Literal(e.name), e)
case kv =>
kv
}
CreateNamedStruct(children.toList)
}
}
/**
 * Aggregate expressions in a subquery that reference the outer query block are pushed
 * down to the outer query block for evaluation. This rule updates such outer references
 * to AttributeReferences that refer to attributes from the parent/outer query block.
*
* For example (SQL):
* {{{
* SELECT l.a FROM l GROUP BY 1 HAVING EXISTS (SELECT 1 FROM r WHERE r.d < min(l.b))
* }}}
* Plan before the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < min(outer(b#227))) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
* Plan after the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < outer(min(b#227)#249)) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
*/
object UpdateOuterReferences extends Rule[LogicalPlan] {
private def stripAlias(expr: Expression): Expression = expr match { case a: Alias => a.child }
private def updateOuterReferenceInSubquery(
plan: LogicalPlan,
refExprs: Seq[Expression]): LogicalPlan = {
plan resolveExpressions { case e =>
val outerAlias =
refExprs.find(stripAlias(_).semanticEquals(stripOuterReference(e)))
outerAlias match {
case Some(a: Alias) => OuterReference(a.toAttribute)
case _ => e
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = {
plan resolveOperators {
case f @ Filter(_, a: Aggregate) if f.resolved =>
f transformExpressions {
case s: SubqueryExpression if s.children.nonEmpty =>
// Collect the aliases from output of aggregate.
val outerAliases = a.aggregateExpressions collect { case a: Alias => a }
// Update the subquery plan to record the OuterReference to point to outer query plan.
s.withNewPlan(updateOuterReferenceInSubquery(s.plan, outerAliases))
}
}
}
}
|
shuangshuangwang/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
|
Scala
|
apache-2.0
| 170,405 |
package org.brandonhaynes.schimmy
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Writable
import org.apache.hadoop.mapreduce
import org.apache.hadoop.mapreduce.InputFormat
import org.brandonhaynes.support.ComposablePartitionedIterativeJob
/** Base class for a Schimmy-enabled map/reduce job (Lin & Dyer, 2010); here this job is assumed to be iterative and
* composable (i.e., it has the same input and output key and value types) -- this is not strictly necessary for
* Schimmy (intermediate types need only be a superset of input types), but is generally the case.
*
* The Schimmy pattern increases the performance of some map/reduce jobs by reducing the cost of the intermediate
* shuffle. It does so by performing a merge-join between the intermediate result and the original input to the
* step; accordingly, static and (otherwise unmodified) pairs need not be emitted into the shuffle. Since the
* cost of shuffling exceeds that of the merge join, a performance gain is realized.
*
* This class automatically performs the Schimmy merge-join and presents the aggregate groups to each reducer
* invocation. Input pairs are yielded prior to intermediate pairs in the value iterator.
*
* Note that in order to properly perform the merge-join, it is necessary to fix the number of partitions; similarly,
* the job input should be pre-partitioned by the same partition function.
*
* @param configurationTemplate Template configuration from which to draw metadata
* @param name Name of the job
* @param partitions Number of partitions in the job
* @param step Step number of this job in the iterative process
* @param steps Total number of steps in the iterative job
* @tparam Key Key used for the iterative job
* @tparam Value Value used for the iterative job
* @tparam Mapper Class containing the mapping function for this step of the iterative job
* @tparam Reducer Class containing the reduction function for this step of the iterative job
* @tparam Partitioner Partitioner for the iterative job
* @tparam Format Input format for the iterative job
*/
class SchimmyJob[Key <: Writable : Manifest,
Value <: Writable : Manifest,
Mapper <: mapreduce.Mapper[Key, Value, Key, Value] : Manifest,
Reducer <: mapreduce.Reducer[Key, Value, Key, Value] : Manifest,
Partitioner <: mapreduce.Partitioner[Key, Value] : Manifest,
Format <: InputFormat[Key, Value] : Manifest](
configurationTemplate:Configuration, name:String, partitions:Int, step:Int, steps:Int)
extends ComposablePartitionedIterativeJob[Key, Value, Mapper, Reducer, Partitioner, Format](
configurationTemplate, name, partitions, step, steps) { }
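// A hypothetical concrete step (sketch only; PageRankMapper and PageRankReducer are illustrative
// names, not part of this project, and Hadoop's Text, DoubleWritable, HashPartitioner and
// SequenceFileInputFormat are assumed to be imported):
//   class PageRankStep(conf: Configuration, partitions: Int, step: Int, steps: Int)
//     extends SchimmyJob[Text, DoubleWritable,
//                        PageRankMapper, PageRankReducer,
//                        HashPartitioner[Text, DoubleWritable],
//                        SequenceFileInputFormat[Text, DoubleWritable]](
//       conf, "page-rank", partitions, step, steps)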
|
BrandonHaynes/timr
|
src/org/brandonhaynes/schimmy/SchimmyJob.scala
|
Scala
|
mit
| 2,793 |
package example
import domala._
import example.util.prettyPrint
object ExampleApp extends App {
implicit val config: jdbc.Config = ExampleConfig
val dao: PersonDao = PersonDao.impl
def myPrint(title: String, x: Any): Unit = println(s"[$title]: ${prettyPrint(x)}")
Required {
dao.create()
myPrint("initial data", dao.selectAll())
// new person insert
val newPerson = Person(
ID.notAssigned,
Name("name1"),
Some(10),
Address("city1", "street1"),
Some(ID(1)),
-1
)
myPrint("insert entity", newPerson)
val inserted = dao.insert(newPerson)
myPrint("inserted entity", inserted)
myPrint("data after insert", dao.selectAll())
// Person(id = 2) update to (age + 1)
dao.selectById(2).foreach(entity =>
dao.update(entity.copy(age = entity.age.map(_ + 1)))
)
// Person(id = 1) delete
dao.selectById(1).foreach(entity =>
dao.delete(entity)
)
myPrint("data after modify", dao.selectAll())
myPrint("like select result", dao.selectByName("n"))
myPrint("join select result", dao.selectWithDepartmentById(2))
myPrint("sql interpolation", select"select * from person where id = 2".getMapList)
myPrint("sql interpolation", select"select /*%expand*/* from person where id = 2".getList[Person])
}
}
|
bakenezumi/domala
|
example/src/main/scala/example/ExampleApp.scala
|
Scala
|
apache-2.0
| 1,327 |
import sbt.Keys.{name, _}
object Settings {
lazy val commonSettings = Seq(
organization := "org.gscopelliti",
scalaVersion := "2.11.9"
)
lazy val apiGatewaySettings = Seq(
name := "Api Gateway",
version := "1.0",
javacOptions ++= Seq("-source", "1.8", "-target", "1.8"), //, "-Xmx2G"),
scalacOptions ++= Seq("-deprecation", "-unchecked")
)
lazy val noteServiceSettings = Seq(
name := "Note Service",
version := "1.0",
javacOptions ++= Seq("-source", "1.8", "-target", "1.8"), //, "-Xmx2G"),
scalacOptions ++= Seq("-deprecation", "-unchecked")
)
}
|
PScopelliti/ProjectTracker
|
project/Settings.scala
|
Scala
|
apache-2.0
| 603 |
package com.github.gigurra.glasciia
import com.badlogic.gdx.files.FileHandle
import com.badlogic.gdx.graphics.g2d.TextureRegion
import com.badlogic.gdx.graphics.{Pixmap, Texture}
object StaticImage {
def fromFile(fileHandle: FileHandle,
useMipMaps: Boolean = true,
minFilter: Texture.TextureFilter = Texture.TextureFilter.MipMapLinearLinear,
magFilter: Texture.TextureFilter = Texture.TextureFilter.Linear): TextureRegion = {
val texture = new Texture(fileHandle, useMipMaps)
texture.setFilter(minFilter, magFilter)
new TextureRegion(texture)
}
def fromPixMap(pixMap: Pixmap,
useMipMaps: Boolean = true,
minFilter: Texture.TextureFilter = Texture.TextureFilter.MipMapLinearLinear,
magFilter: Texture.TextureFilter = Texture.TextureFilter.Linear): TextureRegion = {
val texture = new Texture(pixMap, useMipMaps)
texture.setFilter(minFilter, magFilter)
new TextureRegion(texture)
}
}
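// Typical usage (sketch; the asset path is illustrative):
//   val logo: TextureRegion = StaticImage.fromFile(Gdx.files.internal("images/logo.png"))
// which loads the texture with mipmaps enabled, a MipMapLinearLinear min filter and a Linear
// mag filter by default.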
|
GiGurra/glasciia
|
glasciia-core/src/main/scala/com/github/gigurra/glasciia/StaticImage.scala
|
Scala
|
mit
| 1,014 |
package com.prediction
import akka.actor.Actor
class UpdateTrieActor extends Actor {
override def receive: Receive = {
case sentenceTokens: SentenceTokens =>
val tokens = sentenceTokens.tokens
      // drop(1) is safe for an empty token list, unlike tail, which would throw
      val remainingTokens = tokens.drop(1)
tokens.headOption
.foreach(primaryKey => {
val currentTrie = TrieDataStore.getTrieFromDataStore(primaryKey)
currentTrie.addSubsentenceTokens(remainingTokens)
TrieDataStore.addTrieToDataStore(primaryKey, currentTrie)
println(s"LOG: Added ${tokens.mkString(" ")} to the trie")
sender ! currentTrie
})
}
}
|
vineetfrozeninferno/ActorBasedWordPrediction
|
app/com/prediction/UpdateTrieActor.scala
|
Scala
|
gpl-3.0
| 627 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.hibench.sparkbench.ml
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.classification.{SVMModel, SVMWithSGD}
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.regression.LabeledPoint
import scopt.OptionParser
object SVMWithSGDExample {
case class Params(
numIterations: Int = 100,
stepSize: Double = 1.0,
regParam: Double = 0.01,
dataPath: String = null
)
def main(args: Array[String]): Unit = {
val defaultParams = Params()
val parser = new OptionParser[Params]("SVM") {
head("SVM: an example of SVM for classification.")
opt[Int]("numIterations")
.text(s"numIterations, default: ${defaultParams.numIterations}")
.action((x,c) => c.copy(numIterations = x))
opt[Double]("stepSize")
.text(s"stepSize, default: ${defaultParams.stepSize}")
.action((x,c) => c.copy(stepSize = x))
opt[Double]("regParam")
.text(s"regParam, default: ${defaultParams.regParam}")
.action((x,c) => c.copy(regParam = x))
arg[String]("<dataPath>")
.required()
.text("data path of SVM")
.action((x, c) => c.copy(dataPath = x))
}
parser.parse(args, defaultParams) match {
case Some(params) => run(params)
case _ => sys.exit(1)
}
}
def run(params: Params): Unit = {
val conf = new SparkConf().setAppName(s"SVM with $params")
val sc = new SparkContext(conf)
val dataPath = params.dataPath
val numIterations = params.numIterations
val stepSize = params.stepSize
val regParam = params.regParam
val data: RDD[LabeledPoint] = sc.objectFile(dataPath)
// Split data into training (60%) and test (40%).
val splits = data.randomSplit(Array(0.6, 0.4), seed = 11L)
val training = splits(0).cache()
val test = splits(1)
// Run training algorithm to build the model
val model = SVMWithSGD.train(training, numIterations, stepSize, regParam)
// Clear the default threshold.
model.clearThreshold()
// Compute raw scores on the test set.
val scoreAndLabels = test.map { point =>
val score = model.predict(point.features)
(score, point.label)
}
// Get evaluation metrics.
val metrics = new BinaryClassificationMetrics(scoreAndLabels)
val auROC = metrics.areaUnderROC()
println("Area under ROC = " + auROC)
sc.stop()
}
}
|
maismail/HiBench
|
sparkbench/ml/src/main/scala/com/intel/sparkbench/ml/SVMWithSGDExample.scala
|
Scala
|
apache-2.0
| 3,293 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.integrationtest.jobs
import java.nio.file.Paths
import com.google.common.base.Charsets
import com.google.common.io.Files
import org.apache.spark.SparkException
import org.apache.spark.sql.SparkSession
private[spark] object FileExistenceTest {
def main(args: Array[String]): Unit = {
if (args.length < 2) {
throw new IllegalArgumentException(
s"Invalid args: ${args.mkString}, " +
"Usage: FileExistenceTest <source-file> <expected contents>")
}
// Can't use SparkContext.textFile since the file is local to the driver
val file = Paths.get(args(0)).toFile
if (!file.exists()) {
throw new SparkException(s"Failed to find file at ${file.getAbsolutePath}")
} else {
// scalastyle:off println
val contents = Files.toString(file, Charsets.UTF_8)
if (args(1) != contents) {
throw new SparkException(s"Contents do not match. Expected: ${args(1)}," +
s" actual: $contents")
} else {
println(s"File found at ${file.getAbsolutePath} with correct contents.")
}
// scalastyle:on println
}
while (true) {
Thread.sleep(600000)
}
}
}
|
kimoonkim/spark
|
resource-managers/kubernetes/integration-tests-spark-jobs/src/main/scala/org/apache/spark/deploy/kubernetes/integrationtest/jobs/FileExistenceTest.scala
|
Scala
|
apache-2.0
| 2,009 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.oflow
import kumoi.core.Config
/**
*
* @author Akiyoshi Sugiki
*/
object OFlowCommon {
val ofcHost = Config("impl.oflow.node", "localhost")
val ofcPort = Config("impl.oflow.port", 10030)
val ofcName = Config("impl.oflow.name", "ofc")
val matchImpl = Config("impl.oflow.match", "kumoi.impl.oflow.floodlight.FloodlightMatch")
}
|
axi-sugiki/kumoi
|
src/kumoi/impl/oflow/OFlowCommon.scala
|
Scala
|
apache-2.0
| 978 |
package models
import play.api.db.slick.Config.driver.simple._
case class EGAAccession(id: Option[Int], resourceType: String, accession: String, refname: String, releaseName: String, created: java.sql.Timestamp)
class EGAAccessionTable(tag: Tag) extends Table[EGAAccession](tag, "ega_accession") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def resourceType = column[String]("resource_type", O.NotNull)
def accession = column[String]("accession", O.NotNull)
def refname = column[String]("refname", O.NotNull)
def releaseName = column[String]("release_name", O.NotNull)
def created = column[java.sql.Timestamp]("created_tmstp", O.NotNull)
def * = (id.?, resourceType, accession, refname, releaseName, created) <> (EGAAccession.tupled, EGAAccession.unapply)
}
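// Typical Slick usage (sketch only, not part of the original mapping; assumes an implicit
// session and the driver imports at the call site):
//   val egaAccessions = TableQuery[EGAAccessionTable]
//   egaAccessions.filter(_.resourceType === "sample").list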
|
seqprodbio/restoule
|
app/models/EGAAccession.scala
|
Scala
|
gpl-3.0
| 791 |
/**
* Copyright 2015 Peter Nerg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javascalautils.converters
/**
* Contains the converters from Java -> Scala, both implicit as well as explicit.
* @author Peter Nerg
*/
package object j2s {}
|
pnerg/java-scala-util-converter
|
src/main/scala/javascalautils/converters/j2s/package.scala
|
Scala
|
apache-2.0
| 775 |
package org.scalaide.util.internal
/** Utility to unify how we convert settings to preference names */
object SettingConverterUtil {
val USE_PROJECT_SETTINGS_PREFERENCE="scala.compiler.useProjectSettings"
val SCALA_DESIRED_INSTALLATION="scala.compiler.installation"
/** Warning:
* This should no longer be user-accessible. Do not use in UI.
   * It's a hail-Mary preference deduced from the setting above, to be saved
   * and used in case that setting can no longer be resolved.
*/
val SCALA_DESIRED_SOURCELEVEL="scala.compiler.sourceLevel"
/** Creates preference name from "name" of a compiler setting. */
def convertNameToProperty(name : String) = {
    // Returns the underlying name without the leading '-'
name.substring(1)
}
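  // A minimal example of the conversion (illustrative setting names):
  //   convertNameToProperty("-deprecation")  // "deprecation"
  //   convertNameToProperty("-Xplugin")      // "Xplugin"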
}
|
Kwestor/scala-ide
|
org.scala-ide.sdt.core/src/org/scalaide/util/internal/SettingConverterUtil.scala
|
Scala
|
bsd-3-clause
| 760 |
package doobie.enum
import doobie.util.invariant._
import java.sql.Connection._
import scalaz.Equal
import scalaz.std.anyVal.intInstance
object transactionisolation {
/** @group Implementation */
sealed abstract class TransactionIsolation(val toInt: Int)
/** @group Values */ case object TransactionNone extends TransactionIsolation(TRANSACTION_NONE)
/** @group Values */ case object TransactionReadUncommitted extends TransactionIsolation(TRANSACTION_READ_UNCOMMITTED)
/** @group Values */ case object TransactionReadCommitted extends TransactionIsolation(TRANSACTION_READ_COMMITTED)
/** @group Values */ case object TransactionRepeatableRead extends TransactionIsolation(TRANSACTION_REPEATABLE_READ)
/** @group Values */ case object TransactionSerializable extends TransactionIsolation(TRANSACTION_SERIALIZABLE)
/** @group Implementation */
object TransactionIsolation {
def fromInt(n: Int): Option[TransactionIsolation] =
Some(n) collect {
case TransactionNone.toInt => TransactionNone
case TransactionReadUncommitted.toInt => TransactionReadUncommitted
case TransactionReadCommitted.toInt => TransactionReadCommitted
case TransactionRepeatableRead.toInt => TransactionRepeatableRead
case TransactionSerializable.toInt => TransactionSerializable
}
def unsafeFromInt(n: Int): TransactionIsolation =
fromInt(n).getOrElse(throw InvalidOrdinal[TransactionIsolation](n))
implicit val EqualTransactionIsolation: Equal[TransactionIsolation] =
Equal.equalBy(_.toInt)
}
}
|
jamescway/doobie
|
core/src/main/scala/doobie/enum/transactionisolation.scala
|
Scala
|
mit
| 1,609 |
package io.surfkit.data
import play.api.libs.json.Json
object Data {
case class Anchor(text: String, href: String, title: String)
implicit val anchorWrites = Json.writes[Anchor]
implicit val anchorReads = Json.reads[Anchor]
case class TopicAnchor( topic: String, url: String, links: List[Anchor])
implicit val tanchorWrites = Json.writes[TopicAnchor]
implicit val tanchorReads = Json.reads[TopicAnchor]
case class EntityMeta(
uri:String,
rss: Set[String],
timestamp: String,
version:String,
icon: String,
thumb: String,
domain:String,
publishDate:Option[String],
contentType:String,
title:String,
description:String,
authors:Set[String],
keywords:Set[String],
coverUrl:String,
imgs:Set[String],
meta:Map[String, String],
content:Option[String],
raw:Option[String]
)
implicit val metaWrites = Json.writes[EntityMeta]
implicit val metaReads = Json.reads[EntityMeta]
case class TopicEtl(topic: String, url: String, sites:List[EntityMeta])
implicit val tmetaWrites = Json.writes[TopicEtl]
implicit val tmetaReads = Json.reads[TopicEtl]
case class Rss(site: String, rss:Set[String])
implicit val rssWrites = Json.writes[Rss]
implicit val rssReads = Json.reads[Rss]
case class TopicRss(topic: String, url: String, sites:List[Rss])
implicit val trssWrites = Json.writes[TopicRss]
implicit val trssReads = Json.reads[TopicRss]
}
|
coreyauger/scala-alltop-to-rss
|
src/main/scala/io/surfkit/data/Data.scala
|
Scala
|
mit
| 1,839 |
package dotty.tools.scaladoc
import dotty.tools.scaladoc.tasty.ScaladocTastyInspector
import collection.JavaConverters._
import transformers._
case class Module(rootPackage: Member, members: Map[DRI, Member])
object ScalaModuleProvider:
def mkModule()(using ctx: DocContext): Module =
val (result, rootDoc) = ScaladocTastyInspector().result()
val (rootPck, rest) = result.partition(_.name == "API")
val packageMembers = (rest ++ rootPck.flatMap(_.members))
.filter(p => p.members.nonEmpty || p.docs.nonEmpty).sortBy(_.name)
def flattenMember(m: Member): Seq[(DRI, Member)] = (m.dri -> m) +: m.members.flatMap(flattenMember)
val topLevelPackage =
Member("API", site.apiPageDRI, Kind.RootPackage, members = packageMembers, docs = rootDoc)
val original = Module(topLevelPackage, flattenMember(topLevelPackage).toMap)
val transformers = List(
ImplicitMembersExtensionTransformer(),
InheritanceInformationTransformer(),
SealedMarksGraphTransformer()
)
transformers.foldLeft(original)((module, transformer) => transformer.apply(module))
|
lampepfl/dotty
|
scaladoc/src/dotty/tools/scaladoc/ScalaModuleProvider.scala
|
Scala
|
apache-2.0
| 1,106 |
package vaadin.scala
import vaadin.scala.mixins.AbstractSelectMixin
import vaadin.scala.mixins.AbstractFieldMixin
import vaadin.scala.mixins.ContainerMixin
import vaadin.scala.mixins.ContainerViewerMixin
import vaadin.scala.internal.WrapperUtil
import vaadin.scala.mixins.NewItemHandlerMixin
package mixins {
trait AbstractSelectMixin extends AbstractFieldMixin with ContainerMixin with ContainerViewerMixin
trait NewItemHandlerMixin extends TypedScaladinMixin[NewItemHandler]
}
object AbstractSelect {
object ItemCaptionMode extends Enumeration {
import com.vaadin.ui.AbstractSelect._
val Id = Value(ITEM_CAPTION_MODE_ID)
val Item = Value(ITEM_CAPTION_MODE_ITEM)
val Index = Value(ITEM_CAPTION_MODE_INDEX)
val ExplicitDefaultsId = Value(ITEM_CAPTION_MODE_EXPLICIT_DEFAULTS_ID)
val Explicit = Value(ITEM_CAPTION_MODE_EXPLICIT)
val IconOnly = Value(ITEM_CAPTION_MODE_ICON_ONLY)
val Property = Value(ITEM_CAPTION_MODE_PROPERTY)
}
}
abstract class AbstractSelect(override val p: com.vaadin.ui.AbstractSelect with AbstractSelectMixin)
extends AbstractField(p) with Container with Container.Viewer {
// TODO: Move newItemHandler and newItemsAllowed to a trait because not all subclasses of AbstractSelect support new items.
//initial setup of the default newItemHandler
newItemHandler = new DefaultNewItemHandler(this)
def newItemHandler: Option[NewItemHandler] = WrapperUtil.wrapperFor[NewItemHandler](p.getNewItemHandler)
def newItemHandler_=(newItemHandler: NewItemHandler): Unit = p.setNewItemHandler(newItemHandler.p)
def newItemHandler_=(newItemHandler: Option[NewItemHandler]): Unit = if (newItemHandler.isDefined) p.setNewItemHandler(newItemHandler.get.p) else p.setNewItemHandler(null)
def newItemsAllowed: Boolean = p.isNewItemsAllowed
def newItemsAllowed_=(newItemsAllowed: Boolean): Unit = p.setNewItemsAllowed(newItemsAllowed)
def itemCaptionMode = AbstractSelect.ItemCaptionMode(p.getItemCaptionMode)
def itemCaptionMode_=(itemCaptionMode: AbstractSelect.ItemCaptionMode.Value) = p.setItemCaptionMode(itemCaptionMode.id)
def itemCaptionPropertyId: Option[Any] = Option(p.getItemCaptionPropertyId)
def itemCaptionPropertyId_=(itemCaptionPropertyId: Option[Any]) = p.setItemCaptionPropertyId(itemCaptionPropertyId.orNull)
def itemCaptionPropertyId_=(itemCaptionPropertyId: Any) = p.setItemCaptionPropertyId(itemCaptionPropertyId)
def itemIconPropertyId: Option[Any] = Option(p.getItemIconPropertyId)
def itemIconPropertyId_=(itemIconPropertyId: Option[Any]) = p.setItemIconPropertyId(itemIconPropertyId.orNull)
def itemIconPropertyId_=(itemIconPropertyId: Any) = p.setItemIconPropertyId(itemIconPropertyId)
def nullSelectionAllowed = p.isNullSelectionAllowed
def nullSelectionAllowed_=(nullSelectionAllowed: Boolean) = p.setNullSelectionAllowed(nullSelectionAllowed)
def nullSelectionItemId: Option[Any] = Option(p.getNullSelectionItemId)
def nullSelectionItemId_=(nullSelectionItemId: Option[Any]) = p.setNullSelectionItemId(nullSelectionItemId.orNull)
def nullSelectionItemId_=(nullSelectionItemId: Any) = p.setNullSelectionItemId(nullSelectionItemId)
def selected(itemId: Any) = p.isSelected(itemId)
def select(itemId: Any) = p.select(itemId)
def unselect(itemId: Any) = p.unselect(itemId)
// Container.Container:
protected def wrapItem(unwrapped: com.vaadin.data.Item): Item = new IndexedContainerItem(unwrapped)
}
trait NewItemHandler extends Wrapper {
override val p: com.vaadin.ui.AbstractSelect.NewItemHandler with NewItemHandlerMixin = new NewItemHandlerDelegator
p.wrapper = this
def addNewItem(newItemCaption: String): Unit
}
class NewItemHandlerDelegator extends com.vaadin.ui.AbstractSelect.NewItemHandler with NewItemHandlerMixin {
def addNewItem(newItemCaption: String): Unit = wrapper.addNewItem(newItemCaption)
}
//copied here because original is a non-static inner class
class DefaultNewItemHandler(select: AbstractSelect) extends NewItemHandler {
def addNewItem(newItemCaption: String) = {
if (select.readOnly) throw new com.vaadin.data.Property.ReadOnlyException()
if (select.addItem(newItemCaption).isDefined) {
// Sets the caption property, if used
if (select.itemCaptionPropertyId.isDefined) {
try {
select.property(newItemCaption, select.itemCaptionPropertyId.get).get.value = newItemCaption
} catch {
case ignored: com.vaadin.data.Property.ConversionException =>
// The conversion exception is safely ignored, the caption is just missing
}
}
if (select.isInstanceOf[MultiSelectable] && select.asInstanceOf[MultiSelectable].multiSelect) {
var values: Set[Any] = select.value.get.asInstanceOf[Iterable[Any]].toSet
values += newItemCaption
select.value = values
} else {
select.value = newItemCaption
}
}
}
}
|
CloudInABox/scalavaadinutils
|
src/main/scala/vaadin/scala/AbstractSelect.scala
|
Scala
|
mit
| 4,902 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller, Vishnu Gowda Harish, Vinay Kumar Bingi
* @version 1.3
* @date Thu Dec 15 12:47:37 EST 2016
* @see LICENSE (MIT style license file).
*/
package testing.linalgebra
import org.junit.Test
import scalation.linalgebra.{MatrixD, RleMatrixD, VectorD}
import scalation.random.{Randi0, RandomMatD, RandomVecD}
import testing.Tester
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `RleMatrixD_T` driver class conducts unit testing on the `RleMatrixD` class
* by invoking the `RleMatrixD_T` testing object. Run 'test-only' to test `RleMatrixD`
* or 'test' to run all unit tests.
*------------------------------------------------------------------------------
* > test-only testing.linalgebra.RleMatrixD_T
* > test
*/
class RleMatrixD_T { @Test def testAll () { RleMatrixD_T } }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `RleMatrixD_T` object conducts unit testing on the `RleMatrixD` class using the
* `Tester` trait. It compares correctness/performance of a method/operator 'call'
* to an 'oracle' and optionally a 'contender'.
*------------------------------------------------------------------------------
* All methods except 'this', 'apply', 'update', 'foreach' and 'hashCode' should be tested.
* May skip '=op' if 'op' is tested, e.g., skip '+=' if '+' is tested.
* Also the 'equals' and 'toString' are tested implicitly.
* Depending on the 'CORRECT' flag, it will either test correctness or performance.
* Note, if the code for the 'contender' or even the 'oracle' is significantly faster,
* the method/operator may need be to re-coded.
*------------------------------------------------------------------------------
* To run directly, uncomment "// with App" and run 'test:runMain'.
* > test:runMain testing.linalgebra.RleMatrixD_T
*/
object RleMatrixD_T extends Tester with App
{
// Reassign parameters from `Tester` trait as needed
DEBUG = false // debug flag
CORRECT = true // test correctness/performance
FOCUS = "" // method/operator to focus on, "" => all
KLASS = "RleMatrixD" // the class under test
ITER = 10 // number of test iterations
// Size parameter(s) used for variables in 'test' (customize per class)
private val dim1 = 50 // # matrix rows
private val dim2 = 50 // # matrix columns
// Random variate generators (customize per class)
private val rmg = RandomMatD (dim1, dim2) // random matrix generator
private val rvg1 = RandomVecD (dim1) // random vector generator
private val rvg2 = RandomVecD (dim2) // random vector generator
private val rig1 = Randi0 (0, dim1 - 1) // random integer/index generator
private val rig2 = Randi0 (0, dim2 - 1) // random integer/index generator
private val rig3 = Randi0 (math.min (dim1 - 1, dim2 - 1)) // random integer/index generator
// Variables used in 'test' (customize per class)
private var x = rmg.rlegenc // first Rle matrix
private var y = rmg.rlegenc // second Rle matrix
private var z = RleMatrixD (rmg.gen) // third Rle matrix
private var xD = x.toDense // first dense matrix
private var yD = y.toDense // second dense matrix
private var zD = z.toDense // third dense matrix
private var u = rvg1.repgen // first Rle vector (Row Vector)
private var v = rvg2.repgen // second Rle vector (Column Vector)
private var uD = u.toDense // first dense vector (Row Vector)
private var vD = v.toDense // second dense vector (Column Vector)
private var j = 0 // first integer/index value
private var k = 0 // second integer/index value
private var s = 2 // third integer/non zero value
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Randomize all variables used in `Tester`s 'test' method.
*/
def randomize ()
{
x = rmg.rlegenc // randomly reset variables
y = rmg.rlegenc
z = RleMatrixD (rmg.gen)
xD = x.toDense
yD = y.toDense
zD = z.toDense
j = rig1.igen
k = rig2.igen
s = rig3.igen
} // randomize
testClass ()
println ("\nTest no argument methods/unary operators")
test ("t", x.t.toDense,
xD.t)
test ("sum", x.sum,
xD.sum)
// test ("det", xD.det,
// x.det) // WORKS BUT VERY HIGH COMPLEXITY. DO IT AS DET L * DET U
test ("sumLower", x.sumLower,
xD.sumLower)
test ("sumAbs", x.sumAbs,
xD.sumAbs)
test ("reduce", z.reduce.toDense,
zD.reduce)
test ("nullspace_ip", z.nullspace_ip.toDense,
zD.nullspace_ip)
test ("nullspace", z.nullspace.toDense,
zD.nullspace)
test ("isRectangular", x.isRectangular,
xD.isRectangular)
test ("inverse", z.inverse.toDense,
zD.inverse)
test ("inverse_ip", z.inverse_ip.toDense,
zD.inverse_ip)
test ("upperT", x.upperT.toDense,
xD.upperT)
test ("lowerT", x.lowerT.toDense,
xD.lowerT)
test ("toInt", x.toInt.toDense,
xD.toInt)
test ("trace", x.trace,
xD.trace)
println ("\nTest methods that take scalar arguments")
test ("+s", (x + s).toDense,
xD + s)
test ("+=s", (x += s).toDense,
xD += s)
test ("-s", (x - s).toDense,
xD - s)
test ("-=s", (x -= s).toDense,
xD -= s)
test ("*s", (x * s).toDense,
xD * s)
test ("*=s", (x *= s).toDense,
xD *= s)
test ("/s", (x / s).toDense,
xD / s)
test ("/=s", (x /= s).toDense,
xD /= s)
test ("~^", (x ~^ 5).toDense,
xD ~^ 5)
test ("col", (x.col (s)).toDense,
xD.col (s))
test ("diag", (x.diag (s, s)).toDense,
xD.diag (s, s))
test ("getDiag", (x.getDiag (s)).toDense,
xD getDiag (s))
test ("max", x max s,
xD max s)
test ("min", (x min s),
xD min s)
test ("sliceCol", (x.sliceCol (math.max(0, s - 1), s)).toDense,
xD.sliceCol (math.max(0, s - 1), s))
test ("sliceExclude", (x.sliceExclude (s, s)).toDense,
xD.sliceExclude (s, s))
println ("\nTest vector argument methods")
test ("+v", (x + v).toDense,
xD + v)
test ("+=v", (x += v).toDense,
xD += v)
test ("-v", (x - v).toDense,
xD - v)
test ("-=v", (x -= v).toDense,
xD -= v)
test ("*v", (x * v).toDense,
xD * v)
test ("**v", (x ** v).toDense,
xD ** v)
test ("**=v", (x **= v).toDense,
xD **= v)
test ("+:", (x.+: (u)).toDense,
xD.+: (uD))
test ("+:^", (x.+^: (v)).toDense,
xD.+^: (vD))
test (":+", (x.:+ (u)).toDense,
xD.:+ (uD))
test (":+^", (x.:^+ (v)).toDense,
xD.:^+ (vD))
test ("bsolve", (x bsolve v).toDense,
xD bsolve v)
test ("dot v", (x dot u).toDense,
xD dot u)
// test ("solve v", (z solve (v)).toDense, // TODO FLOATING POINT ERRORS
// zD solve (v))
println ("\nTest matrix argument methods")
test ("+", (x + y).toDense,
xD + yD)
test ("++", (x ++ y).toDense,
xD ++ yD)
test ("++^", (x ++^ y).toDense,
xD ++^ yD)
test ("+dense", (x + yD).toDense,
xD + yD)
test ("+=", (x += y).toDense,
xD += yD)
test ("-", (x - y).toDense,
xD - yD)
test ("-dense", (x - yD).toDense,
xD - yD)
test ("-=", (x -= y).toDense,
xD -= yD)
test ("*", (x * y).toDense,
xD * yD)
test ("*=", (x *= y).toDense,
xD *= yD)
test ("diag", (x diag y).toDense,
xD diag yD)
test ("dot", (x dot y).toDense,
xD dot yD)
test ("mdot", (x mdot y).toDense,
xD mdot yD)
} // RleMatrixD_T object
|
scalation/fda
|
scalation_1.3/scalation_mathstat/src/test/scala/testing/linalgebra/RleMatrixD_T.scala
|
Scala
|
mit
| 10,763 |
/** Adapted from https://github.com/sbt/sbt/blob/0.13/compile/interface/src/test/scala/xsbt/ScalaCompilerForUnitTesting.scala */
package xsbt
import xsbti.compile.SingleOutput
import java.io.File
import xsbti._
import sbt.io.IO
import xsbti.api.{ ClassLike, Def, DependencyContext }
import DependencyContext._
import xsbt.api.SameAPI
import sbt.internal.util.ConsoleLogger
import TestCallback.ExtractedClassDependencies
/**
* Provides common functionality needed for unit tests that require compiling
* source code using Scala compiler.
*/
class ScalaCompilerForUnitTesting {
import scala.language.reflectiveCalls
/**
* Compiles given source code using Scala compiler and returns API representation
* extracted by ExtractAPI class.
*/
def extractApiFromSrc(src: String): Seq[ClassLike] = {
val (Seq(tempSrcFile), analysisCallback) = compileSrcs(src)
analysisCallback.apis(tempSrcFile)
}
/**
* Compiles given source code using Scala compiler and returns API representation
* extracted by ExtractAPI class.
*/
def extractApisFromSrcs(reuseCompilerInstance: Boolean)(srcs: List[String]*): Seq[Seq[ClassLike]] = {
val (tempSrcFiles, analysisCallback) = compileSrcs(srcs.toList, reuseCompilerInstance)
tempSrcFiles.map(analysisCallback.apis)
}
/**
* Extract used names from src provided as the second argument.
   * If `assertDefaultScope` is set to true it will fail if there is any name used in a scope other than Default
*
* The purpose of the first argument is to define names that the second
* source is going to refer to. Both files are compiled in the same compiler
* Run but only names used in the second src file are returned.
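   *
   * Illustrative usage (snippets made up for the example):
   * {{{
   *   extractUsedNamesFromSrc("class A { def foo = 1 }", "object B { new A().foo }")
   * }}}
   * returns the names used in the second snippet, keyed by the class names defined in it
   * (here something like Map("B" -> Set("A", "foo", ...))).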
*/
def extractUsedNamesFromSrc(
definitionSrc: String,
actualSrc: String,
assertDefaultScope: Boolean = true
): Map[String, Set[String]] = {
// we drop temp src file corresponding to the definition src file
val (Seq(_, tempSrcFile), analysisCallback) = compileSrcs(definitionSrc, actualSrc)
if (assertDefaultScope) for {
(className, used) <- analysisCallback.usedNamesAndScopes
analysisCallback.TestUsedName(name, scopes) <- used
} assert(scopes.size() == 1 && scopes.contains(UseScope.Default), s"$className uses $name in $scopes")
val classesInActualSrc = analysisCallback.classNames(tempSrcFile).map(_._1)
classesInActualSrc.map(className => className -> analysisCallback.usedNames(className)).toMap
}
/**
* Extract used names from the last source file in `sources`.
*
* The previous source files are provided to successfully compile examples.
* Only the names used in the last src file are returned.
*/
def extractUsedNamesFromSrc(sources: String*): Map[String, Set[String]] = {
val (srcFiles, analysisCallback) = compileSrcs(sources: _*)
srcFiles
.map { srcFile =>
val classesInSrc = analysisCallback.classNames(srcFile).map(_._1)
classesInSrc.map(className => className -> analysisCallback.usedNames(className)).toMap
}
.reduce(_ ++ _)
}
/**
* Compiles given source code snippets (passed as Strings) using Scala compiler and returns extracted
* dependencies between snippets. Source code snippets are identified by symbols. Each symbol should
* be associated with one snippet only.
*
* Snippets can be grouped to be compiled together in the same compiler run. This is
* useful to compile macros, which cannot be used in the same compilation run that
* defines them.
*
   * Symbols are used to express extracted dependencies between source code snippets. This way we have
   * a file system-independent way of testing dependencies between source code "files".
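   *
   * Illustrative usage (snippets made up for the example):
   * {{{
   *   extractDependenciesFromSrcs("class A", "class B extends A")
   * }}}
   * would report, among others, an inheritance dependency of B on A.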
*/
def extractDependenciesFromSrcs(srcs: List[List[String]]): ExtractedClassDependencies = {
val (_, testCallback) = compileSrcs(srcs, reuseCompilerInstance = true)
val memberRefDeps = testCallback.classDependencies collect {
case (target, src, DependencyByMemberRef) => (src, target)
}
val inheritanceDeps = testCallback.classDependencies collect {
case (target, src, DependencyByInheritance) => (src, target)
}
val localInheritanceDeps = testCallback.classDependencies collect {
case (target, src, LocalDependencyByInheritance) => (src, target)
}
ExtractedClassDependencies.fromPairs(memberRefDeps, inheritanceDeps, localInheritanceDeps)
}
def extractDependenciesFromSrcs(srcs: String*): ExtractedClassDependencies = {
extractDependenciesFromSrcs(List(srcs.toList))
}
/**
* Compiles given source code snippets written to temporary files. Each snippet is
* written to a separate temporary file.
*
* Snippets can be grouped to be compiled together in the same compiler run. This is
* useful to compile macros, which cannot be used in the same compilation run that
* defines them.
*
* The `reuseCompilerInstance` parameter controls whether the same Scala compiler instance
* is reused between compiling source groups. Separate compiler instances can be used to
* test stability of API representation (with respect to pickling) or to test handling of
* binary dependencies.
*
* The sequence of temporary files corresponding to passed snippets and analysis
* callback is returned as a result.
*/
def compileSrcs(groupedSrcs: List[List[String]],
reuseCompilerInstance: Boolean): (Seq[File], TestCallback) = {
// withTemporaryDirectory { temp =>
{
val temp = IO.createTemporaryDirectory
val analysisCallback = new TestCallback
val classesDir = new File(temp, "classes")
classesDir.mkdir()
lazy val commonCompilerInstanceAndCtx = prepareCompiler(classesDir, analysisCallback, classesDir.toString)
val files = for ((compilationUnit, unitId) <- groupedSrcs.zipWithIndex) yield {
      // optionally use a separate instance of the compiler for each group of sources
      // (when reuseCompilerInstance is false) to be able to test for bugs caused by
      // instability between the source and the pickled representation of types
val (compiler, ctx) = if (reuseCompilerInstance) commonCompilerInstanceAndCtx else
prepareCompiler(classesDir, analysisCallback, classesDir.toString)
val run = compiler.newRun(ctx)
val srcFiles = compilationUnit.toSeq.zipWithIndex map {
case (src, i) =>
val fileName = s"Test-$unitId-$i.scala"
prepareSrcFile(temp, fileName, src)
}
val srcFilePaths = srcFiles.map(srcFile => srcFile.getAbsolutePath).toList
run.compile(srcFilePaths)
// srcFilePaths.foreach(f => new File(f).delete)
srcFiles
}
(files.flatten.toSeq, analysisCallback)
}
}
def compileSrcs(srcs: String*): (Seq[File], TestCallback) = {
compileSrcs(List(srcs.toList), reuseCompilerInstance = true)
}
private def prepareSrcFile(baseDir: File, fileName: String, src: String): File = {
val srcFile = new File(baseDir, fileName)
IO.write(srcFile, src)
srcFile
}
private def prepareCompiler(outputDir: File, analysisCallback: AnalysisCallback, classpath: String = ".") = {
val args = Array.empty[String]
import dotty.tools.dotc.{Compiler, Driver}
import dotty.tools.dotc.core.Contexts._
val driver = new TestDriver
val ctx = (new ContextBase).initialCtx.fresh.setSbtCallback(analysisCallback)
driver.getCompiler(Array("-classpath", classpath, "-usejavacp", "-d", outputDir.getAbsolutePath), ctx)
}
private object ConsoleReporter extends Reporter {
def reset(): Unit = ()
def hasErrors: Boolean = false
def hasWarnings: Boolean = false
def printWarnings(): Unit = ()
def problems(): Array[xsbti.Problem] = Array.empty
def log(problem: xsbti.Problem): Unit = println(problem.message)
def comment(pos: Position, msg: String): Unit = ()
def printSummary(): Unit = ()
}
}
|
som-snytt/dotty
|
sbt-bridge/test/xsbt/ScalaCompilerForUnitTesting.scala
|
Scala
|
apache-2.0
| 7,909 |
package is.hail.backend
import is.hail.annotations.RegionPool
import is.hail.utils._
abstract class HailTaskContext {
def stageId(): Int
def partitionId(): Int
def attemptNumber(): Int
private lazy val thePool = RegionPool()
def getRegionPool(): RegionPool = thePool
def partSuffix(): String = {
val rng = new java.security.SecureRandom()
val fileUUID = new java.util.UUID(rng.nextLong(), rng.nextLong())
s"${ stageId() }-${ partitionId() }-${ attemptNumber() }-$fileUUID"
}
def finish(): Unit = {
log.info(s"TaskReport: stage=${ stageId() }, partition=${ partitionId() }, attempt=${ attemptNumber() }, " +
s"peakBytes=${ thePool.getHighestTotalUsage }, peakBytesReadable=${ formatSpace(thePool.getHighestTotalUsage) }, "+
s"chunks requested=${thePool.getUsage._1}, cache hits=${thePool.getUsage._2}")
thePool.close()
}
}
|
hail-is/hail
|
hail/src/main/scala/is/hail/backend/HailTaskContext.scala
|
Scala
|
mit
| 880 |
package redbot.utils
import play.api.libs.json.Format
import scala.collection.Map
import scala.collection.mutable
import scala.collection.immutable.Set
class JoinMap[L, R] private (private val backingL: mutable.Map[L, Set[R]],
private val backingR: mutable.Map[R, Set[L]]) {
def getL(key: L): Option[Set[R]] = backingL.get(key)
def getR(key: R): Option[Set[L]] = backingR.get(key)
def join(left: L, right: R): Unit = {
backingL(left) = backingL.getOrElse(left, Set.empty) + right
backingR(right) = backingR.getOrElse(right, Set.empty) + left
}
def unjoin(left: L, right: R): Boolean =
(for {
rSet <- getL(left)
if rSet contains right
lSet <- getR(right)
if lSet contains left
} yield {
backingL(left) = rSet - right
backingR(right) = lSet - left
}).isDefined
def isJoined(left: L, right: R): Boolean =
getL(left).exists(_ contains right)
def flip(): JoinMap[R, L] = new JoinMap(backingR.clone(), backingL.clone())
override def clone(): JoinMap[L, R] = new JoinMap(backingL.clone(), backingR.clone())
def keyedLeft: Map[L, Set[R]] = backingL
def keyedRight: Map[R, Set[L]] = backingR
def keysL: Iterable[L] = backingL.keys
def keysR: Iterable[R] = backingR.keys
}
object JoinMap {
def empty[L, R]: JoinMap[L, R] = new JoinMap(mutable.Map.empty, mutable.Map.empty)
def apply[L, R](map: Map[L, Set[R]]): JoinMap[L, R] = {
val j = JoinMap.empty[L, R]
for {
(l, rSet) <- map
r <- rSet
} j.join(l, r)
j
}
implicit def LeftKeyedFormat[A, B](implicit keyFormat: Format[A],
valuesFormat: Format[B]): Format[JoinMap[A, B]] = {
import play.api.libs.json.Reads
import play.api.libs.json.Writes
Format(
Reads.ArrayReads[(A, Set[B])].map { arr => JoinMap(arr.toMap) },
(o: JoinMap[A, B]) => Writes.arrayWrites[(A, Set[B])].writes(o.keyedLeft.toArray)
)
}
implicit def RightKeyedFormat[A, B](implicit keyFormat: Format[B],
valuesFormat: Format[A]): Format[JoinMap[A, B]] = {
import play.api.libs.json.Reads
import play.api.libs.json.Writes
Format(
Reads.ArrayReads[(B, Set[A])].map { arr => JoinMap(arr.toMap).flip() },
(o: JoinMap[A, B]) => Writes.arrayWrites[(B, Set[A])].writes(o.keyedRight.toArray)
)
}
}
|
JamesGallicchio/RedBot
|
src/main/scala/redbot/utils/JoinMap.scala
|
Scala
|
mit
| 2,426 |
package de.endrullis.sta
import java.awt.Color
import collection.mutable
import scala.language.implicitConversions
/**
* Implicit casts for some base variable types.
*
* @author Stefan Endrullis <[email protected]>
*/
trait BaseVarIC extends PosIC {
implicit def value2var[T,TM](value: T): Var[T,T] = Var[T](value)
implicit def var2stringVar(value: String): StringVar[String] = StringVar[String](value)
implicit def color2var(color: Color): ColorVar[Color] = ColorVar[Color](color)
implicit def double2var(value: Double): DoubleVar[Double] = DoubleVar[Double](value)
implicit def pos2var(pos: Pos): PosVar[Pos] = PosVar[Pos](pos)
implicit class StringExt(s: String) {
/**
* Injects the variables into the text.
*
* @param varMap maps variable names to the variables being injected.
* @return StringGenerator representing the text with injected variables
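   *
   * Illustrative, hypothetical usage (assuming the Var types produced by the implicit
   * conversions in this trait are usable as Generator[Any]):
   * {{{
   *   val x = DoubleVar(1.0)
   *   val text: Generator[String] = "value = $x" << ("x" -> x)
   * }}}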
*/
def << (varMap: (String, Generator[Any])*): Generator[String] = {
val end = "ềǹd"
def split(s: String, name: String, v: Generator[String]): List[AnyRef] = {
val y = ((s+end).split("\\\\$"+name).toList.foldLeft(List[AnyRef]())((a: List[AnyRef], b: String) => b :: v :: a): @unchecked) match {
case x :: xs => x.asInstanceOf[String].replaceAll(end+"$", "") :: xs
}
y.reverse.drop(1)
}
val list = varMap.toList.foldLeft(List[AnyRef](s)){ case (l: List[AnyRef], t: (String, Generator[Any])) =>
l.flatMap{
case s: String => split(s, t._1, t._2.toStringGenerator)
case g: Generator[_] => List(g)
}
}
new ImmutableCodeContainer(_ intersect _, list.map{
case s: String => StringVar(s)
case g: Generator[_] => g.asInstanceOf[Generator[String]]
}: _*)
}
}
implicit class GeneratorHelper(val sc: StringContext) {
def vi(args: Generator[Any]*): Generator[String] = {
val strings = sc.parts.iterator.map(_.stripMargin)
val gens = args.iterator
val list = new mutable.ListBuffer[AnyRef]
list += strings.next
while (strings.hasNext) {
list += gens.next
list += strings.next
}
if (list.head == "") list.remove(0)
if (list.last == "") list.remove(list.size-1)
new ImmutableCodeContainer(_ intersect _, (list: @unchecked).map{
case s: String => StringVar(s)
case g: Generator[Any] => g.toStringGenerator
}: _*)
}
}
// arithmetic operations for double generators
implicit class DoubleGeneratorExt(thisGen: Generator[Double]) {
def + (thatGen: Generator[Double]) = (thisGen~~thatGen).map{case a~~b => a+b}
def - (thatGen: Generator[Double]) = (thisGen~~thatGen).map{case a~~b => a-b}
def * (thatGen: Generator[Double]) = (thisGen~~thatGen).map{case a~~b => a*b}
def / (thatGen: Generator[Double]) = (thisGen~~thatGen).map{case a~~b => a/b}
}
}
object BaseVarIC extends BaseVarIC
|
xylo/scala-tikz-animations
|
src/de/endrullis/sta/BaseVarIC.scala
|
Scala
|
apache-2.0
| 2,891 |
package com.oomagnitude.metrics.model
case class ExperimentId(experiment: String) {
override val toString: String = experiment
}
|
oomagnitude/metrics-shared
|
metrics/shared/src/main/scala/com/oomagnitude/metrics/model/ExperimentId.scala
|
Scala
|
apache-2.0
| 132 |
package com.hilverd.simcallstack.unit
import org.scalatest.{Matchers, FunSpec}
import com.hilverd.simcallstack.CollatzSequence
class CollatzSequenceTest extends FunSpec with Matchers {
it("should compute the total stopping time") {
val n = 27
val result = 111
CollatzSequence.stoppingTimeRecursive(n) should be(result)
CollatzSequence.stoppingTime(n) should be(result)
}
}
|
hilverd/simulated-call-stack
|
src/test/scala/com/hilverd/simcallstack/unit/CollatzSequenceTest.scala
|
Scala
|
mit
| 396 |
package controllers
import java.io.FileInputStream
import org.squeryl.PrimitiveTypeMode.__thisDsl
import org.squeryl.PrimitiveTypeMode.long2ScalarLong
import org.squeryl.PrimitiveTypeMode.transaction
import play.api.Play.current
import play.api.data.Forms.boolean
import play.api.data.Forms.list
import play.api.data.Forms.longNumber
import play.api.data.Forms.mapping
import play.api.data.Forms.nonEmptyText
import play.api.data.Forms.number
import play.api.data.Forms.optional
import play.api.data.Form
import play.api.i18n.Lang
import play.api.i18n.Messages
import play.api.libs.json.Json.toJson
import play.api.libs.json.JsArray
import play.api.libs.json.JsNumber
import play.api.libs.json.JsObject
import play.api.libs.json.JsString
import play.api.mvc.Controller
import play.api.Logger
import play.api.Play
import plugins.cloudimage.CloudImageErrorResponse
import plugins.cloudimage.CloudImagePlugin
import plugins.cloudimage.CloudImageService
import plugins.cloudimage.CloudImageSuccessResponse
import plugins.use
import plugins.cloudimage.TransformationProperty
import model.dto.SponsorWithLogo
import model.Resource
object Sponsor extends LangAwareController with securesocial.core.SecureSocial {
private lazy val cloudImageService = use[CloudImagePlugin].cloudImageService
/* Transformation properties for displaying a sponsor logo
* We try to resize the image according to the predefined configuration (see application.conf) proportionally so that
* the image fits into defined boundaries
*/
private lazy val SPONSOR_LOGO_TRANSFORMATION_PROPS = Map[TransformationProperty.Value, String](
TransformationProperty.WIDTH -> Play.current.configuration.getString("sponsors.logo.maxwidth").getOrElse(""),
TransformationProperty.HEIGHT -> Play.current.configuration.getString("sponsors.logo.maxheight").getOrElse(""),
TransformationProperty.CROP_MODE -> "c_fit");
// shortcut for image transformation
private def transformLogo(url: String) = {
cloudImageService.getTransformationUrl(url, SPONSOR_LOGO_TRANSFORMATION_PROPS)
}
val sponsorForm = Form(
mapping(
"name" -> nonEmptyText,
"title" -> nonEmptyText,
"description" -> nonEmptyText,
"website" -> nonEmptyText,
"order" -> number,
"hackathonId" -> optional(longNumber),
"logoResourceId" -> optional(longNumber),
"logoUrl" -> optional(nonEmptyText)) // apply
((name,
title,
description,
website,
order,
hackathonId,
logoResourceId,
logoUrl) => new model.dto.SponsorWithLogo(
new model.Sponsor(name,
title,
description,
website,
order,
hackathonId,
logoResourceId), if (logoUrl.isDefined) Some(new model.Resource(logoUrl.get, ""))
else None)) // unapply
((sl: SponsorWithLogo) =>
Some(sl.sponsor.name, sl.sponsor.title, sl.sponsor.description, sl.sponsor.website, sl.sponsor.order, sl.sponsor.hackathonId, sl.sponsor.logoResourceId, sl.logo.map(l => transformLogo(l.url)))))
def uploadError(implicit text: String, lang: Lang) = toJson(Seq(toJson(Map("error" -> toJson(Messages(text))))))
def index = UserAwareAction { implicit request =>
transaction {
Ok(views.html.sponsors.index(model.dto.SponsorWithLogo.portalSponsors, userFromRequest))
}
}
def indexH(hid: Long) = UserAwareAction { implicit request =>
transaction {
Ok(views.html.sponsors.indexH(model.Hackathon.lookup(hid), model.dto.SponsorWithLogo.hackathonSponsors(hid), userFromRequest))
}
}
def view(id: Long) = UserAwareAction { implicit request =>
transaction {
Ok(views.html.sponsors.view(model.dto.SponsorWithLogo.lookup(id), userFromRequest))
}
}
def viewH(hid: Long, id: Long) = UserAwareAction { implicit request =>
transaction {
val sponsor = model.dto.SponsorWithLogo.lookup(id)
val hackathon = model.Hackathon.lookup(hid)
Ok(views.html.sponsors.viewH(hackathon, sponsor, userFromRequest))
}
}
def create = SecuredAction { implicit request =>
transaction {
ensureAdmin {
val user = userFromRequest(request)
val sponsor = new model.dto.SponsorWithLogo()
Ok(views.html.sponsors.create(sponsorForm.fill(sponsor), user))
}
}
}
def createH(hid: Long) = SecuredAction { implicit request =>
transaction {
model.Hackathon.lookup(hid).map { hackathon =>
ensureHackathonOrganiserOrAdmin(hackathon) {
val user = userFromRequest(request)
val sponsor = new model.dto.SponsorWithLogo(new model.Sponsor(Some(hid)), None)
Ok(views.html.sponsors.createH(Some(hackathon), sponsorForm.fill(sponsor), user))
}
}.getOrElse(Redirect(routes.Hackathon.view(hid)))
}
}
def save = SecuredAction { implicit request =>
val user = userFromRequest(request)
sponsorForm.bindFromRequest.fold(
errors => BadRequest(views.html.sponsors.create(errors, user)),
sponsorWithLogo => transaction {
ensureAdmin {
model.Sponsor.insert(sponsorWithLogo.sponsor)
Redirect(routes.Sponsor.index).flashing("status" -> "added", "title" -> sponsorWithLogo.sponsor.name)
}
})
}
def saveH(hid: Long) = SecuredAction { implicit request =>
sponsorForm.bindFromRequest.fold(
errors => transaction {
val user = userFromRequest(request)
BadRequest(views.html.sponsors.createH(model.Hackathon.lookup(hid), errors, user))
},
sponsorWithLogo => transaction {
model.Hackathon.lookup(hid).map { hackathon =>
ensureHackathonOrganiserOrAdmin(hackathon) {
model.Sponsor.insert(sponsorWithLogo.sponsor)
Redirect(routes.Sponsor.indexH(hid)).flashing("status" -> "added", "title" -> sponsorWithLogo.sponsor.name)
}
}.getOrElse(Redirect(routes.Hackathon.view(hid)))
})
}
def edit(id: Long) = SecuredAction { implicit request =>
transaction {
model.dto.SponsorWithLogo.lookup(id).map { sponsorWithLogo =>
ensureAdmin {
val user = userFromRequest(request)
Ok(views.html.sponsors.edit(id, sponsorForm.fill(sponsorWithLogo), user))
}
}.getOrElse (Redirect(routes.Sponsor.view(id)))
}
}
def editH(hid: Long, id: Long) = SecuredAction { implicit request =>
transaction {
model.Hackathon.lookup(hid).map { hackathon =>
model.dto.SponsorWithLogo.lookup(id).filter(_.sponsor.hackathonId == Some(hid)).map { sponsorWithLogo =>
ensureHackathonOrganiserOrAdmin(hackathon) {
val user = userFromRequest(request)
Ok(views.html.sponsors.editH(Some(hackathon), id, sponsorForm.fill(sponsorWithLogo), user))
}
}.getOrElse(Redirect(routes.Sponsor.viewH(hid, id)))
}.getOrElse(Redirect(routes.Hackathon.view(hid)))
}
}
def update(id: Long) = SecuredAction { implicit request =>
val user = userFromRequest(request)
sponsorForm.bindFromRequest.fold(
errors => BadRequest(views.html.sponsors.edit(id, errors, user)),
sponsorWithLogo => transaction {
ensureAdmin {
model.Sponsor.update(id, sponsorWithLogo.sponsor)
Redirect(routes.Sponsor.index).flashing("status" -> "updated", "title" -> sponsorWithLogo.sponsor.name)
}
})
}
def updateH(hid: Long, id: Long) = SecuredAction { implicit request =>
sponsorForm.bindFromRequest.fold(
errors => transaction {
val user = userFromRequest(request)
BadRequest(views.html.sponsors.editH(model.Hackathon.lookup(hid), id, errors, user))
},
sponsorWithLogo => transaction {
model.Hackathon.lookup(hid).map { hackathon =>
model.dto.SponsorWithLogo.lookup(id).map { sponsorWithLogo =>
ensureHackathonOrganiserOrAdmin(hackathon) {
model.Sponsor.update(id, sponsorWithLogo.sponsor)
Redirect(routes.Sponsor.indexH(hid)).flashing("status" -> "updated", "title" -> sponsorWithLogo.sponsor.name)
}
}.getOrElse(Redirect(routes.Sponsor.viewH(hid, id)))
}.getOrElse(Redirect(routes.Hackathon.view(hid)))
})
}
def delete(id: Long) = SecuredAction { implicit request =>
transaction {
ensureAdmin {
model.Sponsor.delete(id)
Redirect(routes.Sponsor.index).flashing("status" -> "deleted")
}
}
}
def deleteH(hid: Long, id: Long) = SecuredAction { implicit request =>
transaction {
model.Hackathon.lookup(hid).map { hackathon =>
model.Sponsor.lookup(id).filter(_.hackathonId == Some(hid)).map { sponsor =>
ensureHackathonOrganiserOrAdmin(hackathon) {
model.Sponsor.delete(id)
Redirect(routes.Sponsor.indexH(hid)).flashing("status" -> "deleted")
}
}.getOrElse(Redirect(routes.Sponsor.viewH(hid, id)))
}.getOrElse(Redirect(routes.Hackathon.view(hid)))
}
}
private def logoDetailsAsJson(url: String, resourceId: Long) = {
toJson(Seq(JsObject(List(
"url" -> JsString(transformLogo(url)),
"resourceId" -> JsNumber(resourceId)))))
}
def uploadLogo = UserAwareAction(parse.multipartFormData) { implicit request =>
var temporaryHandle = request.body.file("files").get
val temporaryFile = temporaryHandle.ref.file
val in = new FileInputStream(temporaryFile)
val bytes = new Array[Byte](temporaryFile.length.toInt);
in.read(bytes)
in.close()
val maxSize = Play.current.configuration.getString("sponsors.logo.maxsize").getOrElse("0").toLong * 1024
{
if (bytes.length > maxSize) {
Ok(uploadError("js.fileupload.filetoobig", lang))
} else {
val filename = temporaryHandle.filename
val response = cloudImageService.upload(filename, bytes)
response match {
case success: CloudImageSuccessResponse =>
transaction {
val resource = model.Resource(success.url, success.publicId);
model.Resource.insert(resource)
Ok(logoDetailsAsJson(resource.url, resource.id))
}
case error: CloudImageErrorResponse =>
Logger.debug("Sponsor - cloudinaryService - error: " + error.message)
Ok(uploadError("fileupload.server.error", lang))
}
}
}.withHeaders(CONTENT_TYPE -> "text/plain")
}
def getLogoDetails(id: Long) = UserAwareAction { implicit request =>
transaction {
Ok(model.Resource.lookup(id).map {
r => logoDetailsAsJson(r.url, r.id)
}.get)
}
}
}
|
lukaszbudnik/hackaton-portal
|
app/controllers/Sponsor.scala
|
Scala
|
apache-2.0
| 10,720 |
package chapter24
/**
 * 24.5 Sequence traits: Seq, IndexedSeq, LinearSeq
 *
 * The Seq trait represents sequences. A sequence is a kind of Iterable that has a fixed
 * length and whose elements can be addressed by fixed indices starting at 0.
 *
 * - Index and length operations: apply, isDefinedAt, length, indices, lengthCompare.
 *   For a Seq, apply means indexed element lookup. A sequence of type Seq[T] is a partial
 *   function that takes an Int and returns an element of type T; that is, Seq[T] extends
 *   PartialFunction[Int, T]. The length method on sequences is an alias for the size method
 *   of general collections. The lengthCompare method lets you compare the lengths of two
 *   sequences even if one of them is infinite.
 *
 * - Index search operations: indexOf, lastIndexOf, indexOfSlice, lastIndexOfSlice,
 *   indexWhere, lastIndexWhere, segmentLength, prefixLength. These return the index of an
 *   element that equals a given value or satisfies a predicate.
 *
 * - Addition operations: +:, :+, padTo. These return a new sequence with an element added
 *   at the front or the end.
 * - Update operations: updated, patch. These return a new sequence obtained by replacing
 *   some elements of the original sequence.
 * - Sorting operations: sorted, sortWith, sortBy sort the sequence elements according to
 *   various criteria.
 * - Reversal operations: reverse, reverseIterator, reverseMap. These process or yield the
 *   sequence elements in reverse order, from last to first.
 * - Comparison operations: startsWith, endsWith, contains, corresponds, containsSlice.
 *   These relate two sequences to each other or look for an element in a sequence.
 * - Multiset operations: intersect, diff, union, distinct. These perform set-like operations
 *   on two sequences or remove duplicates.
 *
 * If a sequence is mutable, it additionally offers an update method that changes an element
 * of the sequence as a side effect. As already mentioned in Chapter 3, seq(idx) = elem is
 * just shorthand for seq.update(idx, elem). Note the difference between update and updated:
 * update changes an element in place and is available only on mutable sequences, while
 * updated is available on all sequences and always returns a new sequence instead of
 * modifying the original.
 *
 * The Seq trait has two subtraits, LinearSeq and IndexedSeq. They do not add any new
 * operations, but they differ in performance characteristics. A linear sequence has efficient
 * head and tail operations, whereas an indexed sequence has efficient apply, length and
 * (if mutable) update operations. List and Stream are the most commonly used linear
 * sequences; the most commonly used indexed sequences are Array and ArrayBuffer. The Vector
 * class offers an interesting compromise between linear and indexed access, so for mixed
 * access patterns that need both, Vector is a good base class.
 *
 * 1. Buffers
 *
 * An important subcategory of mutable sequences is buffers.
 * Append at the end: +=, ++=
 * Prepend at the front: +=:, ++=:, insert, insertAll
 * Remove elements: -=, remove
 *
 * The most commonly used buffer implementations are ListBuffer and ArrayBuffer.
 *
 * A short runnable sketch of these operations is given in the object body below.
*/
object c24_i05 {
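  // Added illustrative sketch (not part of the original chapter notes): exercises the
  // Seq operations described above using only the standard library.
  def demo(): Unit = {
    val xs = Seq(3, 1, 2)
    assert(xs(0) == 3)                             // apply: positional access
    assert(xs.updated(0, 9) == Seq(9, 1, 2))       // updated: returns a new sequence
    assert(((0 +: xs) :+ 4) == Seq(0, 3, 1, 2, 4)) // +: prepend, :+ append
    assert(xs.sorted == Seq(1, 2, 3))              // sorted
    assert(xs.indexWhere(_ > 1) == 0)              // index search
    val buf = scala.collection.mutable.ArrayBuffer(1, 2)
    buf += 3                                       // buffer: append at the end
    buf.update(0, 0)                               // in-place update (mutable only)
    assert(buf.toSeq == Seq(0, 2, 3))
  }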
}
|
seraekim/srkim-lang-scala
|
src/main/java/chapter24/c24_i05.scala
|
Scala
|
bsd-3-clause
| 3,636 |
package org.jetbrains.plugins.scala
package highlighter
package usages
import java.util
import java.util.Collections
import com.intellij.codeInsight.highlighting.HighlightUsagesHandlerBase
import com.intellij.openapi.editor.Editor
import com.intellij.psi.{PsiElement, PsiFile}
import com.intellij.util.Consumer
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScPatternDefinition, ScVariableDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
/**
* Highlights the expressions that will be evaluated during construction.
*/
class ScalaHighlightPrimaryConstructorExpressionsHandler(templateDef: ScTemplateDefinition, editor: Editor,
file: PsiFile, keyword: PsiElement)
extends HighlightUsagesHandlerBase[PsiElement](editor, file) {
override def computeUsages(targets: util.List[_ <: PsiElement]): Unit = {
val eb = templateDef.extendsBlock
val varAndValDefsExprs = eb.members.flatMap {
case p: ScPatternDefinition => p.expr.toList // we include lazy vals, perhaps they could be excluded.
case v: ScVariableDefinition => v.expr.toList
case _ => Seq.empty
}
val usages = varAndValDefsExprs ++ eb.templateBody.toList.flatMap(_.exprs) :+ keyword
usages.map(_.getTextRange).foreach(myReadUsages.add)
}
override def selectTargets(targets: util.List[_ <: PsiElement], selectionConsumer: Consumer[_ >: util.List[_ <: PsiElement]]): Unit = {
selectionConsumer.consume(targets)
}
override def getTargets: util.List[PsiElement] = Collections.singletonList(keyword)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/highlighter/usages/ScalaHighlightPrimaryConstructorExpressionsHandler.scala
|
Scala
|
apache-2.0
| 1,631 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import io.fabric8.kubernetes.api.model.Pod
import io.fabric8.kubernetes.client.KubernetesClient
import io.fabric8.kubernetes.client.dsl.PodResource
import org.mockito.{ArgumentCaptor, Mock, MockitoAnnotations}
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{mock, never, times, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.BeforeAndAfter
import scala.collection.mutable
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.Config
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.Fabric8Aliases._
import org.apache.spark.deploy.k8s.KubernetesUtils._
import org.apache.spark.scheduler.ExecutorExited
import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils._
class ExecutorPodsLifecycleManagerSuite extends SparkFunSuite with BeforeAndAfter {
private var namedExecutorPods: mutable.Map[String, PodResource[Pod]] = _
@Mock
private var kubernetesClient: KubernetesClient = _
@Mock
private var podOperations: PODS = _
@Mock
private var schedulerBackend: KubernetesClusterSchedulerBackend = _
private var snapshotsStore: DeterministicExecutorPodsSnapshotsStore = _
private var eventHandlerUnderTest: ExecutorPodsLifecycleManager = _
before {
MockitoAnnotations.openMocks(this).close()
snapshotsStore = new DeterministicExecutorPodsSnapshotsStore()
namedExecutorPods = mutable.Map.empty[String, PodResource[Pod]]
when(schedulerBackend.getExecutorsWithRegistrationTs()).thenReturn(Map.empty[String, Long])
when(kubernetesClient.pods()).thenReturn(podOperations)
when(podOperations.withName(any(classOf[String]))).thenAnswer(namedPodsAnswer())
eventHandlerUnderTest = new ExecutorPodsLifecycleManager(
new SparkConf(),
kubernetesClient,
snapshotsStore)
eventHandlerUnderTest.start(schedulerBackend)
}
test("When an executor reaches error states immediately, remove from the scheduler backend.") {
val failedPod = failedExecutorWithoutDeletion(1)
snapshotsStore.updatePod(failedPod)
snapshotsStore.notifySubscribers()
val msg = exitReasonMessage(1, failedPod, 1)
val expectedLossReason = ExecutorExited(1, exitCausedByApp = true, msg)
verify(schedulerBackend).doRemoveExecutor("1", expectedLossReason)
verify(namedExecutorPods(failedPod.getMetadata.getName)).delete()
}
test("Don't remove executors twice from Spark but remove from K8s repeatedly.") {
val failedPod = failedExecutorWithoutDeletion(1)
snapshotsStore.updatePod(failedPod)
snapshotsStore.notifySubscribers()
snapshotsStore.updatePod(failedPod)
snapshotsStore.notifySubscribers()
val msg = exitReasonMessage(1, failedPod, 1)
val expectedLossReason = ExecutorExited(1, exitCausedByApp = true, msg)
verify(schedulerBackend, times(1)).doRemoveExecutor("1", expectedLossReason)
verify(namedExecutorPods(failedPod.getMetadata.getName), times(2)).delete()
}
test("When the scheduler backend lists executor ids that aren't present in the cluster," +
" remove those executors from Spark.") {
when(schedulerBackend.getExecutorsWithRegistrationTs()).thenReturn(Map("1" -> 7L))
val missingPodDelta =
eventHandlerUnderTest.conf.get(Config.KUBERNETES_EXECUTOR_MISSING_POD_DETECT_DELTA)
snapshotsStore.clock.advance(missingPodDelta + 7)
snapshotsStore.replaceSnapshot(Seq.empty[Pod])
snapshotsStore.notifySubscribers()
verify(schedulerBackend, never()).doRemoveExecutor(any(), any())
// 1 more millisecond and the accepted delta is over so the missing POD will be detected
snapshotsStore.clock.advance(1)
snapshotsStore.replaceSnapshot(Seq.empty[Pod])
snapshotsStore.notifySubscribers()
val msg = "The executor with ID 1 (registered at 7 ms) was not found in the cluster at " +
"the polling time (30008 ms) which is after the accepted detect delta time (30000 ms) " +
"configured by `spark.kubernetes.executor.missingPodDetectDelta`. The executor may have " +
"been deleted but the driver missed the deletion event. Marking this executor as failed."
val expectedLossReason = ExecutorExited(-1, exitCausedByApp = false, msg)
verify(schedulerBackend).doRemoveExecutor("1", expectedLossReason)
}
test("Keep executor pods in k8s if configured.") {
val failedPod = failedExecutorWithoutDeletion(1)
eventHandlerUnderTest.conf.set(Config.KUBERNETES_DELETE_EXECUTORS, false)
snapshotsStore.updatePod(failedPod)
snapshotsStore.notifySubscribers()
val msg = exitReasonMessage(1, failedPod, 1)
val expectedLossReason = ExecutorExited(1, exitCausedByApp = true, msg)
verify(schedulerBackend).doRemoveExecutor("1", expectedLossReason)
verify(namedExecutorPods(failedPod.getMetadata.getName), never()).delete()
val podCaptor = ArgumentCaptor.forClass(classOf[Pod])
verify(namedExecutorPods(failedPod.getMetadata.getName)).patch(podCaptor.capture())
val pod = podCaptor.getValue()
assert(pod.getMetadata().getLabels().get(SPARK_EXECUTOR_INACTIVE_LABEL) === "true")
}
private def exitReasonMessage(execId: Int, failedPod: Pod, exitCode: Int): String = {
val reason = Option(failedPod.getStatus.getReason)
val message = Option(failedPod.getStatus.getMessage)
val explained = ExecutorPodsLifecycleManager.describeExitCode(exitCode)
val exitMsg = s"The executor with id $execId exited with exit code $explained."
val reasonStr = reason.map(r => s"The API gave the following brief reason: ${r}")
val msgStr = message.map(m => s"The API gave the following message: ${m}")
s"""
|${exitMsg}
|${reasonStr.getOrElse("")}
|${msgStr.getOrElse("")}
|
|The API gave the following container statuses:
|
|${containersDescription(failedPod)}
""".stripMargin
}
private def namedPodsAnswer(): Answer[PodResource[Pod]] =
(invocation: InvocationOnMock) => {
val podName: String = invocation.getArgument(0)
namedExecutorPods.getOrElseUpdate(
podName, mock(classOf[PodResource[Pod]]))
}
}
|
shaneknapp/spark
|
resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsLifecycleManagerSuite.scala
|
Scala
|
apache-2.0
| 7,031 |
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.index
import org.apache.hadoop.io.Text
import org.joda.time.DateTime
import org.locationtech.geomesa.utils.geohash.GeoHash
import org.opengis.feature.simple.SimpleFeature
import scala.util.hashing.MurmurHash3
trait TextFormatter {
def format(gh: GeoHash, dt: DateTime, sf: SimpleFeature): Text = new Text(formatString(gh, dt, sf))
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature): String
def numBits: Int
}
object TextFormatter {
implicit def string2Text(s: String): Text = new Text(s)
}
/**
* These GeoHash strings are padded to 7 characters with a period. This is
* done for a few reasons:
* 1. with the addition of GeoHash decomposition for non-point data, some
* of the GeoHashes will be at fewer than 35 bits (but always on 5-bit
* boundaries);
* 2. a period appears earlier in the ASCII chart than do any of the alpha-
* numeric characters
*
* @param offset how many characters (from the left) to skip
* @param numBits how many characters to use
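 *
 * Illustrative example: a 25-bit GeoHash whose hash string is "dqcjq" is padded to
 * "dqcjq.." before the substring is taken, so GeoHashTextFormatter(0, 3) emits "dqc"
 * and GeoHashTextFormatter(5, 2) emits "..".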
*/
case class GeoHashTextFormatter(offset: Int, numBits: Int) extends TextFormatter {
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature) = {
val padded = gh.hash.padTo(7, ".").mkString
padded.substring(offset, offset + numBits)
}
}
// note: this will fail if you have an entry lacking a valid date
case class DateTextFormatter(f: String) extends TextFormatter {
val numBits = f.length
val formatter = org.joda.time.format.DateTimeFormat.forPattern(f)
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature) =
formatter.print(dt)
}
/**
* Responsible for assigning a shard number (partition) to the given
* entry based on a hash of the feature ID.
*
* MurmurHash3 was chosen, because 1) it is part of the standard
* Scala libraries; 2) it claims to do a reasonable job spreading
* hash values around. See http://code.google.com/p/smhasher/wiki/MurmurHash3
*
* Assumptions:
* <ul>
* <li>IDs that are null will be hashed based on the string version
* of the feature. (It should not be possible to have a null
* ID, or at least not easy: Both DataUtilities.createFeature
* and SimpleFeatureBuilder.buildFeature will automatically
* generate an ID if you don't provide a non-null ID of your
* own)
* <li>We will need code to cover the case where an ID changes,
* because it may mean moving an entry to a different tablet-
* server. (How likely is this to happen?)</li>
* </ul>
*
* @param numPartitions "%99#r" will mean: create shards from 0..99
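 *
 * Worked example (illustrative): with numPartitions = 99 the hash is taken modulo
 * numPartitions + 1, so getIdHashPartition returns a shard in 0..99; numBits is
 * "99".length = 2, so a shard of 7 is formatted as "07".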
*/
case class PartitionTextFormatter(numPartitions: Int) extends TextFormatter {
val numBits: Int = numPartitions.toString.length
val fmt = ("%0" + numBits + "d").format(_: Int)
def getIdHashPartition(entry: SimpleFeature): Int = {
val toHash = entry.getID match {
case null => entry.getAttributes.toArray
case id => Array(id)
}
Math.abs(MurmurHash3.arrayHash(toHash) % (numPartitions + 1))
}
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature) = fmt(getIdHashPartition(sf))
}
case class ConstantTextFormatter(constStr: String) extends TextFormatter {
val constText = new Text(constStr)
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature) = constStr
def numBits = constStr.length
}
case class IdFormatter(maxLength: Int) extends TextFormatter {
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature) = sf.getID.padTo(maxLength, "_").mkString
def numBits: Int = maxLength
}
case class CompositeTextFormatter(lf: Seq[TextFormatter], sep: String) extends TextFormatter {
val numBits = lf.map(_.numBits).sum
def formatString(gh: GeoHash, dt: DateTime, sf: SimpleFeature) = lf.map { _.formatString(gh, dt, sf) }.mkString(sep)
}
|
jwkessi/geomesa
|
geomesa-core/src/main/scala/org/locationtech/geomesa/core/index/Formatters.scala
|
Scala
|
apache-2.0
| 4,429 |
package flowlib
import java.util.concurrent.ForkJoinPool
import Site._
import DefaultSite._
trait DefaultSite extends Site with SiteForkJoin with SiteFailFast with SiteLogAll
object DefaultSite {
lazy val forkJoin = new ForkJoinPool
trait SiteForkJoin extends Monitored { this: Site with SiteForkJoin =>
def executor: ForkJoinPool
def backlog = executor.getActiveThreadCount
def quota = executor.getParallelism
def waiters = executor.getQueuedTaskCount.toInt
}
def apply(printer: String => Unit = println _, hook: Throwable => Unit = _ => ()): DefaultSite = new DefaultSite {
def executor = forkJoin
def log(m: String) = printer(m)
def shutdown(e: Throwable) = hook(e)
}
}
|
arnolddevos/FlowLib
|
src/jvm/scala/flowlib/DefaultSite.scala
|
Scala
|
lgpl-2.1
| 717 |
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scray.loader.configparser
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.io.IOUtils
// scalastyle:off underscore.import
import org.parboiled2._
// scalastyle:on underscore.import
import scala.util.{ Failure, Try }
import scray.loader.{ DBMSUndefinedException, UnsupportedMappingTypeException }
import scray.loader.configuration.{ QueryspaceIndexstore, QueryspaceOption, QueryspaceRowstore }
import scray.querying.description.TableIdentifier
/**
* Parse properties for a user model with Queryspaces.
 * Grammar ::= (STRING ":" ("LDAP" | "PLAIN") ":" STRING? ":" ID? ("," ID)* "\\n")*
 * Meaning that there may be one or more lines, each containing a username, the
 * authentication method used against the directory, an optional password and a
 * comma-separated list of queryspace-names.
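 *
 * An illustrative line (string quoting for the user and password fields follows
 * QuotedSingleString from ScrayGenericParsingRules):
 *   <user> : LDAP : <password> : queryspace1, queryspace2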
*/
// scalastyle:off method.name
class ScrayUserConfigurationParser (override val input: ParserInput, val config: ScrayConfiguration)
extends ScrayGenericParsingRules with LazyLogging {
override implicit def wspStr(s: String): Rule0 = rule { str(s) ~ zeroOrMore(SingleLineWhitespaceChars) }
/**
* read until all input has been consumed
*/
def InputLine: Rule1[ScrayUsersConfiguration] = rule { UserConfigModel ~ EOI }
def UserConfigModel: Rule1[ScrayUsersConfiguration] = rule { optional(LineBreak) ~ oneOrMore(UserLine) ~> { ScrayUsersConfiguration }}
def UserLine: Rule1[ScrayAuthConfiguration] = rule { QuotedSingleString ~ COLON ~ AuthMethod ~ COLON ~ optional(QuotedSingleString) ~ COLON ~
zeroOrMore(IdentifierSingle).separatedBy(",") ~ LineBreak ~> { (user: String, method: ScrayAuthMethod.Value, pwd: Option[String], qs: Seq[String]) =>
ScrayAuthConfiguration(user, pwd.getOrElse(""), method, qs.toSet)
}}
def AuthMethod: Rule1[ScrayAuthMethod.Value] = rule { IdentifierSingle ~> { (id: String) => id.toUpperCase() match {
case "LDAP" => ScrayAuthMethod.LDAP
case "PLAIN" => ScrayAuthMethod.Plain
case _ => ScrayAuthMethod.Plain
}}}
}
// scalastyle:on method.name
object ScrayUserConfigurationParser extends LazyLogging {
private def handleWithErrorLogging(input: String, config: ScrayConfiguration, logError: Boolean = true): Try[ScrayUsersConfiguration] = {
val parser = new ScrayUserConfigurationParser(input, config)
val parseResult = parser.InputLine.run()
logError match {
case true => parseResult.recoverWith { case e: ParseError =>
val msg = parser.formatError(e)
logger.error(s"Parse error parsing user-configuration file. Message from parser is $msg", e)
Failure(e)
}
case false => parseResult
}
}
def parse(text: String, config: ScrayConfiguration, logError: Boolean = true): Try[ScrayUsersConfiguration] =
handleWithErrorLogging(text, config, logError)
def parseResource(resource: String, config: ScrayConfiguration, logError: Boolean = true): Try[ScrayUsersConfiguration] = {
val text = IOUtils.toString(this.getClass().getResourceAsStream(resource), "UTF-8")
handleWithErrorLogging(text, config, logError)
}
}
|
scray/scray
|
scray-loader/src/main/scala/scray/loader/configparser/ScrayUserConfigurationParser.scala
|
Scala
|
apache-2.0
| 3,757 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
class InvalidRequestException(val message: String) extends RuntimeException(message) {
def this() = this("")
}
|
unix1986/universe
|
tool/kafka-0.8.1.1-src/core/src/main/scala/kafka/network/InvalidRequestException.scala
|
Scala
|
bsd-2-clause
| 946 |
package org.scanamo
import cats.implicits._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType._
import org.scanamo.query._
import org.scanamo.syntax._
import org.scanamo.fixtures._
import org.scanamo.generic.auto._
import org.scanamo.ops.ScanamoOps
class ScanamoAsyncTest extends AnyFunSpec with Matchers with BeforeAndAfterAll with ScalaFutures {
implicit val defaultPatience: PatienceConfig =
PatienceConfig(timeout = Span(2, Seconds), interval = Span(15, Millis))
import scala.concurrent.ExecutionContext.Implicits.global
val client = LocalDynamoDB.client()
val scanamo = ScanamoAsync(client)
override protected def afterAll(): Unit = {
client.close()
super.afterAll()
}
it("should put asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmers = Table[Farmer](t)
val result = for {
_ <- farmers.put(Farmer("McDonald", 156L, Farm(List("sheep", "cow"))))
f <- farmers.get("name" === "McDonald")
} yield f
scanamo.exec(result).futureValue should equal(
Some(Right(Farmer("McDonald", 156, Farm(List("sheep", "cow")))))
)
}
}
it("should get asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmers = Table[Farmer](t)
val result = for {
_ <- farmers.put(Farmer("Maggot", 75L, Farm(List("dog"))))
r1 <- farmers.get(UniqueKey(KeyEquals("name", "Maggot")))
r2 <- farmers.get("name" === "Maggot")
} yield (r1, r1 == r2)
scanamo.exec(result).futureValue should equal(
(Some(Right(Farmer("Maggot", 75, Farm(List("dog"))))), true)
)
}
LocalDynamoDB.usingRandomTable(client)("name" -> S, "number" -> N) { t =>
val engines = Table[Engine](t)
val result = for {
_ <- engines.put(Engine("Thomas", 1))
e <- engines.get("name" === "Thomas" and "number" === 1)
} yield e
scanamo.exec(result).futureValue should equal(Some(Right(Engine("Thomas", 1))))
}
}
it("should get consistently asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val cities = Table[City](t)
val result = for {
_ <- cities.put(City("Nashville", "US"))
c <- cities.consistently.get("name" === "Nashville")
} yield c
scanamo.exec(result).futureValue should equal(Some(Right(City("Nashville", "US"))))
}
}
it("should delete asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmers = Table[Farmer](t)
scanamo.exec {
for {
_ <- farmers.put(Farmer("McGregor", 62L, Farm(List("rabbit"))))
_ <- farmers.delete("name" === "McGregor")
f <- farmers.get("name" === "McGregor")
} yield f
}.futureValue should equal(None)
}
}
it("should deleteAll asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmers = Table[Farmer](t)
val dataSet = Set(
Farmer("Patty", 200L, Farm(List("unicorn"))),
Farmer("Ted", 40L, Farm(List("T-Rex"))),
Farmer("Jack", 2L, Farm(List("velociraptor")))
)
val ops = for {
_ <- farmers.putAll(dataSet)
_ <- farmers.deleteAll("name" in dataSet.map(_.name))
fs <- farmers.scan()
} yield fs
scanamo.exec(ops).futureValue should equal(List.empty)
}
}
it("should update asynchronously") {
LocalDynamoDB.usingRandomTable(client)("location" -> S) { t =>
val forecasts = Table[Forecast](t)
val ops = for {
_ <- forecasts.put(Forecast("London", "Rain", None))
_ <- forecasts.update("location" === "London", set("weather", "Sun"))
fs <- forecasts.scan()
} yield fs
scanamo.exec(ops).futureValue should equal(List(Right(Forecast("London", "Sun", None))))
}
}
it("should update asynchronously if a condition holds") {
LocalDynamoDB.usingRandomTable(client)("location" -> S) { t =>
val forecasts = Table[Forecast](t)
val ops = for {
_ <- forecasts.putAll(Set(Forecast("London", "Rain", None), Forecast("Birmingham", "Sun", None)))
_ <- forecasts.when("weather" === "Rain").update("location" === "London", set("equipment", Some("umbrella")))
_ <-
forecasts
.when("weather" === "Rain")
.update("location" === "Birmingham", set("equipment", Some("umbrella")))
results <- forecasts.scan()
} yield results
scanamo.exec(ops).futureValue should equal(
List(Right(Forecast("London", "Rain", Some("umbrella"))), Right(Forecast("Birmingham", "Sun", None)))
)
}
}
it("should scan asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val bears = Table[Bear](t)
val ops = for {
_ <- bears.put(Bear("Pooh", "honey", None))
_ <- bears.put(Bear("Yogi", "picnic baskets", None))
bs <- bears.scan()
} yield bs
scanamo.exec(ops).futureValue should equal(
List(Right(Bear("Pooh", "honey", None)), Right(Bear("Yogi", "picnic baskets", None)))
)
}
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val lemmings = Table[Lemming](t)
val ops = for {
_ <- lemmings.putAll(List.fill(100)(Lemming(util.Random.nextString(500), util.Random.nextString(5000))).toSet)
ls <- lemmings.scan()
} yield ls
scanamo.exec(ops).futureValue.size should equal(100)
}
}
it("scans with a limit asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val bears = Table[Bear](t)
val ops = for {
_ <- bears.put(Bear("Pooh", "honey", None))
_ <- bears.put(Bear("Yogi", "picnic baskets", None))
bs <- bears.limit(1).scan()
} yield bs
scanamo.exec(ops).futureValue should equal(List(Right(Bear("Pooh", "honey", None))))
}
}
it("scanIndexWithLimit") {
LocalDynamoDB.withRandomTableWithSecondaryIndex(client)("name" -> S)("alias" -> S) { (t, i) =>
val bears = Table[Bear](t)
val ops = for {
_ <- bears.put(Bear("Pooh", "honey", Some("Winnie")))
_ <- bears.put(Bear("Yogi", "picnic baskets", None))
_ <- bears.put(Bear("Graham", "quinoa", Some("Guardianista")))
bs <- bears.index(i).limit(1).scan()
} yield bs
scanamo.exec(ops).futureValue should equal(
List(Right(Bear("Graham", "quinoa", Some("Guardianista"))))
)
}
}
it("Paginate scanIndexWithLimit") {
LocalDynamoDB.withRandomTableWithSecondaryIndex(client)("name" -> S)("alias" -> S) { (t, i) =>
val bears = Table[Bear](t)
val ops = for {
_ <- bears.put(Bear("Pooh", "honey", Some("Winnie")))
_ <- bears.put(Bear("Yogi", "picnic baskets", Some("Kanga")))
_ <- bears.put(Bear("Graham", "quinoa", Some("Guardianista")))
bs <- for {
_ <- bears.index(i).limit(1).scan()
res2 <- bears.index(i).limit(1).from("name" === "Graham" and "alias" === "Guardianista").scan()
res3 <- bears.index(i).limit(1).from("name" === "Yogi" and "alias" === "Kanga").scan()
} yield res2 ::: res3
} yield bs
scanamo.exec(ops).futureValue should equal(
List(Right(Bear("Yogi", "picnic baskets", Some("Kanga"))), Right(Bear("Pooh", "honey", Some("Winnie"))))
)
}
}
it("should stream full table scan") {
import cats.{ ~>, Apply, Monad, MonoidK }
import cats.instances.future._
import scala.concurrent.Future
type SFuture[A] = Future[Stream[A]]
implicit val applicative: MonoidK[SFuture] with Monad[SFuture] = new MonoidK[SFuture] with Monad[SFuture] {
def combineK[A](x: SFuture[A], y: SFuture[A]): SFuture[A] = Apply[Future].map2(x, y)(_ ++ _)
def empty[A]: SFuture[A] = Future.successful(Stream.empty)
def flatMap[A, B](fa: SFuture[A])(f: A => SFuture[B]): SFuture[B] =
fa flatMap { as =>
Future.traverse(as)(f)
} map (_.flatten)
def tailRecM[A, B](a: A)(f: A => SFuture[Either[A, B]]): SFuture[B] =
f(a) flatMap { eas =>
Future.traverse(eas) {
case Left(a) => tailRecM(a)(f)
case Right(b) => Future.successful(Stream(b))
} map (_.flatten)
}
def pure[A](x: A): SFuture[A] = Future.successful(Stream(x))
}
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val list = List(
Item("item #1"),
Item("item #2"),
Item("item #3"),
Item("item #4"),
Item("item #5"),
Item("item #6")
)
val expected = list.map(i => List(Right(i)))
val items = Table[Item](t)
val ops = for {
_ <- items.putAll(list.toSet).toFreeT[SFuture]
list <- items.scanPaginatedM[SFuture](1)
} yield list
val f = new (Future ~> SFuture) {
override def apply[A](a: Future[A]): SFuture[A] = a.map(Stream(_))
}
scanamo.execT(f)(ops).futureValue should contain theSameElementsAs expected
}
}
it("should query asynchronously") {
LocalDynamoDB.usingRandomTable(client)("species" -> S, "number" -> N) { t =>
val animals = Table[Animal](t)
val ops = for {
_ <- animals.put(Animal("Wolf", 1))
_ <- (1 to 3).toList.traverse(i => animals.put(Animal("Pig", i)))
r1 <- animals.query("species" === "Pig")
r2 <- animals.query("species" === "Pig" and "number" < 3)
r3 <- animals.query("species" === "Pig" and "number" > 1)
r4 <- animals.query("species" === "Pig" and "number" <= 2)
r5 <- animals.query("species" === "Pig" and "number" >= 2)
} yield (r1, r2, r3, r4, r5)
scanamo.exec(ops).futureValue should equal(
(
List(Right(Animal("Pig", 1)), Right(Animal("Pig", 2)), Right(Animal("Pig", 3))),
List(Right(Animal("Pig", 1)), Right(Animal("Pig", 2))),
List(Right(Animal("Pig", 2)), Right(Animal("Pig", 3))),
List(Right(Animal("Pig", 1)), Right(Animal("Pig", 2))),
List(Right(Animal("Pig", 2)), Right(Animal("Pig", 3)))
)
)
}
LocalDynamoDB.usingRandomTable(client)("mode" -> S, "line" -> S) { t =>
val transports = Table[Transport](t)
val ops = for {
_ <- transports.putAll(
Set(
Transport("Underground", "Circle", "Yellow"),
Transport("Underground", "Metropolitan", "Purple"),
Transport("Underground", "Central", "Red")
)
)
ts <- transports.query("mode" === "Underground" and ("line" beginsWith "C"))
} yield ts
scanamo.exec(ops).futureValue should equal(
List(Right(Transport("Underground", "Central", "Red")), Right(Transport("Underground", "Circle", "Yellow")))
)
}
}
it("queries with a limit asynchronously") {
LocalDynamoDB.withRandomTable(client)("mode" -> S, "line" -> S) { t =>
val transports = Table[Transport](t)
val result = for {
_ <- transports.putAll(
Set(
Transport("Underground", "Circle", "Yellow"),
Transport("Underground", "Metropolitan", "Purple"),
Transport("Underground", "Central", "Red")
)
)
rs <- transports.limit(1).query("mode" === "Underground" and ("line" beginsWith "C"))
} yield rs
scanamo.exec(result).futureValue should equal(List(Right(Transport("Underground", "Central", "Red"))))
}
}
it("queries an index with a limit asynchronously") {
LocalDynamoDB.withRandomTableWithSecondaryIndex(client)("mode" -> S, "line" -> S)("mode" -> S, "colour" -> S) {
(t, i) =>
val transports = Table[Transport](t)
val result = for {
_ <- transports.putAll(
Set(
Transport("Underground", "Circle", "Yellow"),
Transport("Underground", "Metropolitan", "Magenta"),
Transport("Underground", "Central", "Red"),
Transport("Underground", "Picadilly", "Blue"),
Transport("Underground", "Northern", "Black")
)
)
rs <-
transports
.index(i)
.limit(1)
.query(
"mode" === "Underground" and ("colour" beginsWith "Bl")
)
} yield rs
scanamo.exec(result).futureValue should equal(
List(Right(Transport("Underground", "Northern", "Black")))
)
}
}
it("queries an index asynchronously with `between` sort-key condition") {
def deleteAllStations(stationTable: Table[Station], stations: Set[Station]) =
stationTable.deleteAll(
UniqueKeys(MultipleKeyList(("mode", "name"), stations.map(station => (station.mode, station.name))))
)
val LiverpoolStreet = Station("Underground", "Liverpool Street", 1)
val CamdenTown = Station("Underground", "Camden Town", 2)
val GoldersGreen = Station("Underground", "Golders Green", 3)
val Hainault = Station("Underground", "Hainault", 4)
LocalDynamoDB.withRandomTableWithSecondaryIndex(client)("mode" -> S, "name" -> S)("mode" -> S, "zone" -> N) {
(t, i) =>
val stationTable = Table[Station](t)
val stations = Set(LiverpoolStreet, CamdenTown, GoldersGreen, Hainault)
val ops = for {
_ <- stationTable.putAll(stations)
ts1 <- stationTable.index(i).query("mode" === "Underground" and ("zone" between 2 and 4))
ts2 <- for { _ <- deleteAllStations(stationTable, stations); ts <- stationTable.scan() } yield ts
_ <- stationTable.putAll(Set(LiverpoolStreet))
ts3 <- stationTable.index(i).query("mode" === "Underground" and ("zone" between 2 and 4))
ts4 <- for { _ <- deleteAllStations(stationTable, stations); ts <- stationTable.scan() } yield ts
_ <- stationTable.putAll(Set(CamdenTown))
ts5 <- stationTable.index(i).query("mode" === "Underground" and ("zone" between 1 and 1))
} yield (ts1, ts2, ts3, ts4, ts5)
scanamo.exec(ops).futureValue should equal(
(
List(Right(CamdenTown), Right(GoldersGreen), Right(Hainault)),
List.empty,
List.empty,
List.empty,
List.empty
)
)
}
}
it("queries for items that are missing an attribute") {
LocalDynamoDB.usingRandomTable(client)("firstName" -> S, "surname" -> S) { t =>
val farmersTable = Table[Worker](t)
val farmerOps = for {
_ <- farmersTable.put(Worker("Fred", "Perry", None))
_ <- farmersTable.put(Worker("Fred", "McDonald", Some(54)))
farmerWithNoAge <- farmersTable.filter(attributeNotExists("age")).query("firstName" === "Fred")
} yield farmerWithNoAge
scanamo.exec(farmerOps).futureValue should equal(
List(Right(Worker("Fred", "Perry", None)))
)
}
}
it("should put multiple items asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val rabbits = Table[Rabbit](t)
val result = for {
_ <- rabbits.putAll(List.fill(100)(Rabbit(util.Random.nextString(500))).toSet)
rs <- rabbits.scan()
} yield rs
scanamo.exec(result).futureValue.size should equal(100)
}
}
it("should get multiple items asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmers = Table[Farmer](t)
scanamo
.exec(for {
_ <- farmers.putAll(
Set(
Farmer("Boggis", 43L, Farm(List("chicken"))),
Farmer("Bunce", 52L, Farm(List("goose"))),
Farmer("Bean", 55L, Farm(List("turkey")))
)
)
fs1 <- farmers.getAll(UniqueKeys(KeyList("name", Set("Boggis", "Bean"))))
fs2 <- farmers.getAll("name" in Set("Boggis", "Bean"))
} yield (fs1, fs2))
.futureValue should equal(
(
Set(Right(Farmer("Boggis", 43, Farm(List("chicken")))), Right(Farmer("Bean", 55, Farm(List("turkey"))))),
Set(Right(Farmer("Boggis", 43, Farm(List("chicken")))), Right(Farmer("Bean", 55, Farm(List("turkey")))))
)
)
}
LocalDynamoDB.usingRandomTable(client)("actor" -> S, "regeneration" -> N) { t =>
val doctors = Table[Doctor](t)
scanamo
.exec(for {
_ <- doctors.putAll(Set(Doctor("McCoy", 9), Doctor("Ecclestone", 10), Doctor("Ecclestone", 11)))
ds <- doctors.getAll(("actor" -> "regeneration") =*= Set("McCoy" -> 9, "Ecclestone" -> 11))
} yield ds)
.futureValue should equal(Set(Right(Doctor("McCoy", 9)), Right(Doctor("Ecclestone", 11))))
}
}
it("should get multiple items asynchronously (automatically handling batching)") {
LocalDynamoDB.usingRandomTable(client)("id" -> N) { t =>
val farms = (1 to 101).map(i => Factory(i, s"Farm #$i")).toSet
val farmsTable = Table[Factory](t)
scanamo
.exec(for {
_ <- farmsTable.putAll(farms)
fs <- farmsTable.getAll(UniqueKeys(KeyList("id", farms.map(_.id))))
} yield fs)
.futureValue should equal(farms.map(Right(_)))
}
}
it("should get multiple items consistently asynchronously (automatically handling batching)") {
LocalDynamoDB.usingRandomTable(client)("id" -> N) { t =>
val farms = (1 to 101).map(i => Factory(i, s"Farm #$i")).toSet
val farmsTable = Table[Factory](t)
scanamo
.exec(for {
_ <- farmsTable.putAll(farms)
fs <- farmsTable.consistently.getAll(UniqueKeys(KeyList("id", farms.map(_.id))))
} yield fs)
.futureValue should equal(farms.map(Right(_)))
}
}
it("should return old item after put asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmersTable = Table[Farmer](t)
val farmerOps = for {
_ <- farmersTable.put(Farmer("McDonald", 156L, Farm(List("sheep", "cow"))))
result <- farmersTable.putAndReturn(PutReturn.OldValue)(Farmer("McDonald", 50L, Farm(List("chicken", "cow"))))
} yield result
scanamo.exec(farmerOps).futureValue should equal(
Some(Right(Farmer("McDonald", 156L, Farm(List("sheep", "cow")))))
)
}
}
it("should return None when putting a new item asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmersTable = Table[Farmer](t)
val farmerOps = for {
result <- farmersTable.putAndReturn(PutReturn.OldValue)(Farmer("McDonald", 156L, Farm(List("sheep", "cow"))))
} yield result
scanamo.exec(farmerOps).futureValue should equal(
None
)
}
}
it("conditionally put asynchronously") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmersTable = Table[Farmer](t)
val farmerOps = for {
_ <- farmersTable.put(Farmer("McDonald", 156L, Farm(List("sheep", "cow"))))
_ <- farmersTable.when("age" === 156L).put(Farmer("McDonald", 156L, Farm(List("sheep", "chicken"))))
_ <- farmersTable.when("age" === 15L).put(Farmer("McDonald", 156L, Farm(List("gnu", "chicken"))))
farmerWithNewStock <- farmersTable.get("name" === "McDonald")
} yield farmerWithNewStock
scanamo.exec(farmerOps).futureValue should equal(
Some(Right(Farmer("McDonald", 156, Farm(List("sheep", "chicken")))))
)
}
}
it("conditionally put asynchronously with `between` condition") {
LocalDynamoDB.usingRandomTable(client)("name" -> S) { t =>
val farmersTable = Table[Farmer](t)
val farmerOps = for {
_ <- farmersTable.put(Farmer("McDonald", 55, Farm(List("sheep", "cow"))))
_ <- farmersTable.put(Farmer("Butch", 57, Farm(List("cattle"))))
_ <- farmersTable.put(Farmer("Wade", 58, Farm(List("chicken", "sheep"))))
_ <- farmersTable.when("age" between 56 and 57).put(Farmer("Butch", 57, Farm(List("chicken"))))
_ <- farmersTable.when("age" between 58 and 59).put(Farmer("Butch", 57, Farm(List("dinosaur"))))
farmerButch <- farmersTable.get("name" === "Butch")
} yield farmerButch
scanamo.exec(farmerOps).futureValue should equal(
Some(Right(Farmer("Butch", 57, Farm(List("chicken")))))
)
}
}
it("conditionally delete asynchronously") {
LocalDynamoDB.usingRandomTable(client)("number" -> N) { t =>
val gremlinsTable = Table[Gremlin](t)
val ops = for {
_ <- gremlinsTable.putAll(Set(Gremlin(1, false), Gremlin(2, true)))
_ <- gremlinsTable.when("wet" === true).delete("number" === 1)
_ <- gremlinsTable.when("wet" === true).delete("number" === 2)
remainingGremlins <- gremlinsTable.scan()
} yield remainingGremlins
scanamo.exec(ops).futureValue should equal(
List(Right(Gremlin(1, false)))
)
}
}
it("transact table write (update) items") {
LocalDynamoDB.usingRandomTable(client)("location" -> S) { t =>
val forecastTable = Table[Forecast](t)
val ops: ScanamoOps[List[Either[DynamoReadError, Forecast]]] = for {
_ <- forecastTable.putAll(
Set(Forecast("London", "Sun", None), Forecast("Amsterdam", "Fog", None), Forecast("Manchester", "Rain", None))
)
_ <- forecastTable.transactUpdateAll(
List(
UniqueKey(KeyEquals("location", "London")) -> set("weather", "Rain"),
UniqueKey(KeyEquals("location", "Amsterdam")) -> set("weather", "Cloud")
)
)
items <- forecastTable.scan()
} yield items
scanamo.exec(ops).futureValue should equal(
List(
Right(Forecast("Amsterdam", "Cloud", None)),
Right(Forecast("London", "Rain", None)),
Right(Forecast("Manchester", "Rain", None))
)
)
}
}
it("transact write (update) items in multiple tables") {
LocalDynamoDB.usingRandomTable(client)("number" -> N) { t1 =>
LocalDynamoDB.usingRandomTable(client)("location" -> S) { t2 =>
val gremlinTable = Table[Gremlin](t1)
val forecastTable = Table[Forecast](t2)
val ops = for {
_ <- gremlinTable.putAll(Set(Gremlin(1, wet = false), Gremlin(2, wet = true)))
_ <- forecastTable.putAll(Set(Forecast("London", "Sun", None), Forecast("Amsterdam", "Fog", None)))
_ <- forecastTable.transactUpdateAll(
List(
UniqueKey(KeyEquals("location", "London")) -> set("weather", "Rain")
)
)
_ <- gremlinTable.transactUpdateAll(
List(
UniqueKey(KeyEquals("number", 2)) -> set("wet", true)
)
)
gremlins <- gremlinTable.scan()
forecasts <- forecastTable.scan()
} yield (gremlins, forecasts)
scanamo.exec(ops).futureValue should equal(
(
List(Right(Gremlin(2, wet = true)), Right(Gremlin(1, wet = false))),
List(Right(Forecast("Amsterdam", "Fog", None)), Right(Forecast("London", "Rain", None)))
)
)
}
}
}
it("transact table write (delete) items") {
LocalDynamoDB.usingRandomTable(client)("location" -> S) { t =>
val forecastTable = Table[Forecast](t)
val ops: ScanamoOps[List[Either[DynamoReadError, Forecast]]] = for {
_ <- forecastTable.putAll(
Set(Forecast("London", "Sun", None), Forecast("Amsterdam", "Fog", None), Forecast("Manchester", "Rain", None))
)
_ <- forecastTable.transactDeleteAll(
List(
UniqueKey(KeyEquals("location", "London")),
UniqueKey(KeyEquals("location", "Amsterdam"))
)
)
items <- forecastTable.scan()
} yield items
scanamo.exec(ops).futureValue should equal(
List(Right(Forecast("Manchester", "Rain", None)))
)
}
}
it("transact write (delete) items in multiple tables") {
LocalDynamoDB.usingRandomTable(client)("number" -> N) { t1 =>
LocalDynamoDB.usingRandomTable(client)("location" -> S) { t2 =>
val gremlinTable = Table[Gremlin](t1)
val forecastTable = Table[Forecast](t2)
val ops = for {
_ <- gremlinTable.putAll(Set(Gremlin(1, wet = false), Gremlin(2, wet = true)))
_ <- forecastTable.putAll(Set(Forecast("London", "Sun", None), Forecast("Amsterdam", "Fog", None)))
_ <- forecastTable.transactDeleteAll(
List(
UniqueKey(KeyEquals("location", "London"))
)
)
_ <- gremlinTable.transactDeleteAll(
List(
UniqueKey(KeyEquals("number", 2))
)
)
gremlins <- gremlinTable.scan()
forecasts <- forecastTable.scan()
} yield (gremlins, forecasts)
scanamo.exec(ops).futureValue should equal(
(List(Right(Gremlin(1, wet = false))), List(Right(Forecast("Amsterdam", "Fog", None))))
)
}
}
}
}
| scanamo/scanamo | scanamo/src/test/scala-2.x/org/scanamo/ScanamoAsyncTest.scala | Scala | apache-2.0 | 25,321 |
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xml
import javax.xml.namespace.{QName ⇒ JQName}
// Helpers for javax.xml.namespace.QName
object JXQName {
def apply(local: String) = new JQName(local)
def apply(uri: String, local: String) = new JQName(uri, local)
def apply(uriLocal: (String, String)) = new JQName(uriLocal._1, uriLocal._2)
def unapply(c: javax.xml.namespace.QName) = Some(c.getNamespaceURI, c.getLocalPart)
implicit def tupleToJQName(tuple: (String, String)) = JXQName(tuple._1, tuple._2)
implicit def stringToJQname(s: String) = JXQName(s)
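// Usage sketch (editor's addition): with the implicit conversions above, a plain local name
// or a (uri, local) pair can be passed wherever a JQName is expected, e.g.
// `val q: JQName = ("http://example.org/ns", "local")` (the URI here is purely illustrative).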
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/xml/JXQName.scala | Scala | lgpl-2.1 | 1,214 |
/*
* Copyright (C) 2009 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package staging
import org.junit.Test
import org.junit.Assert._
import net.kogics.kojo.util._
class ColorTest extends StagingTestBase {
/* Testing manifest
*
* def grayColors(grayMax: Int) = ColorMaker(GRAY(grayMax))
* def grayColorsWithAlpha(grayMax: Int, alphaMax: Int) =
* def rgbColors(rMax: Int, gMax: Int, bMax: Int) =
* def rgbColorsWithAlpha(rMax: Int, gMax: Int, bMax: Int, alphaMax: Int) =
* def hsbColors(hMax: Int, sMax: Int, bMax: Int) =
* def color(s: String) = ColorMaker.color(s)
* def fill(c: Color) = Impl.figure0.setFillColor(c)
* def noFill = Impl.figure0.setFillColor(null)
* def stroke(c: Color) = Impl.figure0.setPenColor(c)
* def noStroke = Impl.figure0.setPenColor(null)
* def strokeWidth(w: Double) = Impl.figure0.setPenThickness(w)
* def withStyle(fc: Color, sc: Color, sw: Double)(body: => Unit) =
* def saveStyle = Style.save
* def restoreStyle = Style.restore
* implicit def ColorToRichColor (c: java.awt.Color) = RichColor(c)
* def lerpColor(from: RichColor, to: RichColor, amt: Double) =
*
* (manually added:)
* def alpha = c.getAlpha
* def red = c.getRed
* def blue = c.getBlue
* def green = c.getGreen
* def hue = {
* def saturation = (this.hsb(1) * 255).toInt
* def brightness = (this.hsb(2) * 255).toInt
*/
@Test
// lalit sez: if we have more than five tests, we run out of heap space - maybe
// a leak in the Scala interpreter/compiler subsystem. So we run (mostly)
// everything in one test
def test1 = {
//W
//W==Color==
//W
//WA color value can be created by calling one of the methods
//W
//W{{{
//Wcolor(red, green, blue)
//Wcolor(value)
//W}}}
//W
//Weach of which yields an instance of `java.awt.Color`.
//W
//WAnother way to specify color is through a color maker. For instance, to get
//Wa grayscale color, call `grayColors`. The argument sets a limit to the
//Wnumber of colors that can be specified. E.g. `grayColors(100)` allows 101
//Wshades of gray to be specified (0 to 100). To get one of those colors, call
//Wthe color maker with the color number as argument.
//W
//W{{{
//Wval cm = grayColors(lim) ; cm(num)
Tester(
"import Staging._ ; val cm = grayColors(255) ; println(cm(22))",
Some("$java.awt.Color\\\\[r=22,g=22,b=22\\\\]import Staging._" +
"cm: net.kogics.kojo.staging.GrayColorMaker = net.kogics.kojo.staging.GrayColorMaker@.*")
)
//W}}}
//W
//Wcreates a grayscale color with a "whiteness" equal to `num` / `lim`.
//W
//WThe color maker can also take a `Double` as argument, in which case the
//Wresulting color has a "whiteness" of `val` (expected to be in the range
//W0.0 <= val <= 1.0):
//W
//W{{{
//Wval cm = grayColors(lim) ; cm(val) // where val is a Double
Tester(
"import Staging._ ; val cm = grayColors(255) ; println(cm(.1))",
Some("$java.awt.Color\\\\[r=26,g=26,b=26\\\\]import Staging._" +
"cm: net.kogics.kojo.staging.GrayColorMaker = net.kogics.kojo.staging.GrayColorMaker@.*")
)
//W}}}
//W
//WTo get a non-opaque grayscale color maker, call `grayColorsWithAlpha` with
//Wtwo limit values, one for the highest shade number and one for the highest
//Walpha number. The color maker takes corresponding numbers for shade and
//Walpha.
//W
//W{{{
//Wval cm = grayColorsWithAlpha(grayLim, alphaLim) ; cm(grayNum, alphaNum)
val gacm = "net.kogics.kojo.staging.GrayAlphaColorMaker"
Tester(
"import Staging._ ; val cm = grayColorsWithAlpha(255, 255) ; println(cm(22, 22))",
Some("$java.awt.Color\\\\[r=22,g=22,b=22\\\\]import Staging._" +
"cm: " + gacm + " = " + gacm + "@.*")
)
//W}}}
//W
//Wcreates a grayscale color with a "whiteness" equal to `grayNum` / `grayLim`
//Wand an opacity equal to `alphaNum` / `alphaLim`.
//W
//WThe color maker can also take `Double` arguments, in which case the
//Wresulting color has a "whiteness" of `grayVal` and opacity of `alphaVal`
//W(both expected to be in the range 0.0 <= val <= 1.0):
//W
//W{{{
//Wval cm = grayColorsWithAlpha(grayLim, alphaLim) ; cm(grayVal, alphaVal)
Tester(
"import Staging._ ; val cm = grayColorsWithAlpha(255, 255) ; println(cm(.1, .1))",
Some("$java.awt.Color\\\\[r=26,g=26,b=26\\\\]import Staging._" +
"cm: " + gacm + " = " + gacm + "@.*")
)
//W}}}
//W
//WTo create a color maker for RGB colors, use `rgbColors` and pass three
//Wlimit values to it (for red, green, and blue)
//W
//W{{{
//Wval cm = rgbColors(redLim, greenLim, blueLim) ; cm(redNum, greenNum, blueNum)
val rcm = "net.kogics.kojo.staging.RgbColorMaker"
Tester(
"import Staging._ ; val cm = rgbColors(255, 255, 255) ; println(cm(22, 22, 22))",
Some("$java.awt.Color\\\\[r=22,g=22,b=22\\\\]import Staging._" +
"cm: " + rcm + " = " + rcm + "@.*")
)
//W}}}
//W
//Wcreates a color with a "redness" equal to `redNum` / `redLim`, etc.
//W
//WThe color maker can also take `Double` arguments, in which case the
//Wresulting color has a "redness" of `redVal`, etc (all expected to be in
//Wthe range 0.0 <= val <= 1.0):
//W
//W{{{
//Wval cm = rgbColors(redLim, greenLim, blueLim) ; cm(redVal, greenVal, blueVal)
Tester(
"import Staging._ ; val cm = rgbColors(255, 255, 255) ; println(cm(.1, .1, .1))",
Some("$java.awt.Color\\\\[r=26,g=26,b=26\\\\]import Staging._" +
"cm: " + rcm + " = " + rcm + "@.*")
)
//W}}}
//W
//WTo create a color maker for RGB colors with transparency, use
//W`rgbColorsWithAlpha` and pass four limit values to it (for red, green, blue,
//Wand alpha)
//W
//W{{{
//Wval cm = rgbColorsWithAlpha(redLim, greenLim, blueLim, alphaLim) ; cm(redNum, greenNum, blueNum, alphaNum)
val racm = "net.kogics.kojo.staging.RgbAlphaColorMaker"
Tester(
"import Staging._ ; val cm = rgbColorsWithAlpha(255, 255, 255, 255) ; println(cm(22, 22, 22, 22))",
Some("$java.awt.Color\\\\[r=22,g=22,b=22\\\\]import Staging._" +
"cm: " + racm + " = " + racm + "@.*")
)
//W}}}
//W
//Wcreates a color with a "redness" equal to `redNum` / `redLim`, etc.
//W
//WThe color maker can also take `Double` arguments, in which case the
//Wresulting color has a "redness" of `redVal`, etc (all expected to be in
//Wthe range 0.0 <= val <= 1.0):
//W
//W{{{
//Wval cm = rgbColorsWithAlpha(redLim, greenLim, blueLim, alphaLim) ; cm(redVal, greenVal, blueVal, alphaVal)
Tester(
"import Staging._ ; val cm = rgbColorsWithAlpha(255, 255, 255, 255) ; println(cm(.1, .1, .1, .1))",
Some("$java.awt.Color\\\\[r=26,g=26,b=26\\\\]import Staging._" +
"cm: " + racm + " = " + racm + "@.*")
)
//W}}}
//W
//WTo create a color maker for HSB colors, use `hsbColors` and pass three
//Wlimit values to it (for hue, saturation, and brightness).
//W
//W{{{
//Wval cm = hsbColors(hueLim, saturationLim, brightnessLim) ; cm(hueNum, saturationNum, brightnessNum)
val hcm = "net.kogics.kojo.staging.HsbColorMaker"
Tester(
"import Staging._ ; val cm = hsbColors(255, 255, 255) ; println(cm(22, 22, 22))",
Some("$java.awt.Color\\\\[r=22,g=21,b=20\\\\]import Staging._" +
"cm: " + hcm + " = " + hcm + "@.*")
)
//W}}}
//W
//Wcreates a color with an effective hue of `hueNum` / `hueLim`, etc.
//W
//WThe color maker can also take `Double` arguments, in which case the
//Wresulting color has an effective hue of `hueVal`, etc (all expected to be in
//Wthe range 0.0 <= val <= 1.0):
//W
//W{{{
//Wval cm = hsbColors(hueLim, saturationLim, brightnessLim) ; cm(hueVal, saturationVal, brightnessVal)
Tester(
"import Staging._ ; val cm = hsbColors(255, 255, 255) ; println(cm(.1, .1, .1))",
Some("$java.awt.Color\\\\[r=26,g=24,b=23\\\\]import Staging._" +
"cm: " + hcm + " = " + hcm + "@.*")
)
//W}}}
//W
//WFinally,
//W
//W{{{
//WnamedColor(colorName)
Tester(
"""import Staging._ ; println(namedColor("#99ccDD"))""",
Some("java.awt.Color[r=153,g=204,b=221]import Staging._")
)
Tester(
"""import Staging._ ; println(namedColor("aliceblue"))""",
Some("java.awt.Color[r=240,g=248,b=255]import Staging._")
)
//W}}}
//W
//Wwhere _colorName_ is either
//W
//W * "none",
//W * one of the names in this list: http://www.w3.org/TR/SVG/types.html#ColorKeywords, or
//W * a string with the format "#rrggbb" (in hexadecimal)
//W
//Wreturns the described color.
//W
//WLinear interpolation between two colors is done using `lerpColor`:
//W
//W{{{
//WlerpColor(colorFrom, colorTo, amount)
Tester(
"""import Staging._
|
|val a = namedColor("#99ccDD")
|val b = namedColor("#003366")
|println(lerpColor(a, b, 0))""".stripMargin,
Some("java.awt.Color[r=153,g=204,b=221]import Staging._" +
"a: java.awt.Color = java.awt.Color[r=153,g=204,b=221]" +
"b: java.awt.Color = java.awt.Color[r=0,g=51,b=102]")
)
Tester(
"""import Staging._
|
|val a = namedColor("#99ccDD")
|val b = namedColor("#003366")
|println(lerpColor(a, b, 0.3))""".stripMargin,
Some("java.awt.Color[r=107,g=158,b=185]import Staging._" +
"a: java.awt.Color = java.awt.Color[r=153,g=204,b=221]" +
"b: java.awt.Color = java.awt.Color[r=0,g=51,b=102]")
)
Tester(
"""import Staging._
|
|val a = namedColor("#99ccDD")
|val b = namedColor("#003366")
|println(lerpColor(a, b, 1))""".stripMargin,
Some("java.awt.Color[r=0,g=51,b=102]import Staging._" +
"a: java.awt.Color = java.awt.Color[r=153,g=204,b=221]" +
"b: java.awt.Color = java.awt.Color[r=0,g=51,b=102]")
)
//W}}}
//W
//WWhen drawing figures, the _fill_ color, which is used for the insides, and
//Wthe _stroke_ color, which is used for the edges, can be set and unset.
//W
//WTo set the fill color, call `fill`.
//W
//W{{{
//Wfill(color)
Tester(
"""import Staging._
|
|val a = namedColor("#99ccDD")
|fill(a)""".stripMargin,
Some("import Staging._" +
"a: java.awt.Color = java.awt.Color[r=153,g=204,b=221]")
)
assertEquals("java.awt.Color[r=153,g=204,b=221]", SpriteCanvas.instance.figure0.fillColor.toString)
//W}}}
//W
}
@Test
def test2 = {
//WTo unset the fill color, call `noFill`, or `fill` with a `null` argument.
//W
//W{{{
//WnoFill
//Wfill(null)
Tester("import Staging._ ; fill(namedColor(\\"#99ccDD\\")) ; noFill")
assertNull(SpriteCanvas.instance.figure0.fillColor)
Tester("import Staging._ ; fill(namedColor(\\"#99ccDD\\")) ; fill(null)")
assertNull(SpriteCanvas.instance.figure0.fillColor)
//W}}}
//W
//WTo set the stroke color, call `stroke`.
//W
//W{{{
//Wstroke(color)
Tester(
"""import Staging._ ; stroke(namedColor("#99ccDD"))""",
Some("import Staging._")
)
assertEquals("java.awt.Color[r=153,g=204,b=221]", SpriteCanvas.instance.figure0.lineColor.toString)
//W}}}
//W
//WTo unset the stroke color, call `noStroke`, or `stroke` with a `null` argument.
//W
//W{{{
//WnoStroke
//Wstroke(null)
Tester("import Staging._ ; stroke(null)", Some("import Staging._"))
assertNull(SpriteCanvas.instance.figure0.lineColor)
Tester(
"""import Staging._ ; stroke(namedColor("#99ccDD")) ; noStroke""",
Some("import Staging._")
)
assertNull(SpriteCanvas.instance.figure0.lineColor)
//W}}}
//W
//WTo set the stroke width, call `strokeWidth`.
//W
//W{{{
//WstrokeWidth(value)
Tester("""import Staging._ ; stroke(red) ; strokeWidth(2)""", Some("import Staging._"))
Tester("""import Staging._ ; strokeWidth(2)""", Some("import Staging._"))
assertEquals(2.0, SpriteCanvas.instance.figure0.lineStroke.asInstanceOf[java.awt.BasicStroke].getLineWidth, 0.01)
Tester("""import Staging._ ; strokeWidth(2.0)""", Some("import Staging._"))
assertEquals(2.0, SpriteCanvas.instance.figure0.lineStroke.asInstanceOf[java.awt.BasicStroke].getLineWidth, 0.01)
Tester("""import Staging._ ; strokeWidth(.2)""", Some("import Staging._"))
//W}}}
//W
//WTo set the fill, stroke, and stroke width just for the extent of some lines
//Wof code, use `withStyle`.
//W
//W{{{
//WwithStyle(fillColor, strokeColor, strokeWidth) { ...code... }
Tester("""import Staging._
|
|fill(green)
|stroke(black)
|strokeWidth(1.0)
|withStyle(red, blue, 4) {/**/}""".stripMargin, Some("import Staging._"))
assertEquals("java.awt.Color[r=0,g=255,b=0]", SpriteCanvas.instance.figure0.fillColor.toString)
assertEquals("java.awt.Color[r=0,g=0,b=0]", SpriteCanvas.instance.figure0.lineColor.toString)
assertEquals(1.0, SpriteCanvas.instance.figure0.lineStroke.asInstanceOf[java.awt.BasicStroke].getLineWidth, 0.01)
//W}}}
//M
//MThe fill, stroke, and stroke width can also be saved with `saveStyle` and
//Mlater restored with `restoreStyle` (the latter quietly fails if no style
//Mhas been saved yet).
//M
//M{{{
//MsaveStyle
//MrestoreStyle
/*
Tester("""import Staging._
|
|fill(green)
|stroke(black)
|strokeWidth(1.0)
|saveStyle
|fill(red)
|stroke(blue)
|strokeWidth(4)
|restoreStyle""".stripMargin, Some("import Staging._"))
assertEquals("java.awt.Color[r=0,g=255,b=0]", SpriteCanvas.instance.figure0.fillColor.toString)
assertEquals("java.awt.Color[r=0,g=0,b=0]", SpriteCanvas.instance.figure0.lineColor.toString)
assertEquals(1.0, SpriteCanvas.instance.figure0.lineStroke.asInstanceOf[java.awt.BasicStroke].getLineWidth, 0.01)
*/
//M}}}
//W
//WThe Color type is 'pimped' with the following accessors:
//W
//W{{{
//Walpha
Tester(
"""import Staging._
|
|println(rgbColorsWithAlpha(255, 255, 255, 255)(.1, .1, .1, .1).alpha)
""".stripMargin,
Some("26import Staging._")
)
//Wred
Tester(
"""import Staging._
|
|println(rgbColors(255, 255, 255)(.1, .1, .1).red)
""".stripMargin,
Some("26import Staging._")
)
//Wblue
Tester(
"""import Staging._
|
|println(rgbColors(255, 255, 255)(.1, .1, .1).blue)
""".stripMargin,
Some("26import Staging._")
)
//Wgreen
Tester(
"""import Staging._
|
|println(rgbColors(255, 255, 255)(.1, .1, .1).green)
""".stripMargin,
Some("26import Staging._")
)
//Whue
Tester(
"""import Staging._
|
|println(hsbColors(255, 255, 255)(.1, .1, .1).hue)
""".stripMargin,
Some("15import Staging._")
)
//Wsaturation
Tester(
"""import Staging._
|
|println(hsbColors(255, 255, 255)(.1, .1, .1).saturation)
""".stripMargin,
Some("29import Staging._")
)
//Wbrightness
Tester(
"""import Staging._
|
|println(hsbColors(255, 255, 255)(.1, .1, .1).brightness)
""".stripMargin,
Some("26import Staging._")
)
//W}}}
//W
}
}
| richardfontana/fontana2007-t | KojoEnv/test/unit/src/net/kogics/kojo/staging/ColorTest.scala | Scala | gpl-3.0 | 15,935 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.plugin.input.websocket
import java.io.{Serializable => JSerializable}
import com.stratio.sparta.sdk.Input
import com.stratio.sparta.sdk.ValidatingPropertyMap._
import org.apache.spark.sql.Row
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
class WebSocketInput(properties: Map[String, JSerializable]) extends Input(properties) {
def setUp(ssc: StreamingContext, sparkStorageLevel: String): DStream[Row] = {
ssc.receiverStream(new WebSocketReceiver(properties.getString("url"), storageLevel(sparkStorageLevel)))
.map(data => Row(data))
}
}
| danielcsant/sparta | plugins/src/main/scala/com/stratio/sparta/plugin/input/websocket/WebSocketInput.scala | Scala | apache-2.0 | 1,257 |
package sttp.client3.asynchttpclient
import sttp.client3._
import _root_.zio._
import sttp.capabilities.zio.ZioStreams
import sttp.capabilities.{Effect, WebSockets}
import sttp.client3.impl.zio.{ExtendEnv, SttpClientStubbingBase}
package object zio {
/** ZIO-environment service definition, which is an SttpBackend. */
type SttpClient = SttpBackend[Task, ZioStreams with WebSockets]
type SttpClientStubbing = SttpClientStubbing.SttpClientStubbing
/** Sends the request. Only requests for which the method & URI are specified can be sent.
*
* @return
* An effect resulting in a [[Response]], containing the body, deserialized as specified by the request (see
* [[RequestT.response]]), if the request was successful (1xx, 2xx, 3xx response codes), or if there was a
* protocol-level failure (4xx, 5xx response codes).
*
* A failed effect, if an exception occurred when connecting to the target host, writing the request or reading the
* response.
*
* Known exceptions are converted to one of [[SttpClientException]]. Other exceptions are kept unchanged.
*/
def send[T](
request: Request[T, Effect[Task] with ZioStreams with WebSockets]
): ZIO[SttpClient, Throwable, Response[T]] =
ZIO.environmentWithZIO(env => env.get[SttpClient].send(request))
/** A variant of [[send]] which allows the effects that are part of the response handling specification (when using
* websockets or resource-safe streaming) to use an `R` environment.
*/
def sendR[T, R](
request: Request[T, Effect[RIO[R, *]] with ZioStreams with WebSockets]
): ZIO[SttpClient with R, Throwable, Response[T]] =
ZIO.environmentWithZIO(env => env.get[SttpClient].extendEnv[R].send(request))
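/** Usage sketch (editor's addition, not part of the original file): a minimal example of how
  * `send` composes with the ZIO environment. Nothing beyond the imports above is assumed; a
  * concrete `SttpClient` backend layer still has to be provided when the effect is run.
  */
private def exampleSend: ZIO[SttpClient, Throwable, String] =
  send(basicRequest.get(uri"https://example.org"))
    .map(_.body.fold(identity, identity))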
object SttpClientStubbing extends SttpClientStubbingBase[Any, ZioStreams with WebSockets] {
override private[sttp] def serviceTag: Tag[SttpClientStubbing.SttpClientStubbing] = implicitly
override private[sttp] def sttpBackendTag: Tag[SttpClient] = implicitly
}
object stubbing {
import SttpClientStubbing.StubbingWhenRequest
def whenRequestMatches(p: Request[_, _] => Boolean): StubbingWhenRequest =
StubbingWhenRequest(p)
val whenAnyRequest: StubbingWhenRequest =
StubbingWhenRequest(_ => true)
def whenRequestMatchesPartial(
partial: PartialFunction[Request[_, _], Response[_]]
): URIO[SttpClientStubbing, Unit] =
ZIO.environmentWithZIO(_.get.whenRequestMatchesPartial(partial))
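// Editor's note (not in the original): in tests these helpers are combined with the stubbing
// environment, e.g. by configuring behaviour via `whenAnyRequest` or `whenRequestMatchesPartial`
// before running code that calls `send` against the stubbed SttpClient.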
}
}
| softwaremill/sttp | async-http-client-backend/zio/src/main/scala/sttp/client3/asynchttpclient/zio/package.scala | Scala | apache-2.0 | 2,502 |
import sbt._
import sbt.Keys._
object LiftModuleBuild extends Build {
import BuildSettings._
val project = Project("squerylauth", file("."))
.settings(basicSettings:_*)
.settings(publishSettings:_*)
.settings(libraryDependencies <++= (liftVersion, scalaVersion) { (liftVersion, scalaVersion) =>
val scalaTestVer = scalaVersion match {
case v if (v.startsWith("2.10") || v.startsWith("2.11")) => "2.2.1"
case _ => "1.9.2"
}
Seq(
"net.liftweb" %% "lift-squeryl-record" % liftVersion % "provided",
"net.liftweb" %% "lift-webkit" % liftVersion % "provided",
"ch.qos.logback" % "logback-classic" % "1.0.3" % "provided",
"org.scalatest" %% "scalatest" % scalaTestVer % "test",
"org.mindrot" % "jbcrypt" % "0.3m" % "compile",
"joda-time" % "joda-time" % "2.3" % "provided",
"com.h2database" % "h2" % "1.3.167"
)
})
}
| gensosrl/squeryl-auth-module | project/Build.scala | Scala | apache-2.0 | 929 |
class TC
object A {
given tc as TC
def foo(using TC) = ()
}
object B {
import A._
foo // error: no implicit argument was found
foo(using tc) // error: not found: tc
foo(using A.tc) // ok
}
object C {
import A._
import A.tc
foo // ok
foo(using tc) // ok
}
object D {
import A.{foo, given _}
foo // ok
foo(using tc) // ok
}
object E {
import A.{_, given _}
foo // ok
foo(using tc) // ok
}
| som-snytt/dotty | tests/neg/import-implied.scala | Scala | apache-2.0 | 469 |
package services.forms
import models.ClaimForm
/**
* Service providing logic related to forms and their data elements.
*/
trait ClaimService {
def calculateProgress(claimForm: ClaimForm): ClaimForm
}
| vetafi/vetafi-web | app/services/forms/ClaimService.scala | Scala | apache-2.0 | 207 |
package lectures
package dataparallelism
import org.scalameter._
object WordCount {
val standardConfig = config(
Key.exec.minWarmupRuns -> 50,
Key.exec.maxWarmupRuns -> 100,
Key.exec.benchRuns -> 40,
Key.verbose -> true
) withWarmer(new Warmer.Default)
val txt = "A short text... " * 250000
val ps = new ParString(txt)
def main(args: Array[String]) {
val seqtime = standardConfig measure {
txt.foldLeft((0, true)) {
case ((wc, _), ' ') => (wc, true)
case ((wc, true), x) => (wc + 1, false)
case ((wc, false), x) => (wc, false)
}
}
println(s"sequential time: $seqtime ms")
val partime = standardConfig measure {
ps.aggregate((0, 0, 0))({ (x, y) =>
if (x._2 > 0) {
if (y != ' ') x match {
case (ls, wc, 0) => (ls, wc, 0)
case (ls, wc, rs) => (ls, wc + 1, 0)
} else x match {
case (ls, wc, rs) => (ls, wc, rs + 1)
}
} else {
if (y != ' ') x match {
case (ls, 0, _) => (ls + 1, 0, ls + 1)
} else x match {
case (ls, 0, _) => (ls + 1, 1, 0)
}
}
}, {
case ((0, 0, 0), res) => res
case (res, (0, 0, 0)) => res
case ((lls, lwc, 0), (0, rwc, rrs)) => (lls, lwc + rwc - 1, rrs)
case ((lls, lwc, _), (_, rwc, rrs)) => (lls, lwc + rwc, rrs)
})
}
println(s"parallel time: $partime ms")
println(s"speedup: ${seqtime / partime}")
}
}
| twistedgut/scala_coursera | parprog-snippets/src/main/scala/lectures/dataparallelism/WordCount.scala | Scala | gpl-3.0 | 1,524 |