| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
/*
* This file is part of Beez.
*
* Copyright 2014 Zengularity
*
* Beez is free software: you can redistribute it and/or modify
* it under the terms of the AFFERO GNU General Public License as published by
* the Free Software Foundation.
*
* Beez is distributed "AS-IS" AND WITHOUT ANY WARRANTY OF ANY KIND,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
* NON-INFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. See
* the AFFERO GNU General Public License for the complete license terms.
*
* You should have received a copy of the AFFERO GNU General Public License
* along with Beez. If not, see <http://www.gnu.org/licenses/agpl-3.0.html>
*/
package controllers
import play.api._
import play.api.mvc._
import play.api.libs.iteratee._
import play.api.libs.json._
import actors._
import scala.util.{Success, Failure}
import play.api.libs.concurrent.Execution.Implicits._
object Application extends Controller {
import Play.current
private val urlPrefixSlug = Play.configuration.getString("application.urlPrefixSlug").getOrElse("http://localhost:9000/")
def makeSlugUrl (slug: String): String = urlPrefixSlug + slug
def index = Action {
Ok(views.html.index())
}
def newHive = Action {
Async {
val slug = Rooms.genSlug(5)
Rooms.create(slug).map { _ =>
Found(routes.Application.hive(slug).url)
} recover { case e =>
Forbidden(e.getMessage)
}
}
}
def bee (slug: String) = Action { implicit r =>
Async {
Rooms.get(slug).map { _ =>
Ok(views.html.bee(slug))
} recover { case e =>
NotFound(views.html.roomnotfound(slug))
}
}
}
def hive (slug: String) = Action { implicit r =>
Async {
Rooms.get(slug).map { _ =>
Ok(views.html.hive(slug, makeSlugUrl(slug)))
} recover { case e =>
NotFound(views.html.roomnotfound(slug))
}
}
}
def redirectSlug (slug: String) = Action { implicit r =>
if (r.headers.get("User-Agent").map(_.contains("Mobile")).getOrElse(false))
Redirect(routes.Application.bee(slug))
else
Redirect(routes.Application.hive(slug))
}
def faviconIco = Action(NotFound)
def join(slug: String, id: String) = WebSocket.async[JsValue] { request =>
Rooms.join(slug, id)
}
}
| gre/beez | app/controllers/Application.scala | Scala | agpl-3.0 | 2,296 |
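As a small aside on the controller above: the only branching logic is the User-Agent check in `redirectSlug`, which is easy to pull out and exercise on its own. A minimal, dependency-free sketch (names are illustrative, not part of the Beez code):

```scala
object MobileDetectSketch {
  // Mirrors the check in Application.redirectSlug: any User-Agent containing "Mobile"
  // is sent to the "bee" (phone) view, everything else to the "hive" view.
  def isMobile(userAgent: Option[String]): Boolean =
    userAgent.exists(_.contains("Mobile"))

  def main(args: Array[String]): Unit = {
    val phone   = Some("Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) Mobile/13B143")
    val desktop = Some("Mozilla/5.0 (X11; Linux x86_64) Firefox/115.0")
    println(isMobile(phone))   // true  -> redirect to routes.Application.bee(slug)
    println(isMobile(desktop)) // false -> redirect to routes.Application.hive(slug)
    println(isMobile(None))    // false -> a missing header falls back to the hive view
  }
}
```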
package scalarules.test.scalac.srcjars
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.must.Matchers._
import test.{A, ADuplicate}
class DuplicatesTest extends AnyFunSuite {
test("all classes from duplicated files are available") {
noException should be thrownBy classOf[A]
noException should be thrownBy classOf[ADuplicate]
}
}
| bazelbuild/rules_scala | test/scalac/srcjars/DuplicatesTest.scala | Scala | apache-2.0 | 367 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sat Jan 9 21:48:57 EST 2016
* @see LICENSE (MIT style license file).
*/
package scalation.analytics.classifier
import scalation.linalgebra.{MatrixD, VectoD, VectorD, VectoI, VectorI}
import scalation.stat.StatVector.{cov, mean}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `LDA` class implements a Linear Discriminant Analysis 'LDA' classifier.
* @see en.wikipedia.org/wiki/Linear_discriminant_analysis
* @param x the real-valued training/test data vectors stored as rows of a matrix
* @param y the training/test classification vector, where y_i = class for row i of the matrix x
* @param fn the names for all features/variables
*/
class LDA (x: MatrixD, y: VectoI, fn: Array [String])
extends ClassifierReal (x, y, fn, 2, Array ("no", "yes"))
{
private val DEBUG = true
private val x0 = (MatrixD (for (i <- 0 until x.dim1 if y(i) == 0) yield x(i))).t
private val x1 = (MatrixD (for (i <- 0 until x.dim1 if y(i) == 1) yield x(i))).t
if (DEBUG) {
println ("x0 = " + x0)
println ("x1 = " + x1)
} // if
private val mu0 = mean (x0)
private val mu1 = mean (x1)
private val sig = cov (x)
private val sig0 = cov (x0)
private val sig1 = cov (x1)
private var c = 0.5
private var w: VectoD = null
if (DEBUG) {
println ("mu0 = " + mu0)
println ("mu1 = " + mu1)
println ("sig0 = " + sig0)
println ("sig1 = " + sig1)
} // if
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the cutoff value 'c' used by 'classify'.
 * @param thres the threshold from which the cutoff is computed
 */
def setCutoff (thres: Double)
{
c = 0.5 * (thres - (mu0 dot sig0.inverse * mu0) + (mu1 dot sig1.inverse * mu1))
} // setCutoff
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Train the classifier by computing the discriminant vector 'w = sig.inverse * (mu0 - mu1)'.
 * @param testStart the start of the test region (not used by this implementation)
 * @param testEnd the end of the test region (not used by this implementation)
 */
def train (testStart: Int, testEnd: Int)
{
w = sig.inverse * (mu0 - mu1)
} // train
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Reset the classifier to its initial state (not yet implemented).
 */
def reset (): Unit = ???
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Classify vector 'z' by comparing the discriminant score 'w dot z' with the cutoff 'c'.
 * @param z the vector to classify
 */
def classify (z: VectoD): (Int, String, Double) =
{
if ((w dot z) > c) (1, "yes", 1.0) else (0, "no", 0.0)
} // classify
} // LDA class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `LDATest` is used to test the `LDA` class.
* @see people.revoledu.com/kardi/tutorial/LDA/Numerical%20Example.html
* > run-main scalation.analytics.classifier.LDATest
*/
object LDATest extends App
{
// features/variables:
// x0: curvature
// x1: diameter
// x0 x1
val x = new MatrixD ((7, 2), 2.95, 6.63,
2.53, 7.79,
3.57, 5.65,
3.16, 5.47,
2.58, 4.46,
2.16, 6.22,
3.27, 3.52)
val y = VectorI (0, 0, 0, 0, 1, 1, 1)
val cl = new LDA (x, y, Array ("curvature", "diameter"))
cl.train ()
val z = VectorD (2.81, 5.46)
println (s"classify ($z) = ${cl.classify (z)}")
} // LDATest object
| scalation/fda | scalation_1.2/src/main/scala/scalation/analytics/classifier/LDA.scala | Scala | mit | 3,397 |
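For readers without ScalaTion on the classpath, the heart of the classifier above is the Fisher discriminant rule: `train` computes w = Σ⁻¹(μ0 − μ1) from the pooled covariance and the two class means, and `classify` compares w · z against the cutoff c. A dependency-free sketch of that rule for the two-feature case (the numbers are placeholders, not the statistics of the data set above):

```scala
object LdaRuleSketch {
  // Hand-rolled 2x2 inverse, enough for the two-feature illustration.
  def inv2(m: Array[Array[Double]]): Array[Array[Double]] = {
    val det = m(0)(0) * m(1)(1) - m(0)(1) * m(1)(0)
    Array(Array( m(1)(1) / det, -m(0)(1) / det),
          Array(-m(1)(0) / det,  m(0)(0) / det))
  }

  def matVec(m: Array[Array[Double]], v: Array[Double]): Array[Double] =
    m.map(row => row.zip(v).map { case (a, b) => a * b }.sum)

  def dot(a: Array[Double], b: Array[Double]): Double =
    a.zip(b).map { case (x, y) => x * y }.sum

  def main(args: Array[String]): Unit = {
    val sigma = Array(Array(0.2, 0.0), Array(0.0, 1.0)) // placeholder pooled covariance
    val mu0   = Array(2.99, 6.32)                       // placeholder class-0 mean
    val mu1   = Array(2.67, 4.73)                       // placeholder class-1 mean
    val c     = 0.5                                     // default cutoff, as in LDA

    // w = sig.inverse * (mu0 - mu1), exactly as in train()
    val w = matVec(inv2(sigma), mu0.zip(mu1).map { case (a, b) => a - b })
    val z = Array(2.81, 5.46)
    // same decision as classify(): "yes" iff (w dot z) > c
    val label = if (dot(w, z) > c) "yes" else "no"
    println(s"w = ${w.mkString("[", ", ", "]")}, classify(z) = $label")
  }
}
```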
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.types
import io.rdbc.pgsql.core.Oid
case object PgCharType extends PgType[PgChar] {
val oid = Oid(1042)
val valCls = classOf[PgChar]
val name = "char"
}
final case class PgChar(value: String) extends PgVal[String] {
val typ = PgCharType
}
| rdbc-io/rdbc-pgsql | rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgChar.scala | Scala | apache-2.0 | 879 |
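The pair above is the whole convention rdbc-pgsql uses to declare a type: a `PgType` singleton carrying the Postgres OID, the value class and the type name, plus a `PgVal` wrapper for the Scala value. Purely as an illustration of that pattern, a hypothetical extra type could look like the sketch below (not part of the library; 1043 is the Postgres OID for varchar):

```scala
package io.rdbc.pgsql.core.types

import io.rdbc.pgsql.core.Oid

// Hypothetical sketch following the same shape as PgCharType / PgChar above.
case object PgVarcharTypeSketch extends PgType[PgVarcharSketch] {
  val oid = Oid(1043)                   // Postgres OID for varchar
  val valCls = classOf[PgVarcharSketch]
  val name = "varchar"
}

final case class PgVarcharSketch(value: String) extends PgVal[String] {
  val typ = PgVarcharTypeSketch
}
```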
package uk.ac.ncl.openlab.intake24.foodsql.user
import javax.sql.DataSource
import anorm.{Macro, SQL, SqlParser, sqlToSimple}
import com.google.inject.name.Named
import com.google.inject.{Inject, Singleton}
import org.slf4j.LoggerFactory
import uk.ac.ncl.openlab.intake24.errors.{FoodCompositionTableError, RecordNotFound, UnexpectedDatabaseError}
import uk.ac.ncl.openlab.intake24.services.nutrition.{FoodCompositionRecord, FoodCompositionService, NutrientDescription}
import uk.ac.ncl.openlab.intake24.sql.{SqlDataService, SqlResourceLoader}
@Singleton
class FoodCompositionServiceImpl @Inject()(@Named("intake24_foods") val dataSource: DataSource) extends FoodCompositionService with SqlDataService with SqlResourceLoader {
private val logger = LoggerFactory.getLogger(classOf[FoodCompositionServiceImpl])
private case class NutrientDescriptionRow(id: Long, description: String, symbol: String)
case class FoodNutrientValueRow(code: String, description: String, nutrient_type_id: Long, units_per_100g: Double)
def getSupportedNutrients(): Either[UnexpectedDatabaseError, Seq[NutrientDescription]] = tryWithConnection {
implicit conn =>
Right(SQL("SELECT nutrient_types.id, nutrient_types.description, nutrient_units.symbol FROM nutrient_types INNER JOIN nutrient_units ON nutrient_types.unit_id = nutrient_units.id")
.executeQuery()
.as(Macro.namedParser[NutrientDescriptionRow].*)
.map {
row => NutrientDescription(row.id, row.description, row.symbol)
})
}
private case class NutrientsRow(nutrient_type_id: Long, units_per_100g: Double)
private case class FieldsRow(field_name: String, field_value: String)
def getFoodCompositionRecord(table_id: String, record_id: String): Either[FoodCompositionTableError, FoodCompositionRecord] = tryWithConnection {
implicit conn =>
val validation = SQL("SELECT 1 FROM nutrient_table_records WHERE id={record_id} AND nutrient_table_id={table_id}")
.on('record_id -> record_id, 'table_id -> table_id)
.executeQuery().as(SqlParser.long(1).singleOpt).isDefined
if (!validation)
Left(RecordNotFound(new RuntimeException(s"table_id: $table_id, record_id: $record_id")))
else {
val nutrientRows = SQL("SELECT nutrient_type_id, units_per_100g FROM nutrient_table_records_nutrients WHERE nutrient_table_record_id={record_id} and nutrient_table_id={table_id}")
.on('record_id -> record_id, 'table_id -> table_id)
.as(Macro.namedParser[NutrientsRow].*)
val nutrientsMap = nutrientRows.map(row => row.nutrient_type_id -> row.units_per_100g).toMap
val fieldRows = SQL("SELECT field_name, field_value FROM nutrient_table_records_fields WHERE nutrient_table_record_id={record_id} and nutrient_table_id={table_id}")
.on('record_id -> record_id, 'table_id -> table_id)
.as(Macro.namedParser[FieldsRow].*)
val fieldsMap = fieldRows.map(row => row.field_name -> row.field_value).toMap
Right(FoodCompositionRecord(fieldsMap, nutrientsMap))
}
}
override def listFoodNutrients(tableId: String, localeId: String): Either[FoodCompositionTableError, Map[String, Map[Long, Double]]] = tryWithConnection {
implicit conn =>
val q =
"""
|SELECT
| code,
| nt.description,
| ntrn.nutrient_type_id,
| ntrn.units_per_100g
|FROM foods
| JOIN foods_nutrient_mapping AS fnm ON foods.code = fnm.food_code
| JOIN nutrient_table_records_nutrients AS ntrn
| ON ntrn.nutrient_table_record_id = fnm.nutrient_table_record_id
| AND ntrn.nutrient_table_id = fnm.nutrient_table_id
| JOIN nutrient_types nt ON ntrn.nutrient_type_id = nt.id
|WHERE ntrn.nutrient_table_id = {table_id} AND fnm.locale_id = {locale}
""".stripMargin
val rows = SQL(q).on('table_id -> tableId, 'locale -> localeId).as(Macro.namedParser[FoodNutrientValueRow].*)
val mp = rows.groupBy(_.code).map(g => g._1 -> g._2.map(r => r.nutrient_type_id -> r.units_per_100g).toMap)
Right(mp)
}
def getEnergyKcalNutrientId(): Long = 1
}
| digitalinteraction/intake24 | FoodDataSQL/src/main/scala/uk/ac/ncl/openlab/intake24/foodsql/user/FoodCompositionServiceImpl.scala | Scala | apache-2.0 | 4,201 |
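The only non-obvious step in `listFoodNutrients` above is the final reshaping: a flat list of (food code, nutrient type, amount) rows is grouped into a `Map[foodCode, Map[nutrientTypeId, unitsPer100g]]`. A stand-alone sketch of that grouping with made-up rows (the row type mirrors `FoodNutrientValueRow`, with the description column omitted):

```scala
object NutrientGroupingSketch {
  final case class Row(code: String, nutrientTypeId: Long, unitsPer100g: Double)

  // Same groupBy/map reshaping as in listFoodNutrients.
  def regroup(rows: Seq[Row]): Map[String, Map[Long, Double]] =
    rows.groupBy(_.code).map { case (code, rs) =>
      code -> rs.map(r => r.nutrientTypeId -> r.unitsPer100g).toMap
    }

  def main(args: Array[String]): Unit = {
    val rows = Seq(Row("APPL", 1L, 52.0), Row("APPL", 8L, 13.8), Row("BRED", 1L, 265.0))
    println(regroup(rows))
    // Map(APPL -> Map(1 -> 52.0, 8 -> 13.8), BRED -> Map(1 -> 265.0))
  }
}
```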
/*
* Copyright (c) 2016. Y Experiment (yexperiment.com) MIT License
*/
package helpers
import com.jme3.math.ColorRGBA
object ColorPalette {
// http://paletton.com/#uid=72R0X0kllllaFw0g0qFqFg0w0aF
val MAIN_COLOR = new ColorRGBA(.173f, .518f, .216f, 1f)
val MAIN_LIGHT = new ColorRGBA(.518f, .776f, .549f, 1f)
val MAIN_DARK = new ColorRGBA(0f, .259f, .031f, 1f)
val SECOND_COLOR = new ColorRGBA(.184f, .255f, .447f, 1f)
val SECOND_LIGHT = new ColorRGBA(.475f, .525f, .675f, 1f)
val SECOND_DARK = new ColorRGBA(.024f, .078f, .224f, 1f)
val HIGH_COLOR = new ColorRGBA(.667f, .247f, .224f, 1f)
val HIGH_LIGHT = new ColorRGBA(1f, .682f, .667f, 1f)
val HIGH_DARK = new ColorRGBA(.333f, .016f, 0f, 1f)
val LOW_COLOR = new ColorRGBA(.667f, .525f, .224f, 1f)
val LOW_LIGHT = new ColorRGBA(1f, .894f, .667f, 1f)
val LOW_DARK = new ColorRGBA(.333f, .227f, 0f, 1f)
}
| Y-Experiment/LW3D | src/main/scala/helpers/ColorPalette.scala | Scala | mit | 888 |
package dk.itu.wsq.cases.spanningtree
import dk.itu.wsq._
import scala.collection.mutable
import java.util.concurrent.atomic.AtomicBoolean
case class SpanningTreeNode(val id: Int) extends Node {
private var _color: Option[Int] = None
private def adopt(child: SpanningTreeNode): Unit = {
child.parent = Some(this)
}
var visited: AtomicBoolean = new AtomicBoolean(false)
def color = _color
def children: mutable.HashSet[SpanningTreeNode] = {
neighbors filter { n => n.parent == Some(this) }
}
val neighbors: mutable.HashSet[SpanningTreeNode] = mutable.HashSet[SpanningTreeNode]()
var parent: Option[SpanningTreeNode] = None
def paint(brush: Int): Unit = {
_color = Some(brush)
}
def visit(worker: SpanningTreeWorker): Unit = {
if (visited.compareAndSet(false, true)) {
worker.visitCounter += 1
}
paint(worker.color)
}
def traverse(worker: SpanningTreeWorker): Unit = {
visit(worker)
for (n <- neighbors) { // BFS
if (n.color.isEmpty) {
n.visit(worker)
adopt(n)
worker.addToQueue(n)
}
}
}
override def equals(that: Any): Boolean = {
that match {
case other: SpanningTreeNode => this.id == other.id
case _ => false
}
}
def reset(): Unit = {
_color = None
parent = None
visited.set(false)
}
}
| christianharrington/WorkStealingQueues | Scala/src/main/scala/dk/itu/wsq/cases/spanningtree/SpanningTreeNode.scala | Scala | unlicense | 1,362 |
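The node above keeps adjacency in a mutable `neighbors` set and expects edges to be added symmetrically by whoever builds the graph; a worker then claims a node through the atomic `visited` flag and colours it with `paint`. A small wiring sketch that uses only the members shown above (it deliberately avoids `SpanningTreeWorker`, whose definition is not part of this file):

```scala
import dk.itu.wsq.cases.spanningtree.SpanningTreeNode

object SpanningTreeWiringSketch {
  // Undirected edge: both endpoints list each other as neighbours.
  def link(a: SpanningTreeNode, b: SpanningTreeNode): Unit = {
    a.neighbors += b
    b.neighbors += a
  }

  def main(args: Array[String]): Unit = {
    val Seq(n0, n1, n2) = Seq(0, 1, 2).map(SpanningTreeNode(_))
    link(n0, n1); link(n1, n2)

    n0.paint(7)                                     // a worker's colour claims the node
    println(n0.color)                               // Some(7)
    println(n0.visited.compareAndSet(false, true))  // true: first visit wins, as in visit()
    println(n0.visited.compareAndSet(false, true))  // false: already claimed

    n0.reset()
    println(n0.color)                               // None again after reset()
  }
}
```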
package kneelnrise.warp10scala.services
import java.util.UUID
import akka.NotUsed
import akka.http.scaladsl.model._
import akka.stream.scaladsl.{Flow, Source}
import akka.util.ByteString
import kneelnrise.warp10scala.model.{FetchQuery, GTS}
import scala.collection.immutable
import scala.util.{Failure, Success}
object Warp10FetchClient {
def fetch(implicit warp10ClientContext: Warp10ClientContext): Flow[FetchQuery, GTS, NotUsed] = {
val uuid = UUID.randomUUID().toString
Flow[FetchQuery]
.map(createRequest)
.map(request => request -> uuid)
.via(warp10ClientContext.poolClientFlow)
.filter(result => result._2 == uuid) // We ignore results from other requests
.map(result => result._1)
.map {
case Success(httpResponse) => httpResponse
case Failure(exception) => throw exception
}
.via(transformResponse)
.via(byteStringToGTS)
}
private[services] def createRequest(query: FetchQuery)(implicit warp10ClientContext: Warp10ClientContext) =
HttpRequest(
method = HttpMethods.GET,
uri = warp10ClientContext.configuration.fetchUrl + "?" + query.serialize,
headers = immutable.Seq(`X-Warp10-Token`(warp10ClientContext.configuration.readToken))
)
private[services] def transformResponse(implicit warp10ClientContext: Warp10ClientContext): Flow[HttpResponse, ByteString, NotUsed] = {
import warp10ClientContext._
Flow[HttpResponse]
.flatMapConcat { httpResponse =>
if (httpResponse.status == StatusCodes.OK) {
httpResponse.entity.dataBytes
} else {
Source.fromFuture(
Warp10CommonClient
.readAllDataBytes(httpResponse.entity.dataBytes)
.map(content => Warp10Exception(httpResponse.status.intValue(), content))
.map(throw _)
)
}
}
}
private[services] def byteStringToGTS: Flow[ByteString, GTS, NotUsed] =
Flow[ByteString]
.via(Warp10CommonClient.lineByLineNoEmpty)
.map(GTS.parse)
// TODO: What should we do when the server returns an invalid entity?
.filter(_.isRight)
.map(_.right.get)
}
| kneelnrise/warp10-scala | src/main/scala/kneelnrise/warp10scala/services/Warp10FetchClient.scala | Scala | mit | 2,168 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.feature.MinHashLSH
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col
// $example off$
/**
* An example demonstrating MinHashLSH.
* Run with:
* bin/run-example ml.MinHashLSHExample
*/
object MinHashLSHExample {
def main(args: Array[String]): Unit = {
// Creates a SparkSession
val spark = SparkSession
.builder
.appName("MinHashLSHExample")
.getOrCreate()
// $example on$
val dfA = spark.createDataFrame(Seq(
(0, Vectors.sparse(6, Seq((0, 1.0), (1, 1.0), (2, 1.0)))),
(1, Vectors.sparse(6, Seq((2, 1.0), (3, 1.0), (4, 1.0)))),
(2, Vectors.sparse(6, Seq((0, 1.0), (2, 1.0), (4, 1.0))))
)).toDF("id", "features")
val dfB = spark.createDataFrame(Seq(
(3, Vectors.sparse(6, Seq((1, 1.0), (3, 1.0), (5, 1.0)))),
(4, Vectors.sparse(6, Seq((2, 1.0), (3, 1.0), (5, 1.0)))),
(5, Vectors.sparse(6, Seq((1, 1.0), (2, 1.0), (4, 1.0))))
)).toDF("id", "features")
val key = Vectors.sparse(6, Seq((1, 1.0), (3, 1.0)))
val mh = new MinHashLSH()
.setNumHashTables(5)
.setInputCol("features")
.setOutputCol("hashes")
val model = mh.fit(dfA)
// Feature Transformation
println("The hashed dataset where hashed values are stored in the column 'hashes':")
model.transform(dfA).show()
// Compute the locality sensitive hashes for the input rows, then perform approximate
// similarity join.
// We could avoid computing hashes by passing in the already-transformed dataset, e.g.
// `model.approxSimilarityJoin(transformedA, transformedB, 0.6)`
println("Approximately joining dfA and dfB on Jaccard distance smaller than 0.6:")
model.approxSimilarityJoin(dfA, dfB, 0.6, "JaccardDistance")
.select(col("datasetA.id").alias("idA"),
col("datasetB.id").alias("idB"),
col("JaccardDistance")).show()
// Compute the locality sensitive hashes for the input rows, then perform approximate nearest
// neighbor search.
// We could avoid computing hashes by passing in the already-transformed dataset, e.g.
// `model.approxNearestNeighbors(transformedA, key, 2)`
// It may return less than 2 rows when not enough approximate near-neighbor candidates are
// found.
println("Approximately searching dfA for 2 nearest neighbors of the key:")
model.approxNearestNeighbors(dfA, key, 2).show()
// $example off$
spark.stop()
}
}
// scalastyle:on println
| lhfei/spark-in-action | spark-2.x/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala | Scala | apache-2.0 | 3,513 |
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.blogcode
import java.time.LocalDate
import eu.cdevreeze.yaidom.core.EName
import eu.cdevreeze.yaidom.core.Path
import eu.cdevreeze.yaidom.core.QName
import eu.cdevreeze.yaidom.core.Scope
import eu.cdevreeze.yaidom.indexed
import eu.cdevreeze.yaidom.parse.DocumentParserUsingSax
import eu.cdevreeze.yaidom.print.DocumentPrinterUsingDom
import eu.cdevreeze.yaidom.queryapi.ClarkElemApi.withEName
import eu.cdevreeze.yaidom.resolved
import eu.cdevreeze.yaidom.simple
import eu.cdevreeze.yaidom.utils.NamespaceUtils
import org.scalatest.funsuite.AnyFunSuite
/**
* Code of yaidom XBRL blog ("XBRL, Scala and yaidom"). The blog introduces yaidom in the context
* of XBRL instances. The examples show alternative "implementations" of some XBRL formulas.
*
* The examples are from www.xbrlsite.com, by Charles Hoffman.
*
* The reader is encouraged to play with Scala and yaidom in the REPL.
*
* @author Chris de Vreeze
*/
class Blog2XbrlTest extends AnyFunSuite {
private val sampleXbrlInstanceFile: java.io.File =
(new java.io.File(classOf[Blog2XbrlTest].getResource("company-instance.xml").toURI))
private val XbrliNs = "http://www.xbrl.org/2003/instance"
private val XbrldiNs = "http://xbrl.org/2006/xbrldi"
private val LinkNs = "http://www.xbrl.org/2003/linkbase"
private val GaapNs = "http://xasb.org/gaap"
private val FrmNs = "http://www.xbrlsite.com/Schemas/frm"
private val CompanyNs = "http://www.ABCCompany.com/company"
/**
* Simple XBRL instance queries. Shows XBRL instances, and introduces yaidom ScopedElemApi query API, as well as
* Scala Collections. If you know ElemApi method `filterElemsOrSelf`, you basically know all of its methods.
*/
test("testSimpleInstanceQueries") {
// Let variable "doc" in principle be a document of any yaidom document type (here it is "simple")
val doc = docParser.parse(sampleXbrlInstanceFile)
val docElem = doc.documentElement
// Check that all gaap:AverageNumberEmployees facts have unit U-Pure.
val xbrliNs = "http://www.xbrl.org/2003/instance"
val gaapNs = "http://xasb.org/gaap"
val avgNumEmployeesFacts =
docElem.filterChildElems(withEName(gaapNs, "AverageNumberEmployees"))
assertResult(10) {
avgNumEmployeesFacts.size
}
assertResult(true) {
avgNumEmployeesFacts.forall(fact => fact.attributeOption(EName("unitRef")).contains("U-Pure"))
}
val onlyUPure =
avgNumEmployeesFacts.forall(_.attributeOption(EName("unitRef")).contains("U-Pure"))
assertResult(true) {
onlyUPure
}
// Check the unit itself, minding the default namespace
val uPureUnit =
docElem.getChildElem(e => e.resolvedName == EName(xbrliNs, "unit") && (e \\@ EName("id")).contains("U-Pure"))
assertResult("pure") {
uPureUnit.getChildElem(withEName(XbrliNs, "measure")).text
}
// Mind the default namespace. Note the precision of yaidom and its namespace support that makes this easy.
assertResult(EName(XbrliNs, "pure")) {
uPureUnit.getChildElem(withEName(XbrliNs, "measure")).textAsResolvedQName
}
// Now we get the measure element text, as QName, resolving it to an EName (expanded name)
assertResult(EName(xbrliNs, "pure")) {
uPureUnit.getChildElem(withEName(xbrliNs, "measure")).textAsResolvedQName
}
// Having the same unit, the gaap:AverageNumberEmployees facts are uniquely identified by contexts.
// Method mapValues deprecated since Scala 2.13.0.
val avgNumEmployeesFactsByContext =
avgNumEmployeesFacts.groupBy(_.attribute(EName("contextRef")))
.map { case (ctxRef, facts) => ctxRef -> facts.head }
assertResult(
Set("D-2006", "D-2007", "D-2008", "D-2009", "D-2010", "D-2010-BS1", "D-2010-BS2", "D-2010-CON", "D-2010-E", "D-2010-ALL")) {
avgNumEmployeesFactsByContext.keySet
}
assertResult("220") {
avgNumEmployeesFactsByContext("D-2006").text
}
}
/**
* XBRL instance queries creating an alternative representation, emphasizing aspects. Now we use indexed elements
* (needed for location aspect), with the same query API.
*
* Try to do this using XSLT instead (or XQuery)...
*
* See http://xbrlreview.blogspot.nl/p/json-financialreport-network-httpwww.html for a similar representation,
* but in JSON.
*/
test("testFactAspectQueries") {
val fact =
(idoc.documentElement findChildElem { e =>
e.resolvedName == EName(GaapNs, "AverageNumberEmployees") &&
e.attributeOption(EName("contextRef")).contains("D-2007")
}).head
assertResult(240) {
fact.text.toInt
}
assertResult(EName(GaapNs, "AverageNumberEmployees")) {
conceptAspect(fact)
}
assertResult(Path.Empty) {
locationAspect(fact)
}
assertResult(Some(("http://regulator.gov/id", "1234567890"))) {
entityIdentifierAspectOption(fact)
}
// Note the precise namespace support of yaidom
assertResult(Map(
EName(FrmNs, "ReportingScenarioAxis") -> EName(FrmNs, "ActualMember"),
EName(FrmNs, "ReportDateAxis") -> EName(CompanyNs, "ReportedAsOfMarch182011Member"))) {
explicitDimensionAspects(fact)
}
// Build alternative representation of the instance
val altInstanceElem = makeAlternativeInstance(idoc)
val docPrinter = DocumentPrinterUsingDom.newInstance()
val altInstanceXmlString =
docPrinter.print(altInstanceElem.prettify(2))
if (System.getProperty("Blog2XbrlTest.debug", "false").toBoolean) println(altInstanceXmlString)
val topLevelFacts =
idoc.documentElement.filterChildElems(e => !Set(XbrliNs, LinkNs).contains(e.resolvedName.namespaceUriOption.getOrElse("")))
val facts = topLevelFacts.flatMap(_.findAllElemsOrSelf)
assertResult(facts.map(_.resolvedName).toSet) {
altInstanceElem.filterElems(withEName(None, "conceptAspect")).map(e => EName.parse(e.text)).toSet
}
assertResult(Set(Path.Empty)) {
altInstanceElem.filterElems(withEName(None, "locationAspect")).map(e => Path.fromResolvedCanonicalXPath(e.text)).toSet
}
assertResult(Set("http://regulator.gov/id")) {
altInstanceElem.filterElems(withEName(None, "entityIdentifierAspect")).flatMap(e => e.attributeOption(EName("scheme"))).toSet
}
assertResult(Set("1234567890")) {
altInstanceElem.filterElems(withEName(None, "entityIdentifierAspect")).map(_.text).toSet
}
assertResult(Set(EName(XbrliNs, "instant"), EName(XbrliNs, "startDate"), EName(XbrliNs, "endDate"))) {
altInstanceElem.filterElems(withEName(None, "periodAspect")).flatMap(_.findAllChildElems.map(_.resolvedName)).toSet
}
assertResult(Map(EName(FrmNs, "ReportingScenarioAxis") -> EName(FrmNs, "ReportingScenariosAllMember"))) {
val dims = altInstanceElem.filterElems(withEName(None, "dimensionAspect"))
dims.map(e => (EName.parse(e.attribute(EName("dimension"))) -> EName.parse(e.text))).toMap filter { kv =>
kv._1 == EName(FrmNs, "ReportingScenarioAxis") && kv._2 == EName(FrmNs, "ReportingScenariosAllMember")
}
}
}
/**
* Simulating an assertion, using the aspect querying functions. The simulation helps understand the assertion.
*
* The assertion is <code>$v:VARIABLE_BalanceStart + $v:VARIABLE_Change = $v:VARIABLE_BalanceEnd</code> (the first occurrence).
*
* See http://www.xbrlsite.com/DigitalFinancialReporting/ComprehensiveExample/2011-07-15/gaap-formula.xml.
*/
test("testSimulatedAssertion1") {
val topLevelFacts =
idocElem.filterChildElems(e =>
!Set(XbrliNs, LinkNs).contains(e.resolvedName.namespaceUriOption.getOrElse("")))
val facts = topLevelFacts.flatMap(_.findAllElemsOrSelf)
val balanceFacts =
facts.filter(withEName(GaapNs, "CashAndCashEquivalentsPerCashFlowStatement"))
val changeFacts =
facts.filter(withEName(GaapNs, "CashFlowNet"))
// Implicit filtering, to filter the cartesian product of 3 fact value spaces
def mustBeEvaluated(
balanceStartFact: indexed.Elem,
changeFact: indexed.Elem,
balanceEndFact: indexed.Elem): Boolean = {
// Compare on so-called uncovered aspects, so all ones except concept and period
val currFacts = List(balanceStartFact, changeFact, balanceEndFact)
val dimensions = currFacts.flatMap(e => explicitDimensionAspects(e).keySet).toSet
currFacts.map(e => locationAspect(e)).distinct.size == 1 &&
currFacts.map(e => entityIdentifierAspectOption(e)).distinct.size == 1 &&
dimensions.forall(dim =>
currFacts.map(e => explicitDimensionAspectOption(e, dim)).toSet.size == 1) &&
currFacts.map(e => unitAspectOption(e)).distinct.size == 1 && {
// Instant-duration
// The comparison is naive, but still verbose
import LocalDate.parse
val balanceStartInstantOption =
periodAspectOption(balanceStartFact).flatMap(
_.findElem(withEName(XbrliNs, "instant"))).map(e => parse(e.text))
val balanceEndInstantOption =
periodAspectOption(balanceEndFact).flatMap(
_.findElem(withEName(XbrliNs, "instant"))).map(e => parse(e.text))
val changeStartOption =
periodAspectOption(changeFact).flatMap(
_.findElem(withEName(XbrliNs, "startDate"))).map(e => parse(e.text))
val changeEndOption =
periodAspectOption(changeFact).flatMap(
_.findElem(withEName(XbrliNs, "endDate"))).map(e => parse(e.text))
balanceStartInstantOption.isDefined && balanceEndInstantOption.isDefined &&
changeStartOption.isDefined && changeEndOption.isDefined && {
val balanceStart = balanceStartInstantOption.get
val balanceEnd = balanceEndInstantOption.get
val changeStart = changeStartOption.get
val changeEnd = changeEndOption.get
(balanceStart == changeStart || balanceStart.plusDays(1) == changeStart) &&
(balanceEnd == changeEnd)
}
}
}
// The assertion test itself
def performAssertionTest(
balanceStartFact: indexed.Elem,
changeFact: indexed.Elem,
balanceEndFact: indexed.Elem): Blog2XbrlTest.EvaluationResult = {
// Here we recognize the XPath expression shown earlier
val result =
balanceStartFact.text.toInt + changeFact.text.toInt == balanceEndFact.text.toInt
Blog2XbrlTest.EvaluationResult(
Map(
"startBalance" -> balanceStartFact,
"change" -> changeFact,
"endBalance" -> balanceEndFact), result)
}
// Executing the assertion
val evalResults =
for {
startBalance <- balanceFacts
change <- changeFacts
endBalance <- balanceFacts
if mustBeEvaluated(startBalance, change, endBalance)
} yield {
performAssertionTest(startBalance, change, endBalance)
}
assertResult(2) {
evalResults.size
}
assertResult(true) {
evalResults.forall(_.result)
}
assertResult(Set(
Map("startBalance" -> 1000, "change" -> -1000, "endBalance" -> 0),
Map("startBalance" -> -3000, "change" -> 4000, "endBalance" -> 1000))) {
// Method mapValues deprecated since Scala 2.13.0.
evalResults.map(_.facts.map { case (k, facts) => k -> facts.text.toInt }.toMap).toSet
}
}
private val docParser = DocumentParserUsingSax.newInstance()
private val idoc = indexed.Document(docParser.parse(sampleXbrlInstanceFile))
private val idocElem = idoc.documentElement
// Method mapValues deprecated since Scala 2.13.0.
val contextsById: Map[String, indexed.Elem] =
idocElem.filterChildElems(withEName(XbrliNs, "context"))
.groupBy(_.attribute(EName("id")))
.map { case (id, ctxs) => id -> ctxs.head }
.toMap
// Method mapValues deprecated since Scala 2.13.0.
val unitsById: Map[String, indexed.Elem] =
idocElem.filterChildElems(withEName(XbrliNs, "unit"))
.groupBy(_.attribute(EName("id")))
.map { case (id, uns) => id -> uns.head }
.toMap
// See http://www.xbrl.org/Specification/variables/REC-2009-06-22/.
def conceptAspect(fact: indexed.Elem): EName = fact.resolvedName
// Yaidom Paths point to an element within an element tree.
def locationAspect(fact: indexed.Elem): Path =
fact.path.parentPathOption.getOrElse(Path.Empty)
def entityIdentifierAspectOption(fact: indexed.Elem): Option[(String, String)] = {
val contextOption =
fact.attributeOption(EName("contextRef")).map(id => contextsById(id))
val identifierOption =
contextOption.flatMap(_.findElem(withEName(XbrliNs, "identifier")))
val schemeOption =
identifierOption.flatMap(_.attributeOption(EName("scheme")))
val identifierValueOption =
identifierOption.map(_.text)
for {
scheme <- schemeOption
identifierValue <- identifierValueOption
} yield (scheme, identifierValue)
}
def periodAspectOption(fact: indexed.Elem): Option[simple.Elem] = {
val contextOption =
fact.attributeOption(EName("contextRef")).map(id => contextsById(id))
val periodOption =
contextOption.flatMap(_.findElem(withEName(XbrliNs, "period")))
periodOption.map(_.underlyingElem)
}
// Forgetting about complete segment, non-XDT segment, complete scenario and
// non-XDT scenario for now. Also ignoring typed dimensions.
def explicitDimensionAspects(fact: indexed.Elem): Map[EName, EName] = {
val contextOption =
fact.attributeOption(EName("contextRef")).map(id => contextsById(id))
val memberElems =
contextOption.toVector.flatMap(_.filterElems(withEName(XbrldiNs, "explicitMember")))
memberElems.map(e =>
(e.attributeAsResolvedQName(EName("dimension")) -> e.textAsResolvedQName)).toMap
}
// Convenience method
def explicitDimensionAspectOption(
fact: indexed.Elem,
dimension: EName): Option[EName] = {
// Method filterKeys deprecated since Scala 2.13.0.
explicitDimensionAspects(fact).filter { case (dim, mem) => Set(dimension).contains(dim) }.headOption.map(_._2)
}
def unitAspectOption(fact: indexed.Elem): Option[simple.Elem] = {
val unitOption =
fact.attributeOption(EName("unitRef")).map(id => unitsById(id))
unitOption.map(_.underlyingElem)
}
// Compare aspects, naively, and without knowledge about XML Schema types.
// Period aspect comparisons are more tricky than this naive implementation suggests.
// Use equality on the results of the functions below for (period and unit) aspect
// comparisons.
def comparablePeriodAspectOption(fact: indexed.Elem): Option[Set[resolved.Elem]] = {
periodAspectOption(fact).map(e =>
e.findAllChildElems.map(che => resolved.Elem.from(che).removeAllInterElementWhitespace).toSet)
}
def comparableUnitAspectOption(fact: indexed.Elem): Option[Set[resolved.Elem]] = {
unitAspectOption(fact).map(e =>
e.findAllChildElems.map(che => resolved.Elem.from(che).removeAllInterElementWhitespace).toSet)
}
// Create alternative XBRL instance representation
def makeAlternativeInstance(indexedDoc: indexed.Document): simple.Elem = {
import simple.Node._
val sc = Scope.Empty
val topLevelFacts =
indexedDoc.documentElement.filterChildElems(e => !Set(XbrliNs, LinkNs).contains(e.resolvedName.namespaceUriOption.getOrElse("")))
val facts = topLevelFacts.flatMap(_.findAllElemsOrSelf)
val altInstanceFacts =
facts map { fact =>
val aspectsElem =
emptyElem(QName("aspects"), sc).
plusChild(textElem(QName("conceptAspect"), sc, conceptAspect(fact).toString)).
plusChild(textElem(QName("locationAspect"), sc, locationAspect(fact).toResolvedCanonicalXPath)).
plusChildOption(entityIdentifierAspectOption(fact).map(kv => textElem(QName("entityIdentifierAspect"), Vector(QName("scheme") -> kv._1), sc, kv._2.toString))).
plusChildOption(periodAspectOption(fact).map(p => elem(QName("periodAspect"), sc, p.findAllChildElems))).
plusChildren(explicitDimensionAspects(fact).toVector.map(kv => textElem(QName("dimensionAspect"), Vector(QName("dimension") -> kv._1.toString), sc, kv._2.toString))).
plusChildOption(unitAspectOption(fact).map(u => elem(QName("unitAspect"), sc, u.findAllChildElems)))
val factElem =
emptyElem(QName("fact"), sc).plusChild(aspectsElem).plusChild(textElem(QName("factValue"), sc, fact.text))
factElem
}
val altInstanceElem =
elem(QName("xbrlInstance"), sc, altInstanceFacts)
NamespaceUtils.pushUpPrefixedNamespaces(altInstanceElem)
}
}
object Blog2XbrlTest {
final case class EvaluationResult(val facts: Map[String, indexed.Elem], val result: Boolean) {
override def toString: String = {
// Method mapValues deprecated since Scala 2.13.0.
s"EvaluationResult(result: $result, facts: ${facts.map { case (k, fact) => k -> fact.underlyingElem }})"
}
}
}
| dvreeze/yaidom | jvm/src/test/scala/eu/cdevreeze/yaidom/blogcode/Blog2XbrlTest.scala | Scala | apache-2.0 | 17,627 |
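The test above leans on a small set of yaidom calls: `DocumentParserUsingSax` for parsing, and element queries such as `filterChildElems`, `findAllElemsOrSelf` and `withEName`. A minimal stand-alone query built from the same calls, counting elements per namespace in an arbitrary XML file (the file path is a placeholder):

```scala
import eu.cdevreeze.yaidom.parse.DocumentParserUsingSax

object NamespaceCountSketch {
  def main(args: Array[String]): Unit = {
    val docParser = DocumentParserUsingSax.newInstance()
    // Placeholder path; any well-formed XML document will do.
    val docElem = docParser.parse(new java.io.File("some-document.xml")).documentElement

    val countsPerNamespace =
      docElem.findAllElemsOrSelf
        .groupBy(_.resolvedName.namespaceUriOption.getOrElse("(no namespace)"))
        .map { case (ns, elems) => ns -> elems.size }

    countsPerNamespace.toSeq.sortBy(-_._2).foreach { case (ns, n) => println(s"$n\t$ns") }
  }
}
```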
package org.eso.ias.transfer.test.utilitymodule
import org.eso.ias.tranfer.utlitymodule.StatusWord
/** Build the status word with the desired bits set
*
* @param bitsSet the positions of the bits to set (first bit is in position 0)
*/
class StatusWordBuilder(bitsSet: List[Int]) {
require(Option(bitsSet).isDefined)
require(
bitsSet.forall(bitPos => bitPos>=0 && bitPos<StatusWord.monitorPointNames.size),
"Invalid position of bits to set "+bitsSet.mkString(","))
// The string representing the status word with the bits set
val statusWordString: String = {
val strings = for {
  i <- 0 until StatusWord.monitorPointNames.size
  bitVal = if (bitsSet.contains(i)) "1" else "0"
  bitStr = StatusWord.bitsToNames(i) + ":" + bitVal
} yield bitStr
strings.mkString(",")
}
/** The StatusWord decoding the string */
val statusWord = new StatusWord(statusWordString)
}
| IntegratedAlarmSystem-Group/ias | TransferFunctions/src/test/scala/org/eso/ias/transfer/test/utilitymodule/StatusWordBuilder.scala | Scala | lgpl-3.0 | 907 |
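The builder above produces a comma-separated list of NAME:bit pairs in bit-position order, with the real names coming from `StatusWord.bitsToNames`. A self-contained sketch of the same encoding with invented monitor-point names (the actual names live in the IAS `StatusWord` class and are not shown here):

```scala
object StatusWordEncodingSketch {
  // Invented names standing in for StatusWord.monitorPointNames / bitsToNames.
  val names = Vector("AC-POWER", "UPS-POWER", "AT-ZENITH", "HVAC")

  // Same shape as StatusWordBuilder.statusWordString: "NAME:1" for set bits, "NAME:0" otherwise.
  def encode(bitsSet: Set[Int]): String =
    names.indices.map(i => s"${names(i)}:${if (bitsSet(i)) 1 else 0}").mkString(",")

  def main(args: Array[String]): Unit = {
    println(encode(Set(0, 2)))
    // AC-POWER:1,UPS-POWER:0,AT-ZENITH:1,HVAC:0
  }
}
```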
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2007-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.swing
package event
object ActionEvent {
def unapply(a: ActionEvent): Option[Component] = Some(a.source)
}
class ActionEvent(override val source: Component) extends ComponentEvent
| benhutchison/scala-swing | src/main/scala/scala/swing/event/ActionEvent.scala | Scala | bsd-3-clause | 726 |
package com.example.restapi
import javax.ws.rs._
import javax.ws.rs.core._
import com.eptcomputing.neo4j.Neo4jServer
import com.example.models.Moo
/**
* Example of a resource which uses a JAXB model object for the underlying logic and
* defines its HTTP methods manually. It's only one of several possible ways of building
* a domain-specific REST API around Neo4j -- see the other resource types in this
* package for alternatives.
*/
@Path("/moo")
class MooResource {
/**
* <tt>POST /moo</tt> with a JSON document as body sets the most recently seen cow
* colour to the colour specified in that document, and returns a JSON document
* confirming the colour setting.
*/
@POST
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MediaType.APPLICATION_JSON))
def setMostRecentlySeenCow(cow: Moo) = {
Neo4jServer.exec { neo => cow.save(neo) }
cow
}
/**
* <tt>GET /moo</tt> returns a description of the most recently seen cow as a JSON
* document.
*/
@GET
@Produces(Array(MediaType.APPLICATION_JSON))
def getCow = Neo4jServer.exec { neo => new Moo(neo) }
}
| ept/neo4j-scala-template | src/main/scala/com/example/restapi/MooResource.scala | Scala | mit | 1,123 |
/**
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.marshalling
import scala.util.control.NoStackTrace
/**
* Sometimes validation (refinement) fails, see
* RefinedMarshallingSupport.scala.
*/
case class RefinementError[T]( o: T, msg: String) extends Exception(msg) with NoStackTrace
| PDXostc/rvi_sota_server | common/src/main/scala/org/genivi/sota/marshalling/RefinementError.scala | Scala | mpl-2.0 | 362 |
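Since `RefinementError` is a plain case class extending `Exception`, it can be thrown when a hand-written refinement check fails and pattern-matched on recovery. A small illustrative sketch (the `refinePositive` helper is invented for the example, not part of the SOTA code):

```scala
import org.genivi.sota.marshalling.RefinementError

object RefinementErrorSketch {
  // Invented refinement: only positive integers pass.
  def refinePositive(n: Int): Int =
    if (n > 0) n else throw RefinementError(n, s"$n is not positive")

  def main(args: Array[String]): Unit = {
    try refinePositive(-3)
    catch { case RefinementError(o, msg) => println(s"rejected $o: $msg") }
  }
}
```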
package org.locationtech.geomesa.stream.datastore
import java.awt.RenderingHints
import java.util.concurrent.{CopyOnWriteArrayList, Executors, TimeUnit}
import java.util.logging.Level
import java.{util => ju}
import com.google.common.cache.{Cache, CacheBuilder, RemovalListener, RemovalNotification}
import com.google.common.collect.Lists
import com.typesafe.config.ConfigFactory
import com.vividsolutions.jts.geom.Envelope
import org.geotools.data.DataAccessFactory.Param
import org.geotools.data._
import org.geotools.data.store._
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.FidFilterImpl
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.locationtech.geomesa.stream.SimpleFeatureStreamSource
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.index.{QuadTreeFeatureStore, SynchronizedQuadtree}
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.spatial.{BBOX, BinarySpatialOperator, Within}
import org.opengis.filter.{And, Filter, IncludeFilter, Or}
import scala.collection.JavaConversions._
case class FeatureHolder(sf: SimpleFeature, env: Envelope) {
override def hashCode(): Int = sf.hashCode()
override def equals(obj: scala.Any): Boolean = obj match {
case other: FeatureHolder => sf.equals(other.sf)
case _ => false
}
}
class StreamDataStore(source: SimpleFeatureStreamSource, timeout: Int) extends ContentDataStore {
val sft = source.sft
source.init()
val qt = new SynchronizedQuadtree
val cb =
CacheBuilder
.newBuilder()
.expireAfterWrite(timeout, TimeUnit.SECONDS)
.removalListener(
new RemovalListener[String, FeatureHolder] {
def onRemoval(removal: RemovalNotification[String, FeatureHolder]) = {
qt.remove(removal.getValue.env, removal.getValue.sf)
}
}
)
val features: Cache[String, FeatureHolder] = cb.build()
val listeners = new CopyOnWriteArrayList[StreamListener]()
private val executor = Executors.newSingleThreadExecutor()
executor.submit(
new Runnable {
override def run(): Unit = {
while(true) {
try {
val sf = source.next
if(sf != null) {
val env = sf.geometry.getEnvelopeInternal
qt.insert(env, sf)
features.put(sf.getID, FeatureHolder(sf, env))
listeners.foreach { l =>
try {
l.onNext(sf)
} catch {
case t: Throwable => getLogger.log(Level.WARNING, "Unable to notify listener", t)
}
}
}
} catch {
case t: Throwable =>
// swallow
}
}
}
}
)
override def createFeatureSource(entry: ContentEntry): ContentFeatureSource =
new StreamFeatureStore(entry, null, features, qt, sft)
def registerListener(listener: StreamListener): Unit = listeners.add(listener)
override def createTypeNames(): ju.List[Name] = Lists.newArrayList(sft.getName)
def close(): Unit = {
try {
executor.shutdown()
} catch {
case t: Throwable => // swallow
}
}
}
class StreamFeatureStore(entry: ContentEntry,
query: Query,
features: Cache[String, FeatureHolder],
val qt: SynchronizedQuadtree,
val sft: SimpleFeatureType)
extends ContentFeatureStore(entry, query) with QuadTreeFeatureStore {
override def canFilter: Boolean = true
override def getBoundsInternal(query: Query) =
ReferencedEnvelope.create(new Envelope(-180, 180, -90, 90), DefaultGeographicCRS.WGS84)
override def buildFeatureType(): SimpleFeatureType = sft
override def getCountInternal(query: Query): Int =
getReaderInternal(query).getIterator.size
override def getReaderInternal(query: Query): FR = getReaderForFilter(query.getFilter)
def getReaderForFilter(f: Filter): FR =
f match {
case o: Or => or(o)
case i: IncludeFilter => include(i)
case w: Within => within(w)
case b: BBOX => bbox(b)
case a: And => and(a)
case id: FidFilterImpl => fid(id)
case _ =>
new FilteringFeatureReader[SimpleFeatureType, SimpleFeature](include(Filter.INCLUDE), f)
}
def include(i: IncludeFilter) = new DFR(sft, new DFI(features.asMap().valuesIterator.map(_.sf)))
def fid(ids: FidFilterImpl): FR = {
val iter = ids.getIDs.flatMap(id => Option(features.getIfPresent(id.toString)).map(_.sf)).iterator
new DFR(sft, new DFI(iter))
}
private val ff = CommonFactoryFinder.getFilterFactory2
def and(a: And): FR = {
// assume just one spatialFilter for now, i.e. 'bbox() && attribute equals ??'
val (spatialFilter, others) = a.getChildren.partition(_.isInstanceOf[BinarySpatialOperator])
val restFilter = ff.and(others)
val filterIter = spatialFilter.headOption.map(getReaderForFilter).getOrElse(include(Filter.INCLUDE))
new FilteringFeatureReader[SimpleFeatureType, SimpleFeature](filterIter, restFilter)
}
def or(o: Or): FR = {
val readers = o.getChildren.map(getReaderForFilter).map(_.getIterator)
val composed = readers.foldLeft(Iterator[SimpleFeature]())(_ ++ _)
new DFR(sft, new DFI(composed))
}
override def getWriterInternal(query: Query, flags: Int) = throw new IllegalArgumentException("Not allowed")
}
object StreamDataStoreParams {
val STREAM_DATASTORE_CONFIG = new Param("geomesa.stream.datastore.config", classOf[String], "", true)
val CACHE_TIMEOUT = new Param("geomesa.stream.datastore.cache.timeout", classOf[java.lang.Integer], "", true, 10)
}
class StreamDataStoreFactory extends DataStoreFactorySpi {
import StreamDataStoreParams._
override def createDataStore(params: ju.Map[String, java.io.Serializable]): DataStore = {
val confString = STREAM_DATASTORE_CONFIG.lookUp(params).asInstanceOf[String]
val timeout = Option(CACHE_TIMEOUT.lookUp(params)).map(_.asInstanceOf[Int]).getOrElse(10)
val conf = ConfigFactory.parseString(confString)
val source = SimpleFeatureStreamSource.buildSource(conf)
new StreamDataStore(source, timeout)
}
override def createNewDataStore(params: ju.Map[String, java.io.Serializable]): DataStore = ???
override def getDescription: String = "SimpleFeature Stream Source"
override def getParametersInfo: Array[Param] = Array(STREAM_DATASTORE_CONFIG)
override def getDisplayName: String = "SimpleFeature Stream Source"
override def canProcess(params: ju.Map[String, java.io.Serializable]): Boolean =
params.containsKey(STREAM_DATASTORE_CONFIG.key)
override def isAvailable: Boolean = true
override def getImplementationHints: ju.Map[RenderingHints.Key, _] = null
}
trait StreamListener {
def onNext(sf: SimpleFeature): Unit
}
object StreamListener {
def apply(f: Filter, fn: SimpleFeature => Unit) =
new StreamListener {
override def onNext(sf: SimpleFeature): Unit = if(f.evaluate(sf)) fn(sf)
}
def apply(fn: SimpleFeature => Unit) =
new StreamListener {
override def onNext(sf: SimpleFeature): Unit = fn(sf)
}
}
| jnh5y/geomesa | geomesa-stream/geomesa-stream-datastore/src/main/scala/org/locationtech/geomesa/stream/datastore/StreamDataStore.scala | Scala | apache-2.0 | 7,368 |
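Consumers of the data store above subscribe through `registerListener`, and the `StreamListener` companion's `apply` overloads make that a one-liner (a filter-taking overload is also shown above). A hedged usage sketch, assuming an already-constructed `StreamDataStore`:

```scala
import org.locationtech.geomesa.stream.datastore.{StreamDataStore, StreamListener}

object StreamListenerUsageSketch {
  // Prints the ID of every SimpleFeature that arrives on the stream.
  def watch(ds: StreamDataStore): Unit =
    ds.registerListener(StreamListener { sf =>
      println(s"received feature ${sf.getID}")
    })
}
```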
package one.lockstep.monolock.client.pipeline
import java.io.IOException
import java.security.cert.X509Certificate
import java.util.concurrent.TimeUnit
import one.lockstep.lock.client.transport.TransportProvider
import one.lockstep.monolock.protocol._
import one.lockstep.util._
import one.lockstep.util.codec._
import one.lockstep.util.crypto.PublicKey
import scala.concurrent._
import scala.util.{Failure, Success}
import one.lockstep.util.protocol._
import scala.concurrent.duration.Duration
class Handshake(transport: TransportProvider, rootCertOpt: Option[X509Certificate] = None)
(implicit ec: ExecutionContext) extends Logging {
private[this] var currentHandshake: Future[PublicKey] = sendHandshake()
def join(): Future[PublicKey] = synchronized {
if (currentHandshake.value.exists(_.isFailure))
currentHandshake = sendHandshake()
currentHandshake
}
def sendHandshake(): Future[PublicKey] = {
logger.debug("sending handshake")
val serialized = Protocol.encode(HandshakeRequest())
transport.sendReceive("handshake", serialized).map { response =>
val certificates = Protocol.decode[HandshakeResponse](response).certificates
validate(certificates)
PublicKey(certificates.last.getPublicKey.getEncoded)
} andThen {
case Success(publicKey) =>
logger.debug(s"handshake completed successfully; public key: ${show(publicKey.raw)}")
case Failure(cause: IOException) if cause.getMessage.endsWith("Canceled") =>
logger.debug("handshake cancelled")
case Failure(cause) =>
logger.error("handshake failed: " + cause)
logger.debug("handshake failure details", cause)
}
}
private def validate(certificates: Seq[X509Certificate]) = {
// if rootCertOpt is not defined then we go trustless
logger.debug(s"handshake received certificate chain of size ${certificates.size}")
require(certificates.nonEmpty, "certificate chain must be non-empty")
require(certificates.size <= 2, "certificate chain must not be longer than 2")
cert.verify(rootCertOpt.toSeq ++ certificates)
val last = certificates.last
val validityDuration = Duration(last.getNotAfter.getTime - last.getNotBefore.getTime, TimeUnit.MILLISECONDS)
require(validityDuration.toDays <= 2, "certificate validity duration must be at most 2 days")
// todo validate specific attributes of the incoming certificates
}
}
| lockstep-one/vault | vault-client/src/main/scala/one/lockstep/monolock/client/pipeline/Handshake.scala | Scala | agpl-3.0 | 2,437 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.language.implicitConversions
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.types._
private[sql] object Column {
def apply(colName: String): Column = new Column(colName)
def apply(expr: Expression): Column = new Column(expr)
def unapply(col: Column): Option[Expression] = Some(col.expr)
private[sql] def generateAlias(e: Expression): String = {
e match {
case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
a.aggregateFunction.toString
case expr => toPrettySQL(expr)
}
}
}
/**
* A [[Column]] where an [[Encoder]] has been given for the expected input and return type.
* To create a [[TypedColumn]], use the `as` function on a [[Column]].
*
* @tparam T The input type expected for this expression. Can be `Any` if the expression is type
* checked by the analyzer instead of the compiler (i.e. `expr("sum(...)")`).
* @tparam U The output type of this column.
*
* @since 1.6.0
*/
@InterfaceStability.Stable
class TypedColumn[-T, U](
expr: Expression,
private[sql] val encoder: ExpressionEncoder[U])
extends Column(expr) {
/**
* Inserts the specific input type and schema into any expressions that are expected to operate
* on a decoded object.
*/
private[sql] def withInputType(
inputEncoder: ExpressionEncoder[_],
inputAttributes: Seq[Attribute]): TypedColumn[T, U] = {
val unresolvedDeserializer = UnresolvedDeserializer(inputEncoder.deserializer, inputAttributes)
val newExpr = expr transform {
case ta: TypedAggregateExpression if ta.inputDeserializer.isEmpty =>
ta.withInputInfo(
deser = unresolvedDeserializer,
cls = inputEncoder.clsTag.runtimeClass,
schema = inputEncoder.schema)
}
new TypedColumn[T, U](newExpr, encoder)
}
/**
* Gives the [[TypedColumn]] a name (alias).
* If the current `TypedColumn` has metadata associated with it, this metadata will be propagated
* to the new column.
*
* @group expr_ops
* @since 2.0.0
*/
override def name(alias: String): TypedColumn[T, U] =
new TypedColumn[T, U](super.name(alias).expr, encoder)
}
/**
* A column that will be computed based on the data in a `DataFrame`.
*
* A new column can be constructed based on the input columns present in a DataFrame:
*
* {{{
* df("columnName") // On a specific `df` DataFrame.
* col("columnName") // A generic column no yet associated with a DataFrame.
* col("columnName.field") // Extracting a struct field
* col("`a.column.with.dots`") // Escape `.` in column names.
* $"columnName" // Scala short hand for a named column.
* }}}
*
* [[Column]] objects can be composed to form complex expressions:
*
* {{{
* $"a" + 1
* $"a" === $"b"
* }}}
*
* @note The internal Catalyst expression can be accessed via [[expr]], but this method is for
* debugging purposes only and can change in any future Spark releases.
*
* @groupname java_expr_ops Java-specific expression operators
* @groupname expr_ops Expression operators
* @groupname df_ops DataFrame functions
* @groupname Ungrouped Support functions for DataFrames
*
* @since 1.3.0
*/
@InterfaceStability.Stable
class Column(val expr: Expression) extends Logging {
def this(name: String) = this(name match {
case "*" => UnresolvedStar(None)
case _ if name.endsWith(".*") =>
val parts = UnresolvedAttribute.parseAttributeName(name.substring(0, name.length - 2))
UnresolvedStar(Some(parts))
case _ => UnresolvedAttribute.quotedString(name)
})
override def toString: String = toPrettySQL(expr)
override def equals(that: Any): Boolean = that match {
case that: Column => that.expr.equals(this.expr)
case _ => false
}
override def hashCode: Int = this.expr.hashCode()
/** Creates a column based on the given expression. */
private def withExpr(newExpr: Expression): Column = new Column(newExpr)
/**
* Returns the expression for this column either with an existing or auto assigned name.
*/
private[sql] def named: NamedExpression = expr match {
// Wrap UnresolvedAttribute with UnresolvedAlias, as when we resolve UnresolvedAttribute, we
// will remove intermediate Alias for ExtractValue chain, and we need to alias it again to
// make it a NamedExpression.
case u: UnresolvedAttribute => UnresolvedAlias(u)
case u: UnresolvedExtractValue => UnresolvedAlias(u)
case expr: NamedExpression => expr
// Leave an unaliased generator with an empty list of names since the analyzer will generate
// the correct defaults after the nested expression's type has been resolved.
case g: Generator => MultiAlias(g, Nil)
case func: UnresolvedFunction => UnresolvedAlias(func, Some(Column.generateAlias))
// If we have a top level Cast, there is a chance to give it a better alias, if there is a
// NamedExpression under this Cast.
case c: Cast =>
c.transformUp {
case c @ Cast(_: NamedExpression, _, _) => UnresolvedAlias(c)
} match {
case ne: NamedExpression => ne
case _ => Alias(expr, toPrettySQL(expr))()
}
case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
UnresolvedAlias(a, Some(Column.generateAlias))
// Wait until the struct is resolved. This will generate a nicer looking alias.
case struct: CreateNamedStructLike => UnresolvedAlias(struct)
case expr: Expression => Alias(expr, toPrettySQL(expr))()
}
/**
* Provides a type hint about the expected return value of this column. This information can
* be used by operations such as `select` on a [[Dataset]] to automatically convert the
* results into the correct JVM types.
* @since 1.6.0
*/
def as[U : Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
/**
* Extracts a value or values from a complex type.
* The following types of extraction are supported:
*
* - Given an Array, an integer ordinal can be used to retrieve a single value.
* - Given a Map, a key of the correct type can be used to retrieve an individual value.
* - Given a Struct, a string fieldName can be used to extract that field.
* - Given an Array of Structs, a string fieldName can be used to extract that field
* of every struct in that array, and return an Array of fields
*
* @group expr_ops
* @since 1.4.0
*/
def apply(extraction: Any): Column = withExpr {
UnresolvedExtractValue(expr, lit(extraction).expr)
}
/**
* Unary minus, i.e. negate the expression.
* {{{
* // Scala: select the amount column and negates all values.
* df.select( -df("amount") )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.select( negate(col("amount") );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def unary_- : Column = withExpr { UnaryMinus(expr) }
/**
* Inversion of boolean expression, i.e. NOT.
* {{{
* // Scala: select rows that are not active (isActive === false)
* df.filter( !df("isActive") )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( not(df.col("isActive")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def unary_! : Column = withExpr { Not(expr) }
/**
* Equality test.
* {{{
* // Scala:
* df.filter( df("colA") === df("colB") )
*
* // Java
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").equalTo(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def === (other: Any): Column = withExpr {
val right = lit(other).expr
if (this.expr == right) {
logWarning(
s"Constructing trivially true equals predicate, '${this.expr} = $right'. " +
"Perhaps you need to use aliases.")
}
EqualTo(expr, right)
}
/**
* Equality test.
* {{{
* // Scala:
* df.filter( df("colA") === df("colB") )
*
* // Java
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").equalTo(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def equalTo(other: Any): Column = this === other
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") =!= df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group expr_ops
* @since 2.0.0
*/
def =!= (other: Any): Column = withExpr{ Not(EqualTo(expr, lit(other).expr)) }
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") !== df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
@deprecated("!== does not have the same precedence as ===, use =!= instead", "2.0.0")
def !== (other: Any): Column = this =!= other
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") !== df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def notEqual(other: Any): Column = withExpr { Not(EqualTo(expr, lit(other).expr)) }
/**
* Greater than.
* {{{
* // Scala: The following selects people older than 21.
* people.select( people("age") > 21 )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* people.select( people("age").gt(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def > (other: Any): Column = withExpr { GreaterThan(expr, lit(other).expr) }
/**
* Greater than.
* {{{
* // Scala: The following selects people older than 21.
* people.select( people("age") > lit(21) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* people.select( people("age").gt(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def gt(other: Any): Column = this > other
/**
* Less than.
* {{{
* // Scala: The following selects people younger than 21.
* people.select( people("age") < 21 )
*
* // Java:
* people.select( people("age").lt(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def < (other: Any): Column = withExpr { LessThan(expr, lit(other).expr) }
/**
* Less than.
* {{{
* // Scala: The following selects people younger than 21.
* people.select( people("age") < 21 )
*
* // Java:
* people.select( people("age").lt(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def lt(other: Any): Column = this < other
/**
* Less than or equal to.
* {{{
   * // Scala: The following selects people aged 21 or younger.
* people.select( people("age") <= 21 )
*
* // Java:
* people.select( people("age").leq(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def <= (other: Any): Column = withExpr { LessThanOrEqual(expr, lit(other).expr) }
/**
* Less than or equal to.
* {{{
   * // Scala: The following selects people aged 21 or younger.
* people.select( people("age") <= 21 )
*
* // Java:
* people.select( people("age").leq(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def leq(other: Any): Column = this <= other
/**
* Greater than or equal to an expression.
* {{{
   * // Scala: The following selects people aged 21 or older.
* people.select( people("age") >= 21 )
*
* // Java:
* people.select( people("age").geq(21) )
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def >= (other: Any): Column = withExpr { GreaterThanOrEqual(expr, lit(other).expr) }
/**
* Greater than or equal to an expression.
* {{{
   * // Scala: The following selects people aged 21 or older.
* people.select( people("age") >= 21 )
*
* // Java:
* people.select( people("age").geq(21) )
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def geq(other: Any): Column = this >= other
/**
* Equality test that is safe for null values.
*
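   * A minimal usage sketch (the DataFrame `df` and its columns are illustrative, not from the docs):
   * {{{
   *   // Scala: unlike ===, this also matches rows where both colA and colB are null
   *   df.filter( df("colA") <=> df("colB") )
   * }}}
   *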
* @group expr_ops
* @since 1.3.0
*/
def <=> (other: Any): Column = withExpr {
val right = lit(other).expr
if (this.expr == right) {
logWarning(
s"Constructing trivially true equals predicate, '${this.expr} <=> $right'. " +
"Perhaps you need to use aliases.")
}
EqualNullSafe(expr, right)
}
/**
* Equality test that is safe for null values.
*
* @group java_expr_ops
* @since 1.3.0
*/
def eqNullSafe(other: Any): Column = this <=> other
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def when(condition: Column, value: Any): Column = this.expr match {
case CaseWhen(branches, None) =>
withExpr { CaseWhen(branches :+ ((condition.expr, lit(value).expr))) }
case CaseWhen(branches, Some(_)) =>
throw new IllegalArgumentException(
"when() cannot be applied once otherwise() is applied")
case _ =>
throw new IllegalArgumentException(
"when() can only be applied on a Column previously generated by when() function")
}
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def otherwise(value: Any): Column = this.expr match {
case CaseWhen(branches, None) =>
withExpr { CaseWhen(branches, Option(lit(value).expr)) }
case CaseWhen(branches, Some(_)) =>
throw new IllegalArgumentException(
"otherwise() can only be applied once on a Column previously generated by when()")
case _ =>
throw new IllegalArgumentException(
"otherwise() can only be applied on a Column previously generated by when()")
}
/**
* True if the current column is between the lower bound and upper bound, inclusive.
*
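   * A small illustrative sketch (the DataFrame `people` and column `age` are assumed):
   * {{{
   *   // Scala: rows where 18 <= age <= 65, both bounds included
   *   people.filter( people("age").between(18, 65) )
   * }}}
   *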
* @group java_expr_ops
* @since 1.4.0
*/
def between(lowerBound: Any, upperBound: Any): Column = {
(this >= lowerBound) && (this <= upperBound)
}
/**
* True if the current expression is NaN.
*
* @group expr_ops
* @since 1.5.0
*/
def isNaN: Column = withExpr { IsNaN(expr) }
/**
* True if the current expression is null.
*
* @group expr_ops
* @since 1.3.0
*/
def isNull: Column = withExpr { IsNull(expr) }
/**
* True if the current expression is NOT null.
*
* @group expr_ops
* @since 1.3.0
*/
def isNotNull: Column = withExpr { IsNotNull(expr) }
/**
* Boolean OR.
* {{{
* // Scala: The following selects people that are in school or employed.
* people.filter( people("inSchool") || people("isEmployed") )
*
* // Java:
* people.filter( people("inSchool").or(people("isEmployed")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def || (other: Any): Column = withExpr { Or(expr, lit(other).expr) }
/**
* Boolean OR.
* {{{
* // Scala: The following selects people that are in school or employed.
* people.filter( people("inSchool") || people("isEmployed") )
*
* // Java:
* people.filter( people("inSchool").or(people("isEmployed")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def or(other: Column): Column = this || other
/**
* Boolean AND.
* {{{
* // Scala: The following selects people that are in school and employed at the same time.
* people.select( people("inSchool") && people("isEmployed") )
*
* // Java:
* people.select( people("inSchool").and(people("isEmployed")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def && (other: Any): Column = withExpr { And(expr, lit(other).expr) }
/**
* Boolean AND.
* {{{
* // Scala: The following selects people that are in school and employed at the same time.
* people.select( people("inSchool") && people("isEmployed") )
*
* // Java:
* people.select( people("inSchool").and(people("isEmployed")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def and(other: Column): Column = this && other
/**
* Sum of this expression and another expression.
* {{{
* // Scala: The following selects the sum of a person's height and weight.
* people.select( people("height") + people("weight") )
*
* // Java:
* people.select( people("height").plus(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def + (other: Any): Column = withExpr { Add(expr, lit(other).expr) }
/**
* Sum of this expression and another expression.
* {{{
* // Scala: The following selects the sum of a person's height and weight.
* people.select( people("height") + people("weight") )
*
* // Java:
* people.select( people("height").plus(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def plus(other: Any): Column = this + other
/**
* Subtraction. Subtract the other expression from this expression.
* {{{
* // Scala: The following selects the difference between people's height and their weight.
* people.select( people("height") - people("weight") )
*
* // Java:
* people.select( people("height").minus(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def - (other: Any): Column = withExpr { Subtract(expr, lit(other).expr) }
/**
* Subtraction. Subtract the other expression from this expression.
* {{{
* // Scala: The following selects the difference between people's height and their weight.
* people.select( people("height") - people("weight") )
*
* // Java:
* people.select( people("height").minus(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def minus(other: Any): Column = this - other
/**
* Multiplication of this expression and another expression.
* {{{
* // Scala: The following multiplies a person's height by their weight.
* people.select( people("height") * people("weight") )
*
* // Java:
* people.select( people("height").multiply(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def * (other: Any): Column = withExpr { Multiply(expr, lit(other).expr) }
/**
* Multiplication of this expression and another expression.
* {{{
* // Scala: The following multiplies a person's height by their weight.
* people.select( people("height") * people("weight") )
*
* // Java:
* people.select( people("height").multiply(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def multiply(other: Any): Column = this * other
/**
   * Division of this expression by another expression.
* {{{
* // Scala: The following divides a person's height by their weight.
* people.select( people("height") / people("weight") )
*
* // Java:
* people.select( people("height").divide(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def / (other: Any): Column = withExpr { Divide(expr, lit(other).expr) }
/**
   * Division of this expression by another expression.
* {{{
* // Scala: The following divides a person's height by their weight.
* people.select( people("height") / people("weight") )
*
* // Java:
* people.select( people("height").divide(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def divide(other: Any): Column = this / other
/**
* Modulo (a.k.a. remainder) expression.
*
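   * A short sketch (illustrative column name):
   * {{{
   *   // Scala: remainder of dividing colA by 3
   *   df.select( df("colA") % 3 )
   *
   *   // Java:
   *   df.select( df.col("colA").mod(3) );
   * }}}
   *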
* @group expr_ops
* @since 1.3.0
*/
def % (other: Any): Column = withExpr { Remainder(expr, lit(other).expr) }
/**
* Modulo (a.k.a. remainder) expression.
*
* @group java_expr_ops
* @since 1.3.0
*/
def mod(other: Any): Column = this % other
/**
* A boolean expression that is evaluated to true if the value of this expression is contained
* by the evaluated values of the arguments.
*
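   * An illustrative sketch (the column and the listed values are hypothetical):
   * {{{
   *   // Scala: rows whose country is one of the listed values
   *   df.filter( df("country").isin("US", "CA", "MX") )
   * }}}
   *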
* @group expr_ops
* @since 1.5.0
*/
@scala.annotation.varargs
def isin(list: Any*): Column = withExpr { In(expr, list.map(lit(_).expr)) }
/**
* SQL like expression. Returns a boolean column based on a SQL LIKE match.
*
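   * A usage sketch (column name and pattern are illustrative):
   * {{{
   *   // Scala: names containing "Smith", using SQL LIKE wildcards
   *   df.filter( df("name").like("%Smith%") )
   * }}}
   *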
* @group expr_ops
* @since 1.3.0
*/
def like(literal: String): Column = withExpr { Like(expr, lit(literal).expr) }
/**
* SQL RLIKE expression (LIKE with Regex). Returns a boolean column based on a regex
* match.
*
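   * A usage sketch (column name and regex are illustrative):
   * {{{
   *   // Scala: names consisting of an uppercase letter followed by lowercase letters
   *   df.filter( df("name").rlike("^[A-Z][a-z]+$") )
   * }}}
   *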
* @group expr_ops
* @since 1.3.0
*/
def rlike(literal: String): Column = withExpr { RLike(expr, lit(literal).expr) }
/**
* An expression that gets an item at position `ordinal` out of an array,
* or gets a value by key `key` in a `MapType`.
*
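   * A usage sketch (the array column `scores` and map column `props` are hypothetical):
   * {{{
   *   // Scala
   *   df.select( df("scores").getItem(0), df("props").getItem("color") )
   * }}}
   *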
* @group expr_ops
* @since 1.3.0
*/
def getItem(key: Any): Column = withExpr { UnresolvedExtractValue(expr, Literal(key)) }
/**
* An expression that gets a field by name in a `StructType`.
*
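   * A usage sketch (the struct column `person` is hypothetical):
   * {{{
   *   // Scala: extracts the "name" field of a struct column
   *   df.select( df("person").getField("name") )
   * }}}
   *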
* @group expr_ops
* @since 1.3.0
*/
def getField(fieldName: String): Column = withExpr {
UnresolvedExtractValue(expr, Literal(fieldName))
}
/**
* An expression that returns a substring.
* @param startPos expression for the starting position.
* @param len expression for the length of the substring.
*
* @group expr_ops
* @since 1.3.0
*/
def substr(startPos: Column, len: Column): Column = withExpr {
Substring(expr, startPos.expr, len.expr)
}
/**
* An expression that returns a substring.
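   *
   * A usage sketch (illustrative column name; positions follow SQL substring semantics and start at 1):
   * {{{
   *   // Scala: the first three characters of colA
   *   df.select( df("colA").substr(1, 3) )
   * }}}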
* @param startPos starting position.
* @param len length of the substring.
*
* @group expr_ops
* @since 1.3.0
*/
def substr(startPos: Int, len: Int): Column = withExpr {
Substring(expr, lit(startPos).expr, lit(len).expr)
}
/**
* Contains the other element. Returns a boolean column based on a string match.
*
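   * A usage sketch (column name and literal are illustrative):
   * {{{
   *   // Scala: rows whose name contains the substring "ann"
   *   df.filter( df("name").contains("ann") )
   * }}}
   *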
* @group expr_ops
* @since 1.3.0
*/
def contains(other: Any): Column = withExpr { Contains(expr, lit(other).expr) }
/**
* String starts with. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def startsWith(other: Column): Column = withExpr { StartsWith(expr, lit(other).expr) }
/**
* String starts with another string literal. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def startsWith(literal: String): Column = this.startsWith(lit(literal))
/**
* String ends with. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def endsWith(other: Column): Column = withExpr { EndsWith(expr, lit(other).expr) }
/**
* String ends with another string literal. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def endsWith(literal: String): Column = this.endsWith(lit(literal))
/**
* Gives the column an alias. Same as `as`.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".alias("colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def alias(alias: String): Column = name(alias)
/**
* Gives the column an alias.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".as("colB"))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
   * to the new column. If this is not desired, use `as` with explicitly empty metadata.
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: String): Column = name(alias)
/**
* (Scala-specific) Assigns the given aliases to the results of a table generating function.
* {{{
   * // Aliases the columns produced by explode as "key" and "value".
* df.select(explode($"myMap").as("key" :: "value" :: Nil))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def as(aliases: Seq[String]): Column = withExpr { MultiAlias(expr, aliases) }
/**
* Assigns the given aliases to the results of a table generating function.
* {{{
   * // Aliases the columns produced by explode as "key" and "value".
* df.select(explode($"myMap").as("key" :: "value" :: Nil))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def as(aliases: Array[String]): Column = withExpr { MultiAlias(expr, aliases) }
/**
* Gives the column an alias.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".as('colB))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
   * to the new column. If this is not desired, use `as` with explicitly empty metadata.
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: Symbol): Column = name(alias.name)
/**
* Gives the column an alias with metadata.
* {{{
* val metadata: Metadata = ...
* df.select($"colA".as("colB", metadata))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: String, metadata: Metadata): Column = withExpr {
Alias(expr, alias)(explicitMetadata = Some(metadata))
}
/**
* Gives the column a name (alias).
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".name("colB"))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
   * to the new column. If this is not desired, use `as` with explicitly empty metadata.
*
* @group expr_ops
* @since 2.0.0
*/
def name(alias: String): Column = withExpr {
expr match {
case ne: NamedExpression => Alias(expr, alias)(explicitMetadata = Some(ne.metadata))
case other => Alias(other, alias)()
}
}
/**
* Casts the column to a different data type.
* {{{
* // Casts colA to IntegerType.
* import org.apache.spark.sql.types.IntegerType
* df.select(df("colA").cast(IntegerType))
*
* // equivalent to
* df.select(df("colA").cast("int"))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def cast(to: DataType): Column = withExpr { Cast(expr, to) }
/**
* Casts the column to a different data type, using the canonical string representation
* of the type. The supported types are: `string`, `boolean`, `byte`, `short`, `int`, `long`,
* `float`, `double`, `decimal`, `date`, `timestamp`.
* {{{
* // Casts colA to integer.
* df.select(df("colA").cast("int"))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def cast(to: String): Column = cast(CatalystSqlParser.parseDataType(to))
/**
* Returns a sort expression based on the descending order of the column.
* {{{
* // Scala
* df.sort(df("age").desc)
*
* // Java
* df.sort(df.col("age").desc());
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def desc: Column = withExpr { SortOrder(expr, Descending) }
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear before non-null values.
* {{{
* // Scala: sort a DataFrame by age column in descending order and null values appearing first.
* df.sort(df("age").desc_nulls_first)
*
* // Java
* df.sort(df.col("age").desc_nulls_first());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def desc_nulls_first: Column = withExpr { SortOrder(expr, Descending, NullsFirst, Set.empty) }
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear after non-null values.
* {{{
* // Scala: sort a DataFrame by age column in descending order and null values appearing last.
* df.sort(df("age").desc_nulls_last)
*
* // Java
* df.sort(df.col("age").desc_nulls_last());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def desc_nulls_last: Column = withExpr { SortOrder(expr, Descending, NullsLast, Set.empty) }
/**
* Returns a sort expression based on ascending order of the column.
* {{{
* // Scala: sort a DataFrame by age column in ascending order.
* df.sort(df("age").asc)
*
* // Java
* df.sort(df.col("age").asc());
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def asc: Column = withExpr { SortOrder(expr, Ascending) }
/**
* Returns a sort expression based on ascending order of the column,
   * and null values appear before non-null values.
* {{{
* // Scala: sort a DataFrame by age column in ascending order and null values appearing first.
* df.sort(df("age").asc_nulls_first)
*
* // Java
* df.sort(df.col("age").asc_nulls_first());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def asc_nulls_first: Column = withExpr { SortOrder(expr, Ascending, NullsFirst, Set.empty) }
/**
* Returns a sort expression based on ascending order of the column,
* and null values appear after non-null values.
* {{{
* // Scala: sort a DataFrame by age column in ascending order and null values appearing last.
* df.sort(df("age").asc_nulls_last)
*
* // Java
* df.sort(df.col("age").asc_nulls_last());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def asc_nulls_last: Column = withExpr { SortOrder(expr, Ascending, NullsLast, Set.empty) }
/**
* Prints the expression to the console for debugging purposes.
*
* @group df_ops
* @since 1.3.0
*/
def explain(extended: Boolean): Unit = {
// scalastyle:off println
if (extended) {
println(expr)
} else {
println(expr.sql)
}
// scalastyle:on println
}
/**
* Compute bitwise OR of this expression with another expression.
* {{{
* df.select($"colA".bitwiseOR($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def bitwiseOR(other: Any): Column = withExpr { BitwiseOr(expr, lit(other).expr) }
/**
* Compute bitwise AND of this expression with another expression.
* {{{
* df.select($"colA".bitwiseAND($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def bitwiseAND(other: Any): Column = withExpr { BitwiseAnd(expr, lit(other).expr) }
/**
* Compute bitwise XOR of this expression with another expression.
* {{{
* df.select($"colA".bitwiseXOR($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def bitwiseXOR(other: Any): Column = withExpr { BitwiseXor(expr, lit(other).expr) }
/**
* Defines a windowing column.
*
* {{{
* val w = Window.partitionBy("name").orderBy("id")
* df.select(
* sum("price").over(w.rangeBetween(Window.unboundedPreceding, 2)),
* avg("price").over(w.rowsBetween(Window.currentRow, 4))
* )
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def over(window: expressions.WindowSpec): Column = window.withAggregate(this)
/**
* Defines an empty analytic clause. In this case the analytic function is applied
* and presented for all rows in the result set.
*
* {{{
* df.select(
* sum("price").over(),
* avg("price").over()
* )
* }}}
*
* @group expr_ops
* @since 2.0.0
*/
def over(): Column = over(Window.spec)
}
/**
* A convenient class used for constructing schema.
*
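 * A sketch of typical use through the `$` string interpolator (assumes `spark.implicits._`
 * is in scope; the field names are illustrative):
 * {{{
 *   import org.apache.spark.sql.types.{StringType, StructType}
 *
 *   val schema = StructType(Seq($"id".long, $"name".string, $"tags".array(StringType)))
 * }}}
 *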
* @since 1.3.0
*/
@InterfaceStability.Stable
class ColumnName(name: String) extends Column(name) {
/**
* Creates a new `StructField` of type boolean.
* @since 1.3.0
*/
def boolean: StructField = StructField(name, BooleanType)
/**
* Creates a new `StructField` of type byte.
* @since 1.3.0
*/
def byte: StructField = StructField(name, ByteType)
/**
* Creates a new `StructField` of type short.
* @since 1.3.0
*/
def short: StructField = StructField(name, ShortType)
/**
* Creates a new `StructField` of type int.
* @since 1.3.0
*/
def int: StructField = StructField(name, IntegerType)
/**
* Creates a new `StructField` of type long.
* @since 1.3.0
*/
def long: StructField = StructField(name, LongType)
/**
* Creates a new `StructField` of type float.
* @since 1.3.0
*/
def float: StructField = StructField(name, FloatType)
/**
* Creates a new `StructField` of type double.
* @since 1.3.0
*/
def double: StructField = StructField(name, DoubleType)
/**
* Creates a new `StructField` of type string.
* @since 1.3.0
*/
def string: StructField = StructField(name, StringType)
/**
* Creates a new `StructField` of type date.
* @since 1.3.0
*/
def date: StructField = StructField(name, DateType)
/**
* Creates a new `StructField` of type decimal.
* @since 1.3.0
*/
def decimal: StructField = StructField(name, DecimalType.USER_DEFAULT)
/**
* Creates a new `StructField` of type decimal.
* @since 1.3.0
*/
def decimal(precision: Int, scale: Int): StructField =
StructField(name, DecimalType(precision, scale))
/**
* Creates a new `StructField` of type timestamp.
* @since 1.3.0
*/
def timestamp: StructField = StructField(name, TimestampType)
/**
* Creates a new `StructField` of type binary.
* @since 1.3.0
*/
def binary: StructField = StructField(name, BinaryType)
/**
* Creates a new `StructField` of type array.
* @since 1.3.0
*/
def array(dataType: DataType): StructField = StructField(name, ArrayType(dataType))
/**
* Creates a new `StructField` of type map.
* @since 1.3.0
*/
def map(keyType: DataType, valueType: DataType): StructField =
map(MapType(keyType, valueType))
def map(mapType: MapType): StructField = StructField(name, mapType)
/**
* Creates a new `StructField` of type struct.
* @since 1.3.0
*/
def struct(fields: StructField*): StructField = struct(StructType(fields))
/**
* Creates a new `StructField` of type struct.
* @since 1.3.0
*/
def struct(structType: StructType): StructField = StructField(name, structType)
}
|
ddna1021/spark
|
sql/core/src/main/scala/org/apache/spark/sql/Column.scala
|
Scala
|
apache-2.0
| 36,793 |
package org.bitcoins.core.p2p
import org.bitcoins.testkitcore.gen.p2p.DataMessageGenerator
import org.bitcoins.testkitcore.util.BitcoinSUnitTest
class TransactionMessageTest extends BitcoinSUnitTest {
it must "have serialization symmetry" in {
forAll(DataMessageGenerator.transactionMessage) { txMsg =>
assert(TransactionMessage.fromHex(txMsg.hex) == txMsg)
}
}
it must "have a meaningful toString" in {
forAll(DataMessageGenerator.transactionMessage) { txMsg =>
assert(txMsg.toString.length < 120)
}
}
}
|
bitcoin-s/bitcoin-s
|
core-test/src/test/scala/org/bitcoins/core/p2p/TransactionMessageTest.scala
|
Scala
|
mit
| 546 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import scala.collection.mutable.MutableList
import scala.util.Random
import org.apache.log4j.{Level, Logger}
import org.scalatest.junit.JUnit3Suite
import java.util.Properties
import junit.framework.Assert._
import kafka.admin.AdminUtils
import kafka.common.FailedToSendMessageException
import kafka.consumer.{Consumer, ConsumerConfig, ConsumerTimeoutException}
import kafka.producer.{KeyedMessage, Producer}
import kafka.serializer.{DefaultEncoder, StringEncoder}
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.Utils
import kafka.utils.TestUtils._
import kafka.zk.ZooKeeperTestHarness
class UncleanLeaderElectionTest extends JUnit3Suite with ZooKeeperTestHarness {
val brokerId1 = 0
val brokerId2 = 1
val port1 = choosePort()
val port2 = choosePort()
// controlled shutdown is needed for these tests, but we can trim the retry count and backoff interval to
// reduce test execution time
val enableControlledShutdown = true
val configProps1 = createBrokerConfig(brokerId1, port1)
val configProps2 = createBrokerConfig(brokerId2, port2)
for (configProps <- List(configProps1, configProps2)) {
configProps.put("controlled.shutdown.enable", String.valueOf(enableControlledShutdown))
configProps.put("controlled.shutdown.max.retries", String.valueOf(1))
configProps.put("controlled.shutdown.retry.backoff.ms", String.valueOf(1000))
}
var configs: Seq[KafkaConfig] = Seq.empty[KafkaConfig]
var servers: Seq[KafkaServer] = Seq.empty[KafkaServer]
val random = new Random()
val topic = "topic" + random.nextLong
val partitionId = 0
val kafkaApisLogger = Logger.getLogger(classOf[kafka.server.KafkaApis])
val networkProcessorLogger = Logger.getLogger(classOf[kafka.network.Processor])
val syncProducerLogger = Logger.getLogger(classOf[kafka.producer.SyncProducer])
val eventHandlerLogger = Logger.getLogger(classOf[kafka.producer.async.DefaultEventHandler[Object, Object]])
override def setUp() {
super.setUp()
// temporarily set loggers to a higher level so that tests run quietly
kafkaApisLogger.setLevel(Level.FATAL)
networkProcessorLogger.setLevel(Level.FATAL)
syncProducerLogger.setLevel(Level.FATAL)
eventHandlerLogger.setLevel(Level.FATAL)
}
override def tearDown() {
servers.map(server => shutdownServer(server))
servers.map(server => Utils.rm(server.config.logDirs))
// restore log levels
kafkaApisLogger.setLevel(Level.ERROR)
networkProcessorLogger.setLevel(Level.ERROR)
syncProducerLogger.setLevel(Level.ERROR)
eventHandlerLogger.setLevel(Level.ERROR)
super.tearDown()
}
private def startBrokers(cluster: Seq[Properties]) {
for (props <- cluster) {
val config = new KafkaConfig(props)
val server = createServer(config)
configs ++= List(config)
servers ++= List(server)
}
}
def testUncleanLeaderElectionEnabled {
// unclean leader election is enabled by default
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)))
verifyUncleanLeaderElectionEnabled
}
def testUncleanLeaderElectionDisabled {
// disable unclean leader election
configProps1.put("unclean.leader.election.enable", String.valueOf(false))
configProps2.put("unclean.leader.election.enable", String.valueOf(false))
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)))
verifyUncleanLeaderElectionDisabled
}
def testUncleanLeaderElectionEnabledByTopicOverride {
// disable unclean leader election globally, but enable for our specific test topic
configProps1.put("unclean.leader.election.enable", String.valueOf(false))
configProps2.put("unclean.leader.election.enable", String.valueOf(false))
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election enabled
val topicProps = new Properties()
topicProps.put("unclean.leader.election.enable", String.valueOf(true))
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)),
topicProps)
verifyUncleanLeaderElectionEnabled
}
def testCleanLeaderElectionDisabledByTopicOverride {
// enable unclean leader election globally, but disable for our specific test topic
configProps1.put("unclean.leader.election.enable", String.valueOf(true))
configProps2.put("unclean.leader.election.enable", String.valueOf(true))
startBrokers(Seq(configProps1, configProps2))
// create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election disabled
val topicProps = new Properties()
topicProps.put("unclean.leader.election.enable", String.valueOf(false))
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)),
topicProps)
verifyUncleanLeaderElectionDisabled
}
def testUncleanLeaderElectionInvalidTopicOverride {
startBrokers(Seq(configProps1))
// create topic with an invalid value for unclean leader election
val topicProps = new Properties()
topicProps.put("unclean.leader.election.enable", "invalid")
intercept[IllegalArgumentException] {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, Map(partitionId -> Seq(brokerId1)), topicProps)
}
}
def verifyUncleanLeaderElectionEnabled {
// wait until leader is elected
val leaderIdOpt = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)
assertTrue("Leader should get elected", leaderIdOpt.isDefined)
val leaderId = leaderIdOpt.get
debug("Leader for " + topic + " is elected to be: %s".format(leaderId))
assertTrue("Leader id is set to expected value for topic: " + topic, leaderId == brokerId1 || leaderId == brokerId2)
// the non-leader broker is the follower
val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1
debug("Follower for " + topic + " is: %s".format(followerId))
produceMessage(topic, "first")
waitUntilMetadataIsPropagated(servers, topic, partitionId)
assertEquals(List("first"), consumeAllMessages(topic))
// shutdown follower server
servers.filter(server => server.config.brokerId == followerId).map(server => shutdownServer(server))
produceMessage(topic, "second")
assertEquals(List("first", "second"), consumeAllMessages(topic))
// shutdown leader and then restart follower
servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server))
servers.filter(server => server.config.brokerId == followerId).map(server => server.startup())
// wait until new leader is (uncleanly) elected
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))
produceMessage(topic, "third")
// second message was lost due to unclean election
assertEquals(List("first", "third"), consumeAllMessages(topic))
}
def verifyUncleanLeaderElectionDisabled {
// wait until leader is elected
val leaderIdOpt = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)
assertTrue("Leader should get elected", leaderIdOpt.isDefined)
val leaderId = leaderIdOpt.get
debug("Leader for " + topic + " is elected to be: %s".format(leaderId))
assertTrue("Leader id is set to expected value for topic: " + topic, leaderId == brokerId1 || leaderId == brokerId2)
// the non-leader broker is the follower
val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1
debug("Follower for " + topic + " is: %s".format(followerId))
produceMessage(topic, "first")
waitUntilMetadataIsPropagated(servers, topic, partitionId)
assertEquals(List("first"), consumeAllMessages(topic))
// shutdown follower server
servers.filter(server => server.config.brokerId == followerId).map(server => shutdownServer(server))
produceMessage(topic, "second")
assertEquals(List("first", "second"), consumeAllMessages(topic))
// shutdown leader and then restart follower
servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server))
servers.filter(server => server.config.brokerId == followerId).map(server => server.startup())
// verify that unclean election to non-ISR follower does not occur
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(-1))
// message production and consumption should both fail while leader is down
intercept[FailedToSendMessageException] {
produceMessage(topic, "third")
}
assertEquals(List.empty[String], consumeAllMessages(topic))
// restart leader temporarily to send a successfully replicated message
servers.filter(server => server.config.brokerId == leaderId).map(server => server.startup())
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(leaderId))
produceMessage(topic, "third")
waitUntilMetadataIsPropagated(servers, topic, partitionId)
servers.filter(server => server.config.brokerId == leaderId).map(server => shutdownServer(server))
// verify clean leader transition to ISR follower
waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))
// verify messages can be consumed from ISR follower that was just promoted to leader
assertEquals(List("first", "second", "third"), consumeAllMessages(topic))
}
private def shutdownServer(server: KafkaServer) = {
server.shutdown()
server.awaitShutdown()
}
private def produceMessage(topic: String, message: String) = {
val producer: Producer[String, Array[Byte]] = createProducer(
getBrokerListStrFromConfigs(configs),
keyEncoder = classOf[StringEncoder].getName)
producer.send(new KeyedMessage[String, Array[Byte]](topic, topic, message.getBytes))
producer.close()
}
private def consumeAllMessages(topic: String) : List[String] = {
// use a fresh consumer group every time so that we don't need to mess with disabling auto-commit or
// resetting the ZK offset
val consumerProps = createConsumerProperties(zkConnect, "group" + random.nextLong, "id", 1000)
val consumerConnector = Consumer.create(new ConsumerConfig(consumerProps))
val messageStream = consumerConnector.createMessageStreams(Map(topic -> 1))(topic).head
val messages = new MutableList[String]
val iter = messageStream.iterator
try {
while(iter.hasNext()) {
messages += new String(iter.next.message) // will throw a timeout exception if the message isn't there
}
} catch {
case e: ConsumerTimeoutException =>
debug("consumer timed out after receiving " + messages.length + " message(s).")
} finally {
consumerConnector.shutdown
}
messages.toList
}
}
|
bbaugher/kafka
|
core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala
|
Scala
|
apache-2.0
| 12,081 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.QueryTest
case class FunctionResult(f1: String, f2: String)
class UDFSuite extends QueryTest {
private lazy val ctx = org.apache.spark.sql.hive.test.TestHive
  // UDF is case-insensitive
test("UDF case insensitive") {
ctx.udf.register("random0", () => { Math.random() })
ctx.udf.register("RANDOM1", () => { Math.random() })
ctx.udf.register("strlenScala", (_: String).length + (_: Int))
assert(ctx.sql("SELECT RANDOM0() FROM src LIMIT 1").head().getDouble(0) >= 0.0)
assert(ctx.sql("SELECT RANDOm1() FROM src LIMIT 1").head().getDouble(0) >= 0.0)
assert(ctx.sql("SELECT strlenscala('test', 1) FROM src LIMIT 1").head().getInt(0) === 5)
}
}
|
tophua/spark1.52
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
|
Scala
|
apache-2.0
| 1,534 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.stats.buffers
import scala.collection.mutable
import io.gatling.commons.util.Maps._
import io.gatling.charts.stats.UserRecord
import io.gatling.core.stats.IntVsTimePlot
import io.gatling.core.stats.message.{ End, Start }
private[stats] object SessionDeltas {
val Empty = SessionDeltas(0, 0)
}
private[stats] case class SessionDeltas(starts: Int, ends: Int) {
def addStart() = copy(starts = starts + 1)
def addEnd() = copy(ends = ends + 1)
}
private[stats] class SessionDeltaBuffer(minTimestamp: Long, maxTimestamp: Long, buckets: Array[Int], runDurationInSeconds: Int) {
private val startCounts: Array[Int] = Array.fill(runDurationInSeconds)(0)
private val endCounts: Array[Int] = Array.fill(runDurationInSeconds)(0)
def addStart(second: Int): Unit = startCounts(second) += 1
def addEnd(second: Int): Unit = endCounts(second) += 1
def endOrphan(): Unit = addEnd(runDurationInSeconds - 1)
private val bucketWidthInMillis = ((maxTimestamp - minTimestamp) / buckets.length).toInt
private def secondToBucket(second: Int): Int = math.min(second * 1000 / bucketWidthInMillis, buckets.length - 1)
def distribution: List[IntVsTimePlot] = {
val eachSecondActiveSessions = Array.fill(runDurationInSeconds)(0)
for { second <- 0 until runDurationInSeconds } {
val previousSessions = if (second == 0) 0 else eachSecondActiveSessions(second - 1)
val previousEnds = if (second == 0) 0 else endCounts(second - 1)
val bucketSessions = previousSessions - previousEnds + startCounts(second)
eachSecondActiveSessions.update(second, bucketSessions)
}
eachSecondActiveSessions.zipWithIndex.iterator
.map { case (sessions, second) => second -> sessions }
.groupByKey(secondToBucket)
.map {
case (bucket, sessionCounts) =>
val averageSessionCount = sessionCounts.sum / sessionCounts.size
val time = buckets(bucket)
IntVsTimePlot(time, averageSessionCount)
}.toList.sortBy(_.time)
}
}
private[stats] trait SessionDeltaPerSecBuffers {
this: Buckets with RunTimes =>
private val sessionDeltaPerSecBuffers = mutable.Map.empty[Option[String], SessionDeltaBuffer]
private val orphanStartRecords = mutable.Map.empty[String, UserRecord]
private val runDurationInSeconds = math.ceil((maxTimestamp - minTimestamp) / 1000.0).toInt
def getSessionDeltaPerSecBuffers(scenarioName: Option[String]): SessionDeltaBuffer =
sessionDeltaPerSecBuffers.getOrElseUpdate(scenarioName, new SessionDeltaBuffer(minTimestamp, maxTimestamp, buckets, runDurationInSeconds))
private def timestamp2SecondOffset(timestamp: Long) = {
val millisOffset = timestamp - minTimestamp
val includeRightBorderCorrection =
if (millisOffset > 0 && millisOffset % 1000 == 0) {
1
} else {
0
}
(millisOffset / 1000).toInt - includeRightBorderCorrection
}
def addSessionBuffers(record: UserRecord): Unit = {
record.event match {
case Start =>
val startSecond = timestamp2SecondOffset(record.start)
getSessionDeltaPerSecBuffers(None).addStart(startSecond)
getSessionDeltaPerSecBuffers(Some(record.scenario)).addStart(startSecond)
orphanStartRecords += record.userId -> record
case End =>
val endSecond = timestamp2SecondOffset(record.end)
getSessionDeltaPerSecBuffers(None).addEnd(endSecond)
getSessionDeltaPerSecBuffers(Some(record.scenario)).addEnd(endSecond)
orphanStartRecords -= record.userId
}
}
def endOrphanUserRecords(): Unit =
orphanStartRecords.values.foreach { start =>
getSessionDeltaPerSecBuffers(None).endOrphan()
getSessionDeltaPerSecBuffers(Some(start.scenario)).endOrphan()
}
}
|
MykolaB/gatling
|
gatling-charts/src/main/scala/io/gatling/charts/stats/buffers/SessionDeltaPerSecBuffers.scala
|
Scala
|
apache-2.0
| 4,389 |
import com.thesamet.proto.e2e.maps.{CustomMaps, MapsTest}
import com.thesamet.proto.e2e.maps2.{MapsTest2, CustomMaps2}
import com.thesamet.proto.e2e.repeatables.RepeatablesTest.Nested
import com.thesamet.pb.{PersonId, Years}
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest._
import org.scalatestplus.scalacheck._
class MapsSpec extends FlatSpec with ScalaCheckDrivenPropertyChecks with MustMatchers with OptionValues {
val nestedGen =
Arbitrary.arbitrary[Option[Int]].map(s => Nested(nestedField = s))
val boolColorPair = for {
b <- Gen.oneOf(true, false)
c <- Gen.oneOf(MapsTest.Color.BLUE, MapsTest.Color.GREEN, MapsTest.Color.NOCOLOR)
} yield (b, c)
val boolColorPair2 = for {
b <- Gen.oneOf(true, false)
c <- Gen.oneOf(MapsTest2.Color.BLUE, MapsTest2.Color.GREEN, MapsTest2.Color.NOCOLOR)
} yield (b, c)
val mapsGen = for {
strToStr <- Gen.listOf(Arbitrary.arbitrary[(String, String)]).map(_.toMap)
strToInt <- Gen.listOf(Arbitrary.arbitrary[(String, Int)]).map(_.toMap)
intToStr <- Gen.listOf(Arbitrary.arbitrary[(Int, String)]).map(_.toMap)
boolToColor <- Gen.listOf(boolColorPair).map(_.toMap)
} yield MapsTest(strToStr = strToStr, strToInt32 = strToInt, int32ToString = intToStr,
boolToColor = boolToColor)
val mapsGen2 = for {
strToStr <- Gen.listOf(Arbitrary.arbitrary[(String, String)]).map(_.toMap)
strToInt <- Gen.listOf(Arbitrary.arbitrary[(String, Int)]).map(_.toMap)
intToStr <- Gen.listOf(Arbitrary.arbitrary[(Int, String)]).map(_.toMap)
boolToColor <- Gen.listOf(boolColorPair2).map(_.toMap)
} yield MapsTest2(strToStr = strToStr, strToInt32 = strToInt, int32ToString = intToStr,
boolToColor = boolToColor)
def mergeMaps(x: MapsTest, y: MapsTest) = MapsTest(
strToStr = x.strToStr ++ y.strToStr,
strToInt32 = x.strToInt32 ++ y.strToInt32,
int32ToString = x.int32ToString ++ y.int32ToString,
boolToColor = x.boolToColor ++ y.boolToColor)
def mergeMaps2(x: MapsTest2, y: MapsTest2) = MapsTest2(
strToStr = x.strToStr ++ y.strToStr,
strToInt32 = x.strToInt32 ++ y.strToInt32,
int32ToString = x.int32ToString ++ y.int32ToString,
boolToColor = x.boolToColor ++ y.boolToColor)
"descriptor.isMapEntry" should "be true" in {
MapsTest.scalaDescriptor.findFieldByName("str_to_str").value.isMapField must be(true)
MapsTest.scalaDescriptor.findFieldByName("str_to_int32").value.isMapField must be(true)
MapsTest.scalaDescriptor.findFieldByName("int32_to_string").value.isMapField must be(true)
MapsTest.scalaDescriptor.findFieldByName("not_a_map").value.isMapField must be(false)
MapsTest.scalaDescriptor.findFieldByName("repeated_not_a_map").value.isMapField must be(false)
MapsTest2.scalaDescriptor.findFieldByName("str_to_str").value.isMapField must be(true)
MapsTest2.scalaDescriptor.findFieldByName("str_to_int32").value.isMapField must be(true)
MapsTest2.scalaDescriptor.findFieldByName("int32_to_string").value.isMapField must be(true)
MapsTest2.scalaDescriptor.findFieldByName("not_a_map").value.isMapField must be(false)
MapsTest2.scalaDescriptor.findFieldByName("repeated_not_a_map").value.isMapField must be(false)
}
"clear" should "empty the map" in {
forAll(mapsGen) {
map =>
map.clearStrToStr must be(map.copy(strToStr = Map.empty))
map.clearStrToInt32 must be(map.copy(strToInt32 = Map.empty))
map.clearInt32ToString must be(map.copy(int32ToString = Map.empty))
map.clearBoolToColor must be(map.copy(boolToColor = Map.empty))
}
}
"addAll" should "merge the maps" in {
forAll(mapsGen, mapsGen) {
(map, other) =>
map
.addAllStrToStr(other.strToStr)
.addAllStrToInt32(other.strToInt32)
.addAllInt32ToString(other.int32ToString)
.addAllBoolToColor(other.boolToColor) must be(mergeMaps(map, other))
}
}
"with" should "set the entire map" in {
forAll(mapsGen, mapsGen) {
(map, other) =>
map
.withStrToStr(other.strToStr)
.withStrToInt32(other.strToInt32)
.withInt32ToString(other.int32ToString)
.withBoolToColor(other.boolToColor) must be(other)
}
}
"updates" should "allow adding a key by assignment" in {
forAll(mapsGen) {
map =>
map.update(_.int32ToString(-17) := "foo").int32ToString must be(map.int32ToString.updated(-17, "foo"))
}
}
"updates" should "allow adding a key-value" in {
forAll(mapsGen) {
map =>
map.update(_.int32ToString :+= (12 -> "foo")).int32ToString must be(map.int32ToString.updated(12, "foo"))
}
}
"parse" should "be the inverse of toByteArray" in {
forAll(mapsGen) {
map =>
MapsTest.parseFrom(map.toByteArray) must be(map)
}
}
"parse" should "be the inverse of toByteArray for proto2" in {
forAll(mapsGen2) {
map =>
MapsTest2.parseFrom(map.toByteArray) must be(map)
}
}
"concatenate message" should "result in merged maps" in {
forAll(mapsGen, mapsGen) {
(map1, map2) =>
MapsTest.parseFrom(map1.toByteArray ++ map2.toByteArray) must be(
mergeMaps(map1, map2))
}
}
"concatenate message" should "result in merged maps for proto2" in {
forAll(mapsGen2, mapsGen2) {
(map1, map2) =>
MapsTest2.parseFrom(map1.toByteArray ++ map2.toByteArray) must be(
mergeMaps2(map1, map2))
}
}
"custom map types" should "provide custom key and value types" in {
val c1 = CustomMaps(
stringToYear = Map("314" -> Years(314)),
personToInt = Map(PersonId("315") -> 314),
personToYear = Map(PersonId("275") -> Years(188)))
val c2 = CustomMaps2(
stringToYear = Map("314" -> Years(314)),
personToInt = Map(PersonId("315") -> 314),
personToYear = Map(PersonId("275") -> Years(188)))
CustomMaps.parseFrom(c1.toByteArray) must be(c1)
CustomMaps.fromAscii(c1.toProtoString) must be(c1)
CustomMaps.fromJavaProto(CustomMaps.toJavaProto(c1)) must be (c1)
CustomMaps2.parseFrom(c2.toByteArray) must be(c2)
CustomMaps2.fromAscii(c2.toProtoString) must be(c2)
CustomMaps2.fromJavaProto(CustomMaps2.toJavaProto(c2)) must be (c2)
}
}
|
dotty-staging/ScalaPB
|
e2e/src/test/scala/MapsSpec.scala
|
Scala
|
apache-2.0
| 6,262 |
package date
import org.junit.Assert
import java.util.Random
import common.CommonTest
import date.api.SkillFile
import org.junit.runner.RunWith
import de.ust.skill.common.scala.api.Read
import de.ust.skill.common.scala.api.ReadOnly
class DatesMakerTest extends CommonTest {
def read(s : String) = SkillFile.open("src/test/resources/"+s)
def compareStates(sf : SkillFile, sf2 : SkillFile) {
assert(sf.Date.size == sf2.Date.size, "dates must have the same size");
    for (((l, r), i) <- sf.Date.zip(sf2.Date).zipWithIndex)
if (l.date != r.date)
fail(s"${l.date} != ${r.date} at index $i")
}
test("write and read dates") {
val state = read("date-example.sf")
val out = tmpFile("test")
state.changePath(out)
state.close
val sf2 = SkillFile.open(out, Read, ReadOnly)
compareStates(SkillFile.open(out, Read, ReadOnly), sf2)
}
test("add a date") {
val state = read("date-example.sf")
state.Date.make(-15L)
assert(state.Date.all.exists(_.date == -15L), "the added date does not exist!")
}
test("read, add, modify and write some dates") {
val sf = read("date-example.sf")
RandomDatesMaker.addLinearDates(sf, 98)
    for (d <- sf.Date.all)
d.date = 0
val out = tmpFile("oneHundredInts.sf")
sf.changePath(out)
sf.close
val sf2 = SkillFile.open(out, Read, ReadOnly)
compareStates(sf, sf2)
    sf2.Date.all.foreach({ d => assert(d.date == 0) })
}
test("write and read some linear dates") {
val sf = read("date-example.sf")
Assert.assertNotNull(sf)
RandomDatesMaker.addLinearDates(sf, 100)
Assert.assertNotNull(sf)
val out = tmpFile("someLinearDates.sf")
sf.changePath(out)
sf.close
compareStates(SkillFile.open(out, Read, ReadOnly), sf)
}
test("write and read some random dates") {
val sf = read("date-example.sf")
Assert.assertNotNull(sf)
RandomDatesMaker.addDates(sf, 100)
Assert.assertNotNull(sf)
val out = tmpFile("someDates.sf")
sf.changePath(out)
sf.close
val sf2 = SkillFile.open(out, Read, ReadOnly)
compareStates(sf, sf2);
}
test("write and read a million random dates") {
val sf = read("date-example.sf")
Assert.assertNotNull(sf)
RandomDatesMaker.addDates(sf, (1e6 - 2).toInt)
Assert.assertNotNull(sf)
val out = tmpFile("testOutWrite1MDatesNormal.sf")
sf.changePath(out)
sf.close
compareStates(sf, SkillFile.open(out, Read, ReadOnly));
}
test("write and read a million small random dates") {
val sf = read("date-example.sf")
Assert.assertNotNull(sf)
RandomDatesMaker.addDatesGaussian(sf, (1e6 - 2).toInt)
Assert.assertNotNull(sf)
val out = tmpFile("testOutWrite1MDatesGaussian.sf")
sf.changePath(out)
sf.close
compareStates(sf, SkillFile.open(out, Read, ReadOnly));
}
}
/**
* Fills a serializable state with random dates.
*/
object RandomDatesMaker {
/**
* adds count new dates with linear content to sf
*/
def addLinearDates(sf : SkillFile, count : Long) {
    for (i <- 0L until count)
sf.Date.make(i)
}
/**
* adds count new dates with random content to sf
*/
def addDates(sf : SkillFile, count : Int) {
var r = new Random()
    for (i <- 0 until count)
sf.Date.make(r.nextLong())
}
/**
* adds count new dates with random content to sf.
*
* uses a gaussian distribution, but only positive numbers
*/
def addDatesGaussian(sf : SkillFile, count : Int) {
var r = new Random()
    for (i <- 0 until count)
sf.Date.make((r.nextGaussian().abs * 100).toLong)
}
}
|
skill-lang/skillScalaTestSuite
|
src/test/scala/date/DatesMakerTest.scala
|
Scala
|
bsd-3-clause
| 3,617 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.component.highlevelserver.dto
final case class PersonWithId(id: Long, name: String, age: Int, married: Boolean)
|
dnvriend/akka-http-test
|
app/com/github/dnvriend/component/highlevelserver/dto/PersonWithId.scala
|
Scala
|
apache-2.0
| 739 |
package com.wavesplatform.it.sync
import com.wavesplatform.account.KeyPair
import com.wavesplatform.it.Node
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.state.AssetDistributionPage
import com.wavesplatform.transaction.transfer.MassTransferTransaction
import org.scalatest.CancelAfterFailure
import scala.concurrent.duration._
class AssetDistributionSuite extends BaseTransactionSuite with CancelAfterFailure {
lazy val node: Node = nodes.head
private lazy val issuer = node.keyPair
test("'Asset distribution at height' method works properly") {
val transferAmount = 1000000L
val issueAmount = 1000000000L
val addresses = nodes.map(_.keyPair.toAddress).filter(_ != issuer.toAddress).toList
val initialHeight = node.height
nodes.waitForHeightArise()
val issueTx = node.issue(issuer, "TestCoin", "no description", issueAmount, 8, false, issueFee, waitForTx = true).id
node.massTransfer(
issuer,
addresses.map(addr => MassTransferTransaction.Transfer(addr.stringRepr, transferAmount)),
minFee + (minFee * addresses.size),
assetId = Some(issueTx),
waitForTx = true
)
nodes.waitForHeightArise()
val distributionHeight = node.height
nodes.waitForHeightArise()
node.assetDistributionAtHeight(issueTx, initialHeight, 100).items shouldBe Map.empty
val assetDis = node
.assetDistributionAtHeight(issueTx, distributionHeight, 100)
.items
    assetDis shouldBe node.assetDistribution(issueTx)
val issuerAssetDis = assetDis.view.filterKeys(_ == issuer.toAddress).values
issuerAssetDis.size shouldBe 1
issuerAssetDis.head shouldBe (issueAmount - addresses.length * transferAmount)
val othersAssetDis = assetDis.view.filterKeys(_ != issuer.toAddress)
assert(othersAssetDis.values.forall(_ == transferAmount))
val assetDisFull =
distributionPages(issueTx, distributionHeight, 100)
.flatMap(_.items.toList)
.filterNot(_._1 == issuer.toAddress)
assert(assetDisFull.forall(_._2 == transferAmount))
assertBadRequestAndMessage(
node.assetDistributionAtHeight(issueTx, node.height, 10),
"Using 'assetDistributionAtHeight' on current height can lead to inconsistent result",
400
)
}
test("'Asset distribution' works properly") {
val receivers = for (i <- 0 until 10) yield KeyPair(s"receiver#$i".getBytes("UTF-8"))
val issueTx = node.issue(issuer, "TestCoin#2", "no description", issueAmount, 8, false, issueFee, waitForTx = true).id
node
.massTransfer(
issuer,
receivers.map(rc => MassTransferTransaction.Transfer(rc.toAddress.toString, 10)).toList,
minFee + minFee * receivers.length,
assetId = Some(issueTx),
waitForTx = true
)
nodes.waitForHeightArise()
val distribution = node.assetDistribution(issueTx)
distribution.size shouldBe (receivers.size + 1)
distribution(issuer.toAddress) shouldBe (issueAmount - 10 * receivers.length)
assert(receivers.forall(rc => distribution(rc.toAddress) == 10), "Distribution correct")
}
test("Correct last page and entry count") {
val receivers = for (i <- 0 until 50) yield KeyPair(s"receiver#$i".getBytes("UTF-8"))
val issueTx = node.issue(issuer, "TestCoin#2", "no description", issueAmount, 8, false, issueFee, waitForTx = true).id
node
.massTransfer(
issuer,
receivers.map(rc => MassTransferTransaction.Transfer(rc.toAddress.toString, 10)).toList,
minFee + minFee * receivers.length,
assetId = Some(issueTx),
waitForTx = true
)
nodes.waitForHeightArise()
val height = node.height
nodes.waitForHeightArise()
val pages = distributionPages(issueTx, height, 10)
assert(!pages.last.hasNext)
assert(pages.last.lastItem.nonEmpty)
assert(pages.length == 6)
assert(pages.map(_.items.size).sum == 51)
}
test("Unlimited list") {
val receivers = for (i <- 0 until 2000) yield KeyPair(s"receiver#$i".getBytes("UTF-8"))
val assetId = node.issue(issuer, "TestCoin#2", "no description", issueAmount, 8, false, issueFee, waitForTx = true).id
receivers.foreach { receiver =>
node.transfer(issuer, receiver.toAddress.stringRepr, 10, assetId = Some(assetId))
}
node.waitFor("empty utx")(_.utxSize, (_: Int) == 0, 1 second)
nodes.waitForHeightArise()
val list = node.assetDistribution(assetId)
list should have size 2001
}
def distributionPages(asset: String, height: Int, limit: Int): List[AssetDistributionPage] = {
def _load(acc: List[AssetDistributionPage], maybeAfter: Option[String]): List[AssetDistributionPage] = {
val page = node.assetDistributionAtHeight(asset, height, limit, maybeAfter)
if (page.hasNext) _load(page :: acc, page.lastItem.map(_.stringRepr))
else page :: acc
}
_load(Nil, None).reverse
}
}
|
wavesplatform/Waves
|
node-it/src/test/scala/com/wavesplatform/it/sync/AssetDistributionSuite.scala
|
Scala
|
mit
| 5,008 |
package effechecka
import java.net.{URI, URLEncoder, URL}
import akka.http.scaladsl.model.headers.{Authorization, BasicHttpCredentials}
import akka.http.scaladsl.model.{FormData, HttpMethods, HttpRequest}
import HttpMethods._
import org.joda.time.format.ISODateTimeFormat
object CsvUtils {
def toOccurrenceRow(occurrence: Occurrence): String = {
val taxonString = if (occurrence.taxon == null) "" else occurrence.taxon
val lastTaxon = taxonString.split('|').filter(_.nonEmpty).reverse.headOption match {
case Some(taxon) => taxon.trim
case _ => ""
}
val occurrenceUrl = urlForOccurrenceId(occurrence).getOrElse("")
Seq(lastTaxon, taxonString,
occurrence.lat, occurrence.lng,
dateOrEmpty(occurrence.start),
occurrence.id,
dateOrEmpty(occurrence.added),
occurrence.source,
occurrenceUrl)
.map(value => if (value == null) "" else value)
      .mkString("\n", "\t", "")
}
private def dateOrEmpty(timestamp: Long) = {
val toISO: (Long) => String = ISODateTimeFormat.dateTime().withZoneUTC().print
toISO(timestamp)
}
def urlForOccurrenceId(occurrence: Occurrence): Option[URI] = {
occurrence.source match {
case "gbif" => Some(new URI("http", null, "www.gbif.org", -1, "/occurrence/search", s"OCCURRENCE_ID=${occurrence.id}", null))
case "idigbio" => Some(new URI("http", null, "portal.idigbio.org", -1, "/search", s"""rq={"occurrenceid":"${occurrence.id}"}""", null))
case _ => None
}
}
}
|
jhpoelen/effechecka
|
src/main/scala/effechecka/CsvUtils.scala
|
Scala
|
mit
| 1,511 |
package at.fh.swengb.resifoAndroid.db
import scala.collection.mutable.ListBuffer
import android.content.ContentValues
import android.content.Context
import android.database.sqlite.SQLiteOpenHelper
import android.database.sqlite.SQLiteDatabase
import at.fh.swengb.resifoAndroid.db.objects.FinalItem
class DBHelper(val context: Context) extends SQLiteOpenHelper(context, "MeldezettelInfo.db", null, 3) {
override def onUpgrade(db: SQLiteDatabase, oldVersion: Int, newVersion: Int) {}
override def onCreate(db: SQLiteDatabase): Unit = {}
private val DATABASE_VERSION: Int = 3
private val DATABASE_NAME: String = "MeldezettelInfo.db"
private val TABLE_NAME: String = "Meldezettel"
private val TABLE_NAME_Final: String = "MeldezettelFINAL" //TODO
private val COLUMN_ID: String = "id"
private val COLUMN_FIRSTNAME: String = "firstname"
private val COLUMN_LASTNAME: String = "lastname"
private val COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE: String = "surnameBeforeFirstMarriage"
private val COLUMN_ACADEMIC_DEGREE: String = "academicDegree"
private val COLUMN_GENDER: String = "gender"
private val COLUMN_BIRTHDATE = "birthdate"
private val COLUMN_BIRTHPLACE = "birthplace"
private val COLUMN_RELIGION = "religion"
private val COLUMN_ZMR = "zmr"
private val COLUMN_NATIONALITY = "nationality"
private val COLUMN_TRAVELDOCUMENT_TYPE = "traveldocumentType"
private val COLUMN_TRAVELDOCUMENT_NUMBER = "traveldocumentNumber"
private val COLUMN_TRAVELDOCUMENT_DATE = "traveldocumentDate"
private val COLUMN_TRAVELDOCUMENT_AGENCY = "traveldocumentAgency"
private val COLUMN_FAMILY_STATUS = "familyStatus"
private val COLUMN_FIRST_STREET = "firstStreet"
private val COLUMN_FIRST_HOUSENUMBER = "firstHousenumber"
private val COLUMN_FIRST_STAIRS = "firstStairs"
private val COLUMN_FIRST_DOOR = "firstDoor"
private val COLUMN_FIRST_ZIPCODE = "firstZipcode"
private val COLUMN_FIRST_LOCATION = "firstLocation"
private val COLUMN_SECOND_STREET = "secondStreet"
private val COLUMN_SECOND_HOUSENUMBER = "secondHousenumber"
private val COLUMN_SECOND_STAIRS = "secondStairs"
private val COLUMN_SECOND_DOOR = "secondDoor"
private val COLUMN_SECOND_ZIPCODE = "secondZipcode"
private val COLUMN_SECOND_LOCATION = "secondLocation"
private val COLUMN_THIRD_STREET = "thirdStreet"
private val COLUMN_THIRD_HOUSENUMBER = "thirdHousenumber"
private val COLUMN_THIRD_STAIRS = "thirdStairs"
private val COLUMN_THIRD_DOOR = "thirdDoor"
private val COLUMN_THIRD_ZIPCODE = "thirdZipcode"
private val COLUMN_THIRD_LOCATION = "thirdLocation"
private val COLUMN_IMMIGRATION_COUNTRY = "immigrationCountry"
private val COLUMN_CONDONED_COUNTRY = "condonedCountry"
private val COLUMN_FUNCTION = "function"
  // these entries are only used by the EDIT table
//##########################################
private val COLUMN_Seite1: String = "seitea"
private val COLUMN_Seite2: String = "seiteb"
private val COLUMN_Seite2a: String = "seitec"
private val COLUMN_Seite3: String = "seited"
private val COLUMN_Seite5: String = "seitee"
private val COLUMN_Seite5a: String = "seitef"
private val COLUMN_Seite6: String = "seiteg"
//##########################################
def createTable: Unit = {
val db = this.getWritableDatabase
val sql = s"CREATE TABLE IF NOT EXISTS $TABLE_NAME ($COLUMN_ID integer primary key, $COLUMN_LASTNAME text, $COLUMN_FIRSTNAME text," +
s" $COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE text, $COLUMN_ACADEMIC_DEGREE text," +
s" $COLUMN_GENDER text, $COLUMN_BIRTHDATE text, $COLUMN_BIRTHPLACE text, $COLUMN_RELIGION text, $COLUMN_ZMR text, $COLUMN_NATIONALITY text," +
s"$COLUMN_TRAVELDOCUMENT_TYPE text, $COLUMN_TRAVELDOCUMENT_NUMBER text, $COLUMN_TRAVELDOCUMENT_DATE text, $COLUMN_TRAVELDOCUMENT_AGENCY text," +
s" $COLUMN_FAMILY_STATUS text, $COLUMN_FIRST_STREET text, $COLUMN_FIRST_HOUSENUMBER text, " +
s"$COLUMN_FIRST_STAIRS text, $COLUMN_FIRST_DOOR text, $COLUMN_FIRST_ZIPCODE text, $COLUMN_FIRST_LOCATION text, $COLUMN_SECOND_STREET text, " +
s"$COLUMN_SECOND_HOUSENUMBER text, $COLUMN_SECOND_STAIRS text, $COLUMN_SECOND_DOOR text, $COLUMN_SECOND_ZIPCODE text, $COLUMN_SECOND_LOCATION text, " +
s"$COLUMN_THIRD_STREET text, $COLUMN_THIRD_HOUSENUMBER text, $COLUMN_THIRD_STAIRS text, $COLUMN_THIRD_DOOR text, $COLUMN_THIRD_ZIPCODE text, $COLUMN_THIRD_LOCATION text," +
s"$COLUMN_IMMIGRATION_COUNTRY text, $COLUMN_CONDONED_COUNTRY text, $COLUMN_FUNCTION text,$COLUMN_Seite1 text, $COLUMN_Seite2 text," +
s"$COLUMN_Seite2a text, $COLUMN_Seite3 text, $COLUMN_Seite5 text, $COLUMN_Seite5a text, $COLUMN_Seite6 text)"
db.execSQL(sql)
}
def createFinalTable: Unit = {
val db = this.getWritableDatabase
val sql = s"CREATE TABLE IF NOT EXISTS $TABLE_NAME_Final ($COLUMN_ID integer primary key, $COLUMN_LASTNAME text, $COLUMN_FIRSTNAME text," +
s" $COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE text, $COLUMN_ACADEMIC_DEGREE text," +
s" $COLUMN_GENDER text, $COLUMN_BIRTHDATE text, $COLUMN_BIRTHPLACE text, $COLUMN_RELIGION text, $COLUMN_ZMR text, $COLUMN_NATIONALITY text," +
s"$COLUMN_TRAVELDOCUMENT_TYPE text, $COLUMN_TRAVELDOCUMENT_NUMBER text, $COLUMN_TRAVELDOCUMENT_DATE text, $COLUMN_TRAVELDOCUMENT_AGENCY text," +
s" $COLUMN_FAMILY_STATUS text, $COLUMN_FIRST_STREET text, $COLUMN_FIRST_HOUSENUMBER text, " +
s"$COLUMN_FIRST_STAIRS text, $COLUMN_FIRST_DOOR text, $COLUMN_FIRST_ZIPCODE text, $COLUMN_FIRST_LOCATION text, $COLUMN_SECOND_STREET text, " +
s"$COLUMN_SECOND_HOUSENUMBER text, $COLUMN_SECOND_STAIRS text, $COLUMN_SECOND_DOOR text, $COLUMN_SECOND_ZIPCODE text, $COLUMN_SECOND_LOCATION text, " +
s"$COLUMN_THIRD_STREET text, $COLUMN_THIRD_HOUSENUMBER text, $COLUMN_THIRD_STAIRS text, $COLUMN_THIRD_DOOR text, $COLUMN_THIRD_ZIPCODE text, $COLUMN_THIRD_LOCATION text," +
s"$COLUMN_IMMIGRATION_COUNTRY text, $COLUMN_CONDONED_COUNTRY text, $COLUMN_FUNCTION text,$COLUMN_Seite1 text, $COLUMN_Seite2 text," +
s"$COLUMN_Seite2a text, $COLUMN_Seite3 text, $COLUMN_Seite5 text, $COLUMN_Seite5a text, $COLUMN_Seite6 text)"
db.execSQL(sql)
}
def transaction: Unit = {
val db = this.getReadableDatabase
val sql = s"SELECT * FROM $TABLE_NAME" //TODO FINAL
val cursor = db.rawQuery(sql, null)
cursor.moveToLast
//copy last line from Edit-table
val lastname = cursor.getString(cursor.getColumnIndex(COLUMN_LASTNAME))
val firstname = cursor.getString(cursor.getColumnIndex(COLUMN_FIRSTNAME))
val surnameBeforeFirstMarriage = cursor.getString(cursor.getColumnIndex(COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE))
val academicDegree = cursor.getString(cursor.getColumnIndex(COLUMN_ACADEMIC_DEGREE))
val gender = cursor.getString(cursor.getColumnIndex(COLUMN_GENDER))
val birthdate = cursor.getString(cursor.getColumnIndex(COLUMN_BIRTHDATE))
val birthplace = cursor.getString(cursor.getColumnIndex(COLUMN_BIRTHPLACE))
val religion = cursor.getString(cursor.getColumnIndex(COLUMN_RELIGION))
val zmr = cursor.getString(cursor.getColumnIndex(COLUMN_ZMR))
val nationality = cursor.getString(cursor.getColumnIndex(COLUMN_NATIONALITY))
val traveldocumentType = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_TYPE))
val traveldocumentNumber = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_NUMBER))
val traveldocumentDate = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_DATE))
val traveldocumentAgency = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_AGENCY))
val familyStatus = cursor.getString(cursor.getColumnIndex(COLUMN_FAMILY_STATUS))
val firstStreet = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_STREET))
val firstHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_HOUSENUMBER))
val firstStairs = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_STAIRS))
val firstDoor = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_DOOR))
val firstZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_ZIPCODE))
val firstLocation = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_LOCATION))
val secondStreet = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_STREET))
val secondHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_HOUSENUMBER))
val secondStairs = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_STAIRS))
val secondDoor = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_DOOR))
val secondZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_ZIPCODE))
val secondLocation = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_LOCATION))
val thirdStreet = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_STREET))
val thirdHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_HOUSENUMBER))
val thirdStairs = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_STAIRS))
val thirdDoor = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_DOOR))
val thirdZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_ZIPCODE))
val thirdLocation = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_LOCATION))
val immigrationCountry = cursor.getString(cursor.getColumnIndex(COLUMN_IMMIGRATION_COUNTRY))
val condonedCountry = cursor.getString(cursor.getColumnIndex(COLUMN_CONDONED_COUNTRY))
val function = cursor.getString(cursor.getColumnIndex(COLUMN_FUNCTION))
val page1 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite1))
val page2 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite2))
val page2a = cursor.getString(cursor.getColumnIndex(COLUMN_Seite2a))
val page3 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite3))
val page5 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite5))
val page5a = cursor.getString(cursor.getColumnIndex(COLUMN_Seite5a))
val page6 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite6))
cursor.close
//Deletes all rows from Edit-table
db.execSQL("delete from " + TABLE_NAME);
//write data
val dbWrite = getWritableDatabase
val values = new ContentValues
values.put(COLUMN_LASTNAME, lastname)
values.put(COLUMN_FIRSTNAME, firstname)
values.put(COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE, surnameBeforeFirstMarriage)
values.put(COLUMN_ACADEMIC_DEGREE, academicDegree)
values.put(COLUMN_GENDER, gender)
values.put(COLUMN_BIRTHDATE, birthdate)
values.put(COLUMN_BIRTHPLACE, birthplace)
values.put(COLUMN_RELIGION, religion)
values.put(COLUMN_ZMR, zmr)
    values.put(COLUMN_NATIONALITY, nationality)
values.put(COLUMN_TRAVELDOCUMENT_TYPE, traveldocumentType)
values.put(COLUMN_TRAVELDOCUMENT_NUMBER, traveldocumentNumber)
values.put(COLUMN_TRAVELDOCUMENT_DATE, traveldocumentDate)
values.put(COLUMN_TRAVELDOCUMENT_AGENCY, traveldocumentAgency)
values.put(COLUMN_FAMILY_STATUS, familyStatus)
values.put(COLUMN_FIRST_STREET, firstStreet)
values.put(COLUMN_FIRST_HOUSENUMBER, firstHouseNumber)
values.put(COLUMN_FIRST_STAIRS, firstStairs)
values.put(COLUMN_FIRST_DOOR, firstDoor)
values.put(COLUMN_FIRST_ZIPCODE, firstZipcode)
values.put(COLUMN_FIRST_LOCATION, firstLocation)
values.put(COLUMN_IMMIGRATION_COUNTRY, immigrationCountry)
values.put(COLUMN_SECOND_STREET, secondStreet)
values.put(COLUMN_SECOND_HOUSENUMBER, secondHouseNumber)
values.put(COLUMN_SECOND_STAIRS, secondStairs)
values.put(COLUMN_SECOND_DOOR, secondDoor)
values.put(COLUMN_SECOND_ZIPCODE, secondZipcode)
values.put(COLUMN_SECOND_LOCATION, secondLocation)
values.put(COLUMN_THIRD_STREET, thirdStreet)
values.put(COLUMN_THIRD_HOUSENUMBER, thirdHouseNumber)
values.put(COLUMN_THIRD_STAIRS, thirdStairs)
values.put(COLUMN_THIRD_DOOR, thirdDoor)
values.put(COLUMN_THIRD_ZIPCODE, thirdZipcode)
values.put(COLUMN_THIRD_LOCATION, thirdLocation)
values.put(COLUMN_Seite1, page1)
values.put(COLUMN_Seite2, page2)
values.put(COLUMN_Seite2a, page2a)
values.put(COLUMN_Seite3, page3)
values.put(COLUMN_Seite5, page5)
values.put(COLUMN_Seite5a, page5a)
values.put(COLUMN_Seite6, page6)
values.put(COLUMN_FUNCTION, function)
dbWrite.insert(TABLE_NAME_Final, null, values)
dbWrite.close
}
def functionMeldezettel: Int = {
val db = this.getReadableDatabase
var sql = ""
sql = s"SELECT * FROM $TABLE_NAME"
val cursor = db.rawQuery(sql, null)
cursor.moveToLast
    // guard against a NULL function column before matching on its value
    val functionString = Option(cursor.getString(cursor.getColumnIndex(COLUMN_FUNCTION))).getOrElse("")
    cursor.close()
    functionString match {
      case "1" | "2" | "3" => functionString.toInt
      case _               => 0
    }
}
def checkCorrects: List[Int] = {
val db = this.getReadableDatabase
var sql = ""
sql = s"SELECT * FROM $TABLE_NAME"
val cursor = db.rawQuery(sql, null)
cursor.moveToLast
val seite1 = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite1)) == null) 0 else 1
val seite2 = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite2)) == null) 0 else 1
val seite2a = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite2a)) == null) 0 else 1
val seite3 = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite3)) == null) 0 else 1
val seite5 = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite5)) == null) 0 else 1
val seite5a = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite5a)) == null) 0 else 1
val seite6 = if (cursor.getString(cursor.getColumnIndex(COLUMN_Seite6)) == null) 0 else 1
cursor.close()
val checkCorrect: List[Int] = List(seite1, seite2, seite2a, seite3, seite5, seite5a, seite6)
checkCorrect
}
def restartTable: Unit = {
dropTable
createTable
}
def dropTable: Unit = {
val db = this.getWritableDatabase
val sql = s"DROP TABLE IF EXISTS $TABLE_NAME"
db.execSQL(sql)
}
""" This function provides the ListView with its entries """.stripMargin
def readData: List[FinalItem] = {
val db = this.getReadableDatabase
val sql = s"SELECT * FROM $TABLE_NAME_Final" //TODO FINAL
val cursor = db.rawQuery(sql, null)
val listOfItems = new ListBuffer[FinalItem]()
if (cursor.moveToFirst) {
do {
val id = cursor.getString(cursor.getColumnIndex(COLUMN_ID))
val lastname = cursor.getString(cursor.getColumnIndex(COLUMN_LASTNAME))
val firstname = cursor.getString(cursor.getColumnIndex(COLUMN_FIRSTNAME))
val surnameBeforeFirstMarriage = cursor.getString(cursor.getColumnIndex(COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE))
val academicDegree = cursor.getString(cursor.getColumnIndex(COLUMN_ACADEMIC_DEGREE))
val gender = cursor.getString(cursor.getColumnIndex(COLUMN_GENDER))
val birthdate = cursor.getString(cursor.getColumnIndex(COLUMN_BIRTHDATE))
val birthplace = cursor.getString(cursor.getColumnIndex(COLUMN_BIRTHPLACE))
val religion = cursor.getString(cursor.getColumnIndex(COLUMN_RELIGION))
val zmr = cursor.getString(cursor.getColumnIndex(COLUMN_ZMR))
val nationality = cursor.getString(cursor.getColumnIndex(COLUMN_NATIONALITY))
val traveldocumentType = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_TYPE))
val traveldocumentNumber = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_NUMBER))
val traveldocumentDate = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_DATE))
val traveldocumentAgency = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_AGENCY))
val familyStatus = cursor.getString(cursor.getColumnIndex(COLUMN_FAMILY_STATUS))
val firstStreet = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_STREET))
val firstHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_HOUSENUMBER))
val firstStairs = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_STAIRS))
val firstDoor = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_DOOR))
val firstZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_ZIPCODE))
val firstLocation = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_LOCATION))
val secondStreet = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_STREET))
val secondHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_HOUSENUMBER))
val secondStairs = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_STAIRS))
val secondDoor = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_DOOR))
val secondZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_ZIPCODE))
val secondLocation = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_LOCATION))
val thirdStreet = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_STREET))
val thirdHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_HOUSENUMBER))
val thirdStairs = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_STAIRS))
val thirdDoor = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_DOOR))
val thirdZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_ZIPCODE))
val thirdLocation = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_LOCATION))
val immigrationCountry = cursor.getString(cursor.getColumnIndex(COLUMN_IMMIGRATION_COUNTRY))
val condonedCountry = cursor.getString(cursor.getColumnIndex(COLUMN_CONDONED_COUNTRY))
val function = cursor.getString(cursor.getColumnIndex(COLUMN_FUNCTION))
val page1 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite1))
val page2 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite2))
val page2a = cursor.getString(cursor.getColumnIndex(COLUMN_Seite2a))
val page3 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite3))
val page5 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite5))
val page5a = cursor.getString(cursor.getColumnIndex(COLUMN_Seite5a))
val page6 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite6))
listOfItems += new FinalItem(id, lastname, firstname, surnameBeforeFirstMarriage, academicDegree, gender, birthdate, birthplace, religion, zmr,
nationality, if (traveldocumentType == "") "" else traveldocumentType, if (traveldocumentNumber == "") "" else traveldocumentNumber,
if (traveldocumentDate == "") "" else traveldocumentDate, if (traveldocumentAgency == "") "" else traveldocumentAgency, familyStatus,
firstStreet, firstHouseNumber, firstStairs, firstDoor, firstZipcode, firstLocation, secondStreet, secondHouseNumber, secondStairs, secondDoor, secondZipcode, secondLocation,
thirdStreet, thirdHouseNumber, thirdStairs, thirdDoor, thirdZipcode, thirdLocation, immigrationCountry, condonedCountry, function,page1,page2,page2a,page3,page5,page5a,page6)
}
while (cursor.moveToNext)
}
cursor.close
val itemList = listOfItems.toList
itemList
}
def readLast: FinalItem = {
val db = this.getReadableDatabase
val sql = s"SELECT * FROM $TABLE_NAME"
val cursor = db.rawQuery(sql, null)
cursor.moveToLast
val id = cursor.getString(cursor.getColumnIndex(COLUMN_ID))
val lastname = cursor.getString(cursor.getColumnIndex(COLUMN_LASTNAME))
val firstname = cursor.getString(cursor.getColumnIndex(COLUMN_FIRSTNAME))
val surnameBeforeFirstMarriage = cursor.getString(cursor.getColumnIndex(COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE))
val academicDegree = cursor.getString(cursor.getColumnIndex(COLUMN_ACADEMIC_DEGREE))
val gender = cursor.getString(cursor.getColumnIndex(COLUMN_GENDER))
val birthdate = cursor.getString(cursor.getColumnIndex(COLUMN_BIRTHDATE))
val birthplace = cursor.getString(cursor.getColumnIndex(COLUMN_BIRTHPLACE))
val religion = cursor.getString(cursor.getColumnIndex(COLUMN_RELIGION))
val zmr = cursor.getString(cursor.getColumnIndex(COLUMN_ZMR))
val nationality = cursor.getString(cursor.getColumnIndex(COLUMN_NATIONALITY))
val traveldocumentType = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_TYPE))
val traveldocumentNumber = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_NUMBER))
val traveldocumentDate = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_DATE))
val traveldocumentAgency = cursor.getString(cursor.getColumnIndex(COLUMN_TRAVELDOCUMENT_AGENCY))
val familyStatus = cursor.getString(cursor.getColumnIndex(COLUMN_FAMILY_STATUS))
val firstStreet = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_STREET))
val firstHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_HOUSENUMBER))
val firstStairs = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_STAIRS))
val firstDoor = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_DOOR))
val firstZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_ZIPCODE))
val firstLocation = cursor.getString(cursor.getColumnIndex(COLUMN_FIRST_LOCATION))
val secondStreet = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_STREET))
val secondHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_HOUSENUMBER))
val secondStairs = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_STAIRS))
val secondDoor = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_DOOR))
val secondZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_ZIPCODE))
val secondLocation = cursor.getString(cursor.getColumnIndex(COLUMN_SECOND_LOCATION))
val thirdStreet = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_STREET))
val thirdHouseNumber = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_HOUSENUMBER))
val thirdStairs = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_STAIRS))
val thirdDoor = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_DOOR))
val thirdZipcode = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_ZIPCODE))
val thirdLocation = cursor.getString(cursor.getColumnIndex(COLUMN_THIRD_LOCATION))
val immigrationCountry = cursor.getString(cursor.getColumnIndex(COLUMN_IMMIGRATION_COUNTRY))
val condonedCountry = cursor.getString(cursor.getColumnIndex(COLUMN_CONDONED_COUNTRY))
val function = cursor.getString(cursor.getColumnIndex(COLUMN_FUNCTION))
val page1 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite1))
val page2 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite2))
val page2a = cursor.getString(cursor.getColumnIndex(COLUMN_Seite2a))
val page3 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite3))
val page5 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite5))
val page5a = cursor.getString(cursor.getColumnIndex(COLUMN_Seite5a))
val page6 = cursor.getString(cursor.getColumnIndex(COLUMN_Seite6))
val item = new FinalItem(id, lastname, firstname, surnameBeforeFirstMarriage, academicDegree, gender, birthdate, birthplace, religion, zmr,
nationality, if (traveldocumentType == "") "" else traveldocumentType, if (traveldocumentNumber == "") "" else traveldocumentNumber,
if (traveldocumentDate == "") "" else traveldocumentDate, if (traveldocumentAgency == "") "" else traveldocumentAgency, familyStatus,
firstStreet, firstHouseNumber, firstStairs, firstDoor, firstZipcode, firstLocation, secondStreet, secondHouseNumber, secondStairs, secondDoor, secondZipcode, secondLocation,
thirdStreet, thirdHouseNumber, thirdStairs, thirdDoor, thirdZipcode, thirdLocation, immigrationCountry, condonedCountry, function,page1,page2,page2a,page3,page5,page5a,page6)
cursor.close
item
}
def getLastID: String = {
val db = this.getReadableDatabase
val sql = s"SELECT * FROM $TABLE_NAME"
val cursor = db.rawQuery(sql, null)
cursor.moveToLast
val id = cursor.getString(cursor.getColumnIndex(COLUMN_ID))
cursor.close
id.toString
}
  //TODO insert the finished Meldezettel into MeldezettelFinal
def insertFinal(item: FinalItem): Unit = {
???
}
  //TODO ListView: delete all rows of the Meldezettel table
  // //TODO when an existing Meldezettel is edited -> only if there is enough time
// def updateFinal: Unit = {
// //check auf ID
// ???
// }
//TODO
def search: Unit = {
???
}
//SEARCH
def insertFirstPage: Unit = {
val db = this.getWritableDatabase
val values = new ContentValues
values.put(COLUMN_LASTNAME, "")
db.insert(TABLE_NAME, null, values)
}
def updatePage1(lastname: String, firstname: String, surnameBeforeFirstMarriage: String, academicDegree: String, gender: String, seite1: String): Unit = {
val db = this.getWritableDatabase
val values = new ContentValues
val id = getLastID
values.put(COLUMN_LASTNAME, lastname)
values.put(COLUMN_FIRSTNAME, firstname)
values.put(COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE, surnameBeforeFirstMarriage)
values.put(COLUMN_ACADEMIC_DEGREE, academicDegree)
values.put(COLUMN_GENDER, gender)
values.put(COLUMN_Seite1, seite1)
db.update(TABLE_NAME, values, "id = ?", Array(id))
}
def updatePage2(birthdate: String, birthplace: String, religion: String, zmr: String, nationality: String, seite2: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_BIRTHDATE, birthdate)
values.put(COLUMN_BIRTHPLACE, birthplace)
values.put(COLUMN_RELIGION, religion)
values.put(COLUMN_ZMR, zmr)
values.put(COLUMN_NATIONALITY, nationality)
values.put(COLUMN_Seite2, seite2)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
def updatePage2a(nationality: String, traveldocumentType: String, traveldocumentNumber: String, traveldocumentDate: String, traveldocumentAgency: String, seite2a: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_NATIONALITY, nationality)
values.put(COLUMN_TRAVELDOCUMENT_TYPE, traveldocumentType)
values.put(COLUMN_TRAVELDOCUMENT_NUMBER, traveldocumentNumber)
values.put(COLUMN_TRAVELDOCUMENT_DATE, traveldocumentDate)
values.put(COLUMN_TRAVELDOCUMENT_AGENCY, traveldocumentAgency)
values.put(COLUMN_Seite2a, seite2a)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
def updatePage3(familyStatus: String, seite3: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_FAMILY_STATUS, familyStatus)
values.put(COLUMN_Seite3, seite3)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
def updatePage4(functionMel: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_FUNCTION, functionMel)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
def updatePage5(firstSreet: String, firstHouseNumber: String, firstStairs: String, firstDoor: String, firstZipcode: String, firstLocation: String, immigrationCountry: String, seite5: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_FIRST_STREET, firstSreet)
values.put(COLUMN_FIRST_HOUSENUMBER, firstHouseNumber)
values.put(COLUMN_FIRST_STAIRS, firstStairs)
values.put(COLUMN_FIRST_DOOR, firstDoor)
values.put(COLUMN_FIRST_ZIPCODE, firstZipcode)
values.put(COLUMN_FIRST_LOCATION, firstLocation)
values.put(COLUMN_IMMIGRATION_COUNTRY, immigrationCountry)
values.put(COLUMN_Seite5, seite5)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
def updatePage5a(secondSreet: String, secondHouseNumber: String, secondStairs: String, secondDoor: String, secondZipcode: String, secondLocation: String, immigrationCountry: String, seite5a: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_SECOND_STREET, secondSreet)
values.put(COLUMN_SECOND_HOUSENUMBER, secondHouseNumber)
values.put(COLUMN_SECOND_STAIRS, secondStairs)
values.put(COLUMN_SECOND_DOOR, secondDoor)
values.put(COLUMN_SECOND_ZIPCODE, secondZipcode)
values.put(COLUMN_SECOND_LOCATION, secondLocation)
values.put(COLUMN_IMMIGRATION_COUNTRY, immigrationCountry)
values.put(COLUMN_Seite5a, seite5a)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
def updatePage6(thirdSreet: String, thirdHouseNumber: String, thirdStairs: String, thirdDoor: String, thirdZipcode: String, thirdLocation: String, immigrationCountry: String, seite6: String): Unit = {
val db = this.getWritableDatabase
val id = getLastID
val values = new ContentValues
values.put(COLUMN_THIRD_STREET, thirdSreet)
values.put(COLUMN_THIRD_HOUSENUMBER, thirdHouseNumber)
values.put(COLUMN_THIRD_STAIRS, thirdStairs)
values.put(COLUMN_THIRD_DOOR, thirdDoor)
values.put(COLUMN_THIRD_ZIPCODE, thirdZipcode)
values.put(COLUMN_THIRD_LOCATION, thirdLocation)
values.put(COLUMN_CONDONED_COUNTRY, immigrationCountry)
values.put(COLUMN_Seite6, seite6)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
""" This procedure updates the Meldezettelformular afterwards """.stripMargin
def updateItem(id: String, firstname: String, lastname: String, surnameBeforeFirstMarriage: String, academicDegree: String, birthdate: String, birthplace: String,
religion: String, zmr: String, nationality: String, traveldocumentType: String, traveldocumentDate: String, traveldocumentAgency: String, familyStatus: String): Unit = {
val db = this.getWritableDatabase
val values = new ContentValues
values.put(COLUMN_FIRSTNAME, firstname)
values.put(COLUMN_LASTNAME, lastname)
values.put(COLUMN_SURNAME_BEFORE_FIRST_MARRIAGE, surnameBeforeFirstMarriage)
values.put(COLUMN_ACADEMIC_DEGREE, academicDegree)
values.put(COLUMN_BIRTHDATE, birthdate)
values.put(COLUMN_BIRTHPLACE, birthplace)
values.put(COLUMN_RELIGION, religion)
values.put(COLUMN_ZMR, zmr)
values.put(COLUMN_NATIONALITY, nationality)
values.put(COLUMN_TRAVELDOCUMENT_TYPE, traveldocumentType)
values.put(COLUMN_TRAVELDOCUMENT_DATE, traveldocumentDate)
values.put(COLUMN_TRAVELDOCUMENT_AGENCY, traveldocumentAgency)
values.put(COLUMN_FAMILY_STATUS, familyStatus)
db.update(TABLE_NAME, values, "id = ?", Array(id))
db.close
}
""" This procedure deletes an item """.stripMargin
def deleteItem(int: Int,id: String): Unit = {
val db = this.getWritableDatabase
if (int == 1) {
db.delete(TABLE_NAME, "id = ?", Array(id))
} else db.delete(TABLE_NAME_Final, "id = ?", Array(id))
db.close
}
}
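// Usage sketch (illustrative only, not part of the original file): a typical flow from an
// Activity would create the helper, ensure both tables exist, fill the edit table page by
// page and finally move the finished record over with `transaction`. `context` is assumed
// to be the caller's Android Context and the field values are placeholders.
//
//   val helper = new DBHelper(context)
//   helper.createTable
//   helper.createFinalTable
//   helper.insertFirstPage
//   helper.updatePage1("Muster", "Max", "", "", "m", "done")
//   // ... updatePage2 through updatePage6 ...
//   helper.transaction          // copies the last edit row into the MeldezettelFINAL table
//   val items = helper.readData // entries for the ListView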
|
Gulasch4ever/resifo-android
|
app/src/main/scala/at/fh/swengb/resifoAndroid/db/DBHelper.scala
|
Scala
|
gpl-3.0
| 31,380 |
package ee.cone.c4actor.rdb_impl
import java.lang.Math.toIntExact
import java.sql.{CallableStatement, Connection}
import java.util.concurrent.CompletableFuture
import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._
class ExternalDBSyncClient(
dbFactory: ExternalDBFactory,
db: CompletableFuture[RConnectionPool] = new CompletableFuture() //dataSource: javax.sql.DataSource
) extends ToInject with Executable with ExternalDBClient {
def toInject: List[Injectable] = WithJDBCKey.set(getConnectionPool.doWith)
def run(): Unit = concurrent.blocking{ db.complete(dbFactory.create(
    createConnection ⇒ new RConnectionPool {
      def doWith[T](f: RConnection⇒T): T = {
        FinallyClose(createConnection()) { sqlConn ⇒
val conn = new RConnectionImpl(sqlConn)
f(conn)
}
}
}
))}
def getConnectionPool: RConnectionPool = concurrent.blocking(db.get)
}
object FinallyFree {
  def apply[A,T](o: A, close: A⇒Unit)(f: A⇒T): T = try f(o) finally close(o)
}
abstract class RDBBindImpl[R] extends RDBBind[R] with LazyLogging {
def connection: java.sql.Connection
def index: Int
def code(wasCode: String): String
def execute(stmt: java.sql.CallableStatement): R
//
private def inObject(value: Object) = {
//println(Thread.currentThread.getName,"bind",value)
new InObjectRDBBind[R](this, value)
}
def in(value: Long): RDBBind[R] = inObject(value:java.lang.Long)
def in(value: Boolean): RDBBind[R] = inObject(value:java.lang.Boolean)
def in(value: String): RDBBind[R] =
if(value.length < 1000) inObject(value) else new InTextRDBBind(this, value)
def call(): R = concurrent.blocking {
val theCode = code("")
logger.debug(s"${Thread.currentThread.getName} code $theCode")
FinallyClose(connection.prepareCall(theCode))(execute)
}
}
class InObjectRDBBind[R](val prev: RDBBindImpl[R], value: Object) extends ArgRDBBind[R] {
def execute(stmt: CallableStatement): R = {
stmt.setObject(index,value)
prev.execute(stmt)
}
}
class InTextRDBBind[R](val prev: RDBBindImpl[R], value: String) extends ArgRDBBind[R] {
def execute(stmt: CallableStatement): R = {
    FinallyClose[java.sql.Clob,R](_.free())(connection.createClob()){ clob ⇒
clob.setString(1,value)
stmt.setClob(index,clob)
prev.execute(stmt)
}
}
}
abstract class ArgRDBBind[R] extends RDBBindImpl[R] {
def prev: RDBBindImpl[R]
def connection: Connection = prev.connection
def index: Int = prev.index + 1
def code(wasCode: String): String =
prev.code(if(wasCode.isEmpty) "?" else s"?,$wasCode")
}
class OutUnitRDBBind(
val connection: java.sql.Connection, name: String
) extends RDBBindImpl[Unit] {
def index = 0
def code(wasCode: String): String = s"{call $name ($wasCode)}"
def execute(stmt: CallableStatement): Unit = stmt.execute()
}
class OutLongRDBBind(
val connection: java.sql.Connection, name: String
) extends RDBBindImpl[Option[Long]] {
def index = 1
def code(wasCode: String): String = s"{? = call $name ($wasCode)}"
def execute(stmt: CallableStatement): Option[Long] = {
stmt.registerOutParameter(index,java.sql.Types.BIGINT)
stmt.execute()
Option(stmt.getLong(index))
}
}
class OutTextRDBBind(
val connection: java.sql.Connection, name: String
) extends RDBBindImpl[String] {
def index = 1
def code(wasCode: String): String = s"{? = call $name ($wasCode)}"
def execute(stmt: CallableStatement): String = {
stmt.registerOutParameter(index,java.sql.Types.CLOB)
stmt.execute()
FinallyClose[Option[java.sql.Clob],String](_.foreach(_.free()))(
Option(stmt.getClob(index))
    ){ clob ⇒
      clob.map(c⇒c.getSubString(1,toIntExact(c.length()))).getOrElse("")
}
}
}
class RConnectionImpl(conn: java.sql.Connection) extends RConnection with LazyLogging {
private def bindObjects(stmt: java.sql.PreparedStatement, bindList: List[Object]) =
    bindList.zipWithIndex.foreach{ case (v,i) ⇒ stmt.setObject(i+1,v) }
def outUnit(name: String): RDBBind[Unit] = new OutUnitRDBBind(conn, name)
def outLongOption(name: String): RDBBind[Option[Long]] = new OutLongRDBBind(conn, name)
def outText(name: String): RDBBind[String] = new OutTextRDBBind(conn, name)
def execute(code: String): Unit = concurrent.blocking {
    FinallyClose(conn.prepareStatement(code)) { stmt ⇒
logger.debug(code)
stmt.execute()
//println(stmt.getWarnings)
}
}
def executeQuery(
code: String, cols: List[String], bindList: List[Object]
): List[Map[String,Object]] = concurrent.blocking {
//println(s"code:: [$code]")
//conn.prepareCall(code).re
    FinallyClose(conn.prepareStatement(code)) { stmt ⇒
      bindObjects(stmt, bindList)
      FinallyClose(stmt.executeQuery()) { rs ⇒
        var res: List[Map[String, Object]] = Nil
        while(rs.next()) res = cols.map(cn ⇒ cn → rs.getObject(cn)).toMap :: res
res.reverse
}
}
}
}
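// Usage sketch (illustrative, not part of the original file): `RDBBind` is a small builder,
// so the output type is chosen first, arguments are appended with `in(...)` and the statement
// is executed with `call()`. The procedure, table and column names below are hypothetical.
//
//   def example(conn: RConnection): Unit = {
//     val report: String = conn.outText("build_report").in(42L).in("draft").call()
//     val rows: List[Map[String, Object]] = conn.executeQuery(
//       "select id, name from items where id = ?", List("id", "name"), List(Long.box(1L))
//     )
//   }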
|
wregs/c4proto
|
c4actor-rdb/src/main/scala/ee/cone/c4actor/rdb_impl/JDBCImpl.scala
|
Scala
|
apache-2.0
| 4,979 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api.test
import java.util.{Properties, Collection, ArrayList}
import org.scalatest.junit.JUnit3Suite
import org.junit.runners.Parameterized
import org.junit.runner.RunWith
import org.junit.runners.Parameterized.Parameters
import org.junit.{After, Before, Test}
import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer, ProducerConfig}
import org.junit.Assert._
import kafka.api.FetchRequestBuilder
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.consumer.SimpleConsumer
import kafka.message.Message
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.{Utils, TestUtils}
import scala.Array
@RunWith(value = classOf[Parameterized])
class ProducerCompressionTest(compression: String) extends JUnit3Suite with ZooKeeperTestHarness {
private val brokerId = 0
private val port = TestUtils.choosePort
private var server: KafkaServer = null
private val props = TestUtils.createBrokerConfig(brokerId, port)
private val config = new KafkaConfig(props)
private val topic = "topic"
private val numRecords = 100
@Before
override def setUp() {
super.setUp()
server = TestUtils.createServer(config)
}
@After
override def tearDown() {
server.shutdown
Utils.rm(server.config.logDirs)
super.tearDown()
}
/**
* testCompression
*
   * Compressed messages should be able to be sent and consumed correctly
*/
@Test
def testCompression() {
val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, TestUtils.getBrokerListStrFromConfigs(Seq(config)))
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compression)
var producer = new KafkaProducer(props)
val consumer = new SimpleConsumer("localhost", port, 100, 1024*1024, "")
try {
// create topic
TestUtils.createTopic(zkClient, topic, 1, 1, List(server))
val partition = 0
// prepare the messages
    val messages = for (i <- 0 until numRecords)
yield ("value" + i).getBytes
// make sure the returned messages are correct
val responses = for (message <- messages)
yield producer.send(new ProducerRecord(topic, null, null, message))
val futures = responses.toList
for ((future, offset) <- futures zip (0 until numRecords)) {
assertEquals(offset.toLong, future.get.offset)
}
// make sure the fetched message count match
val fetchResponse = consumer.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build())
val messageSet = fetchResponse.messageSet(topic, partition).iterator.toBuffer
assertEquals("Should have fetched " + numRecords + " messages", numRecords, messageSet.size)
var index = 0
for (message <- messages) {
assertEquals(new Message(bytes = message), messageSet(index).message)
assertEquals(index.toLong, messageSet(index).offset)
index += 1
}
} finally {
if (producer != null) {
producer.close()
producer = null
}
if (consumer != null)
consumer.close()
}
}
}
object ProducerCompressionTest {
// NOTE: Must return collection of Array[AnyRef] (NOT Array[Any]).
@Parameters
def parameters: Collection[Array[String]] = {
val list = new ArrayList[Array[String]]()
list.add(Array("gzip"))
list.add(Array("snappy"))
list
}
}
|
stealthly/kafka
|
core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala
|
Scala
|
apache-2.0
| 4,177 |
package models
import java.net.URLDecoder
import java.util.UUID
import com.hp.hpl.jena.query.{ QuerySolution, QueryExecutionFactory }
import org.joda.time.{ LocalTime, LocalDate }
import play.api.data.Form
import play.api.data.Forms._
import utils.Global._
import utils.semantic.Vocabulary.{ lwm, owl, rdf, rdfs }
import utils.semantic._
import utils.Implicits._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ Future, Promise, blocking }
case class ScheduleAssociation(group: Resource, assignmentAssoc: Resource, assignmentDate: LocalDate, dueDate: LocalDate, assignmentDateTimetableEntry: Resource, dueDateTimetableEntry: Resource, timetable: Resource)
case class AlternateAssociationFormModel(oldSchedule: String, newSchedule: String)
object ScheduleAssociations {
object Forms {
import play.api.data.Forms._
import play.api.data._
val alternateForm = Form(mapping(
"oldSchedule" -> nonEmptyText,
"newSchedule" -> nonEmptyText
)(AlternateAssociationFormModel.apply)(AlternateAssociationFormModel.unapply))
}
def create(assignment: ScheduleAssociation): Future[Individual] = {
val id = UUID.randomUUID()
val assocResource = ResourceUtils.createResource(lwmNamespace, id)
val statements = List(
Statement(assocResource, rdf.typ, lwm.ScheduleAssociation),
Statement(assocResource, rdf.typ, owl.NamedIndividual),
Statement(assocResource, lwm.hasAssignmentDate, DateLiteral(assignment.assignmentDate)),
Statement(assignment.timetable, lwm.hasScheduleAssociation, assocResource),
Statement(assocResource, lwm.hasDueDate, DateLiteral(assignment.dueDate)),
Statement(assocResource, lwm.hasGroup, assignment.group),
Statement(assignment.group, lwm.hasScheduleAssociation, assocResource),
Statement(assocResource, lwm.hasDueDateTimetableEntry, assignment.dueDateTimetableEntry),
Statement(assocResource, lwm.hasAssignmentDateTimetableEntry, assignment.assignmentDateTimetableEntry),
Statement(assocResource, lwm.hasAssignmentAssociation, assignment.assignmentAssoc)
)
    sparqlExecutionContext.executeUpdate(SPARQLBuilder.insertStatements(statements: _*)).map(b ⇒ Individual(assocResource))
}
def create(assignment: ScheduleAssociation, student: Resource): Future[Individual] = {
val id = UUID.randomUUID()
val assocResource = ResourceUtils.createResource(lwmNamespace, id)
val statements = List(
Statement(assocResource, rdf.typ, lwm.ScheduleAssociation),
Statement(assocResource, rdf.typ, owl.NamedIndividual),
Statement(assocResource, lwm.hasAssignmentDate, DateLiteral(assignment.assignmentDate)),
Statement(assignment.timetable, lwm.hasScheduleAssociation, assocResource),
Statement(assocResource, lwm.hasDueDate, DateLiteral(assignment.dueDate)),
Statement(assocResource, lwm.hasGroup, assignment.group),
Statement(student, lwm.hasScheduleAssociation, assocResource),
Statement(assocResource, lwm.hasDueDateTimetableEntry, assignment.dueDateTimetableEntry),
Statement(assocResource, lwm.hasAssignmentDateTimetableEntry, assignment.assignmentDateTimetableEntry),
Statement(assocResource, lwm.hasAssignmentAssociation, assignment.assignmentAssoc)
)
    sparqlExecutionContext.executeUpdate(SPARQLBuilder.insertStatements(statements: _*)).map(b ⇒ Individual(assocResource))
}
def delete(resource: Resource): Future[Resource] = {
val p = Promise[Resource]()
val individual = Individual(resource)
if (individual.props(rdf.typ).contains(lwm.ScheduleAssociation)) {
      sparqlExecutionContext.executeUpdate(SPARQLBuilder.removeIndividual(resource)).map { b ⇒ p.success(resource) }
} else {
p.failure(new IllegalArgumentException("Resource is not an ScheduleAssociation"))
}
p.future
}
def all(): Future[List[Individual]] = {
    sparqlExecutionContext.executeQuery(SPARQLBuilder.listIndividualsWithClass(lwm.ScheduleAssociation)).map { stringResult ⇒
      SPARQLTools.statementsFromString(stringResult).map(course ⇒ Individual(course.s)).toList
}
}
def dates(group: Resource, association: Resource): Option[(LocalDate, LocalDate)] = {
val query1 =
s"""
|SELECT ?s (${lwm.hasAssignmentDate} as ?p) ?o where {
| ${group.toQueryString} ${lwm.hasScheduleAssociation} ?s .
| ?s ${lwm.hasAssignmentAssociation} ${association.toQueryString} .
| ?s ${lwm.hasAssignmentDate} ?o .
|}
""".stripMargin
val query2 =
s"""
|SELECT ?s (${lwm.hasDueDate} as ?p) ?o where {
| ${group.toQueryString} ${lwm.hasScheduleAssociation} ?s .
| ?s ${lwm.hasAssignmentAssociation} ${association.toQueryString} .
| ?s ${lwm.hasDueDate} ?o .
|}
""".stripMargin
for {
      st1 ← SPARQLTools.statementsFromString(sparqlExecutionContext.executeQueryBlocking(query1)).headOption
      st2 ← SPARQLTools.statementsFromString(sparqlExecutionContext.executeQueryBlocking(query2)).headOption
      assLit ← st1.o.asLiteral()
      dueLit ← st2.o.asLiteral()
} yield (LocalDate.parse(assLit.decodedString), LocalDate.parse(dueLit.decodedString))
}
def times(group: Resource, association: Resource): Option[(Time, Time)] = {
val query1 =
s"""
|SELECT ?s (${lwm.hasStartTime} as ?p) ?o where {
| ${group.toQueryString} ${lwm.hasScheduleAssociation} ?sca .
| ?sca ${lwm.hasAssignmentAssociation} ${association.toQueryString} .
| ?sca ${lwm.hasAssignmentDateTimetableEntry} ?s .
| ?s ${lwm.hasStartTime} ?o .
|}
""".stripMargin
val query2 =
s"""
|SELECT ?s (${lwm.hasStartTime} as ?p) ?o where {
| ${group.toQueryString} ${lwm.hasScheduleAssociation} ?sca .
| ?sca ${lwm.hasAssignmentAssociation} ${association.toQueryString} .
| ?sca ${lwm.hasDueDateTimetableEntry} ?s .
| ?s ${lwm.hasStartTime} ?o .
|}
""".stripMargin
for {
      st1 ← SPARQLTools.statementsFromString(sparqlExecutionContext.executeQueryBlocking(query1)).headOption
      st2 ← SPARQLTools.statementsFromString(sparqlExecutionContext.executeQueryBlocking(query2)).headOption
      startTime ← st1.o.asLiteral()
st = startTime.decodedString.split(":")
h1 = st(0).toInt
m1 = st(1).toInt
      endTime ← st2.o.asLiteral()
et = endTime.decodedString.split(":")
h2 = et(0).toInt
m2 = et(1).toInt
} yield (Time(h1, m1), Time(h2, m2))
}
def getSupervisorsFor(scheduleAssociation: Resource): List[Resource] = {
val query =
s"""
|select ($scheduleAssociation as ?s) (${lwm.hasSupervisor} as ?p) (?supervisor as ?o) where{
| $scheduleAssociation ${lwm.hasAssignmentDateTimetableEntry} ?entry .
| ?entry ${lwm.hasSupervisor} ?supervisor .
|}
""".stripMargin
val result = sparqlExecutionContext.executeQueryBlocking(query)
    SPARQLTools.statementsFromString(result).map { statement ⇒
Resource(statement.o.value)
}.toList
}
def getForGroup(group: Resource): Future[List[ScheduleAssociation]] = Future {
def query2(scheduleAssociation: Resource) =
s"""
|select * where {
| $scheduleAssociation ${lwm.hasAssignmentDate} ?assignmentDate .
| $scheduleAssociation ${lwm.hasDueDate} ?dueDate .
| $scheduleAssociation ${lwm.hasAssignmentDateTimetableEntry} ?assignmentDateEntry .
| $scheduleAssociation ${lwm.hasDueDateTimetableEntry} ?dueDateEntry .
| $scheduleAssociation ${lwm.hasAssignmentDateTimetableEntry} ?assignmentEntry .
| $scheduleAssociation ${lwm.hasAssignmentAssociation} ?assignmentAssociation .
| ?timetable ${rdf.typ} ${lwm.Timetable} .
| ?timetable ${lwm.hasScheduleAssociation} $scheduleAssociation
|}
""".stripMargin
    val t = Individual(group).props.get(lwm.hasScheduleAssociation).map { schedules ⇒
      schedules.map { schedule ⇒
val q = query2(schedule.asResource().get)
val result = QueryExecutionFactory.sparqlService(queryHost, q).execSelect()
var sss = List.empty[ScheduleAssociation]
while (result.hasNext) {
val n = result.nextSolution()
val assignmentDate = LocalDate.parse(n.get("assignmentDate").toString)
val dueDate = LocalDate.parse(n.get("dueDate").toString)
val dueDateEntry = Resource(n.get("dueDateEntry").toString)
val assignmentEntry = Resource(n.get("assignmentEntry").toString)
val assignmentAssociation = Resource(n.get("assignmentAssociation").toString)
val timetable = Resource(n.get("timetable").toString)
sss = ScheduleAssociation(group, assignmentAssociation, assignmentDate, dueDate, assignmentEntry, dueDateEntry, timetable) :: sss
}
sss
}.flatten
}
t match {
      case Some(list) ⇒ list
      case None ⇒ Nil
}
}
def getAlternateDates(group: Resource, groupId: String, orderId: String) = {
import utils.Implicits._
s"""
|${Vocabulary.defaultPrefixes}
|
| Select * where {
|
| ?labwork lwm:hasGroup $group .
| ?labwork lwm:hasGroup ?group .
| ?group lwm:hasScheduleAssociation ?association .
| ?group lwm:hasGroupId ?groupId .
| ?association lwm:hasAssignmentAssociation ?assignmentAssociation .
| ?association lwm:hasAssignmentDate ?date .
| ?association lwm:hasAssignmentDateTimetableEntry ?entry .
| ?entry lwm:hasStartTime ?time .
| ?assignmentAssociation lwm:hasOrderId "$orderId" .
|
| filter not exists {?group lwm:hasGroupId "$groupId"}
|
| } order by desc(?date) desc(?time)
""".stripMargin.execSelect().map { qs β
val altSchedule = qs.data("association").asResource().getURI
val altGroupId = qs.data("groupId").asLiteral().getString
val altDate = qs.data("date").asLiteral().getString
val altTime = URLDecoder.decode(qs.data("time").asLiteral().getString, "UTF-8")
val altGroup = qs.data("group").asResource().getURI
val groupMembersSize = getNormalizedCount(Resource(altGroup), altDate)
(altSchedule, s"$altDate, $altTime Gruppe $altGroupId ($groupMembersSize)")
}
}
def getNormalizedCount(group: Resource, date: String): Int = {
import utils.Implicits._
val groupSize = Individual(group).props.getOrElse(lwm.hasMember, Nil).size
val alternateSize =
s"""
|${Vocabulary.defaultPrefixes}
|
| Select ?association where {
| ?association lwm:hasAlternateScheduleAssociation ?schedule .
| ?schedule lwm:hasAssignmentDate "$date" .
| ?schedule lwm:hasGroup $group .
| }
""".stripMargin.execSelect().size
val hiddenSize =
s"""
|${Vocabulary.defaultPrefixes}
|
| Select ?s where {
| ?s lwm:memberOf $group .
| $group lwm:hasLabWork ?labwork .
| ?s lwm:hasHidingState ?state .
| ?state lwm:hasHidingSubject ?labwork .
| }
""".stripMargin.execSelect().size
groupSize + alternateSize - hiddenSize
}
}
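// Usage sketch (illustrative, not part of the original file): creating a schedule association
// for a group; every resource URI and date below is a placeholder.
//
//   val assoc = ScheduleAssociation(
//     group = Resource("http://example.org/group/1"),
//     assignmentAssoc = Resource("http://example.org/assignment/1"),
//     assignmentDate = LocalDate.now(),
//     dueDate = LocalDate.now().plusWeeks(2),
//     assignmentDateTimetableEntry = Resource("http://example.org/entry/1"),
//     dueDateTimetableEntry = Resource("http://example.org/entry/2"),
//     timetable = Resource("http://example.org/timetable/1")
//   )
//   ScheduleAssociations.create(assoc) // returns a Future[Individual]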
|
FHK-ADV/lwm
|
app/models/ScheduleAssociations.scala
|
Scala
|
mit
| 11,402 |
package im.actor.server.util
import im.actor.api.rpc.files
import im.actor.api.rpc.messaging._
object GroupServiceMessages {
def groupCreated = ServiceMessage("Group created", Some(ServiceExGroupCreated))
def userInvited(userId: Int) = ServiceMessage("User invited to the group", Some(ServiceExUserInvited(userId)))
def userJoined = ServiceMessage("User joined the group", Some(ServiceExUserJoined))
def userLeft(userId: Int) = ServiceMessage("User left the group", Some(ServiceExUserLeft))
def userKicked(userId: Int) = ServiceMessage("User kicked from the group", Some(ServiceExUserKicked(userId)))
def changedTitle(title: String) = ServiceMessage("Group title changed", Some(ServiceExChangedTitle(title)))
def changedTopic(topic: Option[String]) = ServiceMessage("Group topic changed", Some(ServiceExChangedTitle(""))) //ServiceExChangedTopic(topic)
def changedAbout(about: Option[String]) = ServiceMessage("Group about changed", Some(ServiceExChangedTitle(""))) //ServiceExChangedAbout(about)
def changedAvatar(avatar: Option[files.Avatar]) = ServiceMessage(
"Group avatar changed",
Some(ServiceExChangedAvatar(avatar))
)
}
|
v2tmobile/actor-platform
|
actor-server/actor-utils/src/main/scala/im/actor/server/util/GroupServiceMessages.scala
|
Scala
|
mit
| 1,158 |
package com.sksamuel.elastic4s.requests.searches.queries
import com.sksamuel.elastic4s.handlers.searches.queries.term
import com.sksamuel.elastic4s.requests.searches.term.TermQuery
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class TermQueryBodyFnTest extends AnyFunSuite with Matchers {
test("term query should generate expected json") {
val q = TermQuery("mysearch", "myvalue")
.boost(1.2)
.queryName("myquery")
.caseInsensitive(true)
term.TermQueryBodyFn(q).string() shouldBe
"""{"term":{"mysearch":{"boost":1.2,"_name":"myquery","value":"myvalue","case_insensitive":true}}}"""
}
}
|
sksamuel/elastic4s
|
elastic4s-core/src/test/scala/com/sksamuel/elastic4s/requests/searches/queries/TermQueryBodyFnTest.scala
|
Scala
|
apache-2.0
| 667 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.controllers
import org.jsoup.Jsoup
import org.mockito.ArgumentMatchers.{any => mockAny, eq => mockEQ}
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfterEach
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.Application
import play.api.i18n.{Lang, MessagesApi, MessagesImpl}
import play.api.inject.bind
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.test.Helpers._
import play.api.test.{FakeRequest, Helpers, Injecting}
import uk.gov.hmrc.auth.core.{AuthConnector, MissingBearerToken}
import uk.gov.hmrc.http.SessionKeys
import uk.gov.hmrc.nisp.config.ApplicationConfig
import uk.gov.hmrc.nisp.connectors.{IdentityVerificationConnector, IdentityVerificationSuccessResponse}
import uk.gov.hmrc.nisp.controllers.auth.VerifyAuthActionImpl
import uk.gov.hmrc.nisp.helpers._
import uk.gov.hmrc.nisp.utils.UnitSpec
import java.time.LocalDate
import java.util.{Locale, UUID}
import scala.concurrent.Future
class LandingControllerSpec extends UnitSpec with BeforeAndAfterEach with GuiceOneAppPerSuite with Injecting {
implicit val fakeRequest = FakeRequest("GET", "/")
val fakeRequestWelsh = FakeRequest("GET", "/cymraeg")
val urResearchURL =
"https://signup.take-part-in-research.service.gov.uk/?utm_campaign=checkyourstatepensionPTA&utm_source=Other&utm_medium=other&t=HMRC&id=183"
val mockApplicationConfig: ApplicationConfig = mock[ApplicationConfig]
val mockIVConnector: IdentityVerificationConnector = mock[IdentityVerificationConnector]
override def fakeApplication(): Application = GuiceApplicationBuilder()
.overrides(
bind[IdentityVerificationConnector].toInstance(mockIVConnector),
bind[ApplicationConfig].toInstance(mockApplicationConfig),
bind[VerifyAuthActionImpl].to[FakeVerifyAuthAction]
)
.build()
override def beforeEach(): Unit = {
super.beforeEach()
reset(mockApplicationConfig, mockIVConnector)
when(mockApplicationConfig.urBannerUrl).thenReturn(urResearchURL)
when(mockApplicationConfig.pertaxFrontendUrl).thenReturn("/pert")
when(mockApplicationConfig.reportAProblemNonJSUrl).thenReturn("/reportAProblem")
when(mockApplicationConfig.contactFormServiceIdentifier).thenReturn("/id")
}
val verifyLandingController = inject[LandingController]
implicit val messages: MessagesImpl = MessagesImpl(Lang(Locale.getDefault), inject[MessagesApi])
"GET /" should {
"return 200" in {
val result = verifyLandingController.show(fakeRequest)
status(result) shouldBe OK
}
"return HTML" in {
val result = verifyLandingController.show(fakeRequest)
Helpers.contentType(result) shouldBe Some("text/html")
charset(result) shouldBe Some("utf-8")
}
"load the landing page" in {
val result = verifyLandingController.show(fakeRequest)
contentAsString(result) should include(
"Your State Pension forecast is provided for your information only and the " +
"service does not offer financial advice. When planning for your retirement, you should seek professional advice."
)
}
"have a start button" in {
val result = verifyLandingController.show(fakeRequest)
contentAsString(result) should include("Continue")
}
"return IVLanding page" in {
when(mockApplicationConfig.identityVerification).thenReturn(true)
val result = verifyLandingController.show(fakeRequest)
val doc = Jsoup.parse(contentAsString(result))
doc.getElementById("landing-signin-heading").text shouldBe messages("nisp.landing.signin.heading")
}
"return non-IV landing page when switched on" in {
when(mockApplicationConfig.identityVerification).thenReturn(false)
val result = verifyLandingController.show(fakeRequest)
val doc = Jsoup.parse(contentAsString(result))
doc.getElementById("eligibility-heading").text shouldBe messages("nisp.landing.eligibility.heading")
}
}
"GET /signin/verify" must {
"redirect to verify" in {
val mockAuthConnector = mock[AuthConnector]
val verifyAuthBasedInjector = GuiceApplicationBuilder()
.overrides(
bind[IdentityVerificationConnector].toInstance(mockIVConnector),
bind[AuthConnector].toInstance(mockAuthConnector)
)
.injector()
when(mockAuthConnector.authorise(mockAny(), mockAny())(mockAny(), mockAny()))
.thenReturn(Future.failed(MissingBearerToken("Missing Bearer Token!")))
val verifyLandingController = verifyAuthBasedInjector.instanceOf[LandingController]
val result = verifyLandingController.verifySignIn(fakeRequest)
redirectLocation(result) shouldBe Some(
"http://localhost:9949/auth-login-stub/verify-sign-in?continue=http%3A%2F%2Flocalhost%3A9234%2Fcheck-your-state-pension%2Faccount"
)
}
"redirect to account page when signed in" in {
val result = verifyLandingController.verifySignIn(
FakeRequest().withSession(
SessionKeys.sessionId -> s"session-${UUID.randomUUID()}",
SessionKeys.lastRequestTimestamp -> LocalDate.now.toEpochDay.toString
)
)
redirectLocation(result) shouldBe Some("/check-your-state-pension/account")
}
}
"GET /not-authorised" must {
"show not authorised page" when {
"journey Id is None" in {
val result = verifyLandingController.showNotAuthorised(None)(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
}
"show generic not_authorised template for FailedMatching journey" in {
val journeyId = "failed-matching-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("FailedMatching"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
}
"show generic not_authorised template for InsufficientEvidence journey" in {
val journeyId = "insufficient-evidence-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("InsufficientEvidence"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
}
"show generic not_authorised template for Incomplete journey" in {
val journeyId = "incomplete-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("Incomplete"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
}
"show generic not_authorised template for PreconditionFailed journey" in {
val journeyId = "precondition-failed-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("PreconditionFailed"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
}
"show generic not_authorised template for UserAborted journey" in {
val journeyId = "user-aborted-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("UserAborted"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
}
}
"show technical_issue template for TechnicalIssue journey" in {
val journeyId = "technical-issue-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("TechnicalIssue"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe INTERNAL_SERVER_ERROR
contentAsString(result) should include("This online service is experiencing technical difficulties.")
}
"show locked_out template for LockedOut journey" in {
val journeyId = "locked-out-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("LockedOut"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe LOCKED
contentAsString(result) should include(
"You have reached the maximum number of attempts to confirm your identity."
)
}
"show timeout template for Timeout journey" in {
val journeyId = "timeout-journey-id"
when(mockIVConnector.identityVerificationResponse(mockEQ(journeyId))(mockAny())).thenReturn(
Future.successful(IdentityVerificationSuccessResponse("Timeout"))
)
val result = verifyLandingController.showNotAuthorised(Some(journeyId))(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include(
"Your session has ended because you have not done anything for 15 minutes."
)
}
"show 2FA failure page when no journey ID specified" in {
val result = verifyLandingController.showNotAuthorised(None)(fakeRequest)
status(result) shouldBe UNAUTHORIZED
contentAsString(result) should include("We cannot confirm your identity")
contentAsString(result) should not include "If you cannot confirm your identity and you have a query you can"
}
}
"GET /cymraeg" must {
implicit val lang = Lang("cy")
"return 200" in {
val result = verifyLandingController.show(fakeRequestWelsh)
status(result) shouldBe OK
}
"return HTML" in {
val result = verifyLandingController.show(fakeRequestWelsh)
Helpers.contentType(result) shouldBe Some("text/html")
charset(result) shouldBe Some("utf-8")
}
"load the landing page in welsh" in {
when(mockApplicationConfig.isWelshEnabled).thenReturn(true)
val result = verifyLandingController.show(fakeRequestWelsh)
contentAsString(result) should include("data-journey-click=\"link - click:lang-select:Cymraeg\"")
}
}
}
|
hmrc/nisp-frontend
|
test/uk/gov/hmrc/nisp/controllers/LandingControllerSpec.scala
|
Scala
|
apache-2.0
| 11,957 |
import org.scalatest.{ Matchers, FlatSpec }
import bogo_sort._
class bogo_sort_test extends FlatSpec with Matchers {
val values = Array(1, 17, -4, 2)
"Bogo Sort" should "sort an array of Ints" in {
val sorted = sort(values)
sorted should contain inOrder (-4, 1, 2, 17)
}
"Bogo Sort V2" should "sort an array of Ints by looking for the sorted array in the iterator of all permutations " in {
val sorted = sort_v2(values)
sorted should contain inOrder (-4, 1, 2, 17)
}
}
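// Hedged sketch, not the repository's actual bogo_sort implementation: one minimal way to realise
// the shuffle-until-sorted strategy exercised by the test above. BogoSortSketch and its members
// are hypothetical names introduced here for illustration only.
object BogoSortSketch {
  import scala.util.Random

  private def isSorted(a: Array[Int]): Boolean =
    a.indices.dropRight(1).forall(i => a(i) <= a(i + 1))

  // Keep shuffling until the array happens to come out sorted.
  def sort(xs: Array[Int]): Array[Int] = {
    var a = xs.clone()
    while (!isSorted(a)) a = Random.shuffle(a.toSeq).toArray
    a
  }
}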
|
Etiene/Algorithm-Implementations
|
Bogosort/Scala/lichtsprung/bogo_sort_test.scala
|
Scala
|
mit
| 498 |
package outer
package nested
val one: Int = 1
type Hi = Int
object Hi {
def hi: Hi = 2
}
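// Hedged illustration, not part of the original dotty test: inside the same package the top-level
// definitions above are visible directly. NestedUsageSketch is a hypothetical name added only to
// show how `one` and `Hi.hi` can be referenced.
object NestedUsageSketch {
  val sum: Int = one + Hi.hi // 1 + 2 == 3
}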
|
som-snytt/dotty
|
tests/pos-special/sourcepath/outer/nested/toplevel1.scala
|
Scala
|
apache-2.0
| 93 |
package lila.evaluation
case class PlayerFlags(
suspiciousErrorRate: Boolean,
alwaysHasAdvantage: Boolean,
highBlurRate: Boolean,
moderateBlurRate: Boolean,
highlyConsistentMoveTimes: Boolean,
moderatelyConsistentMoveTimes: Boolean,
noFastMoves: Boolean,
suspiciousHoldAlert: Boolean
)
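// Hedged usage sketch, not part of the original lila module: constructing an instance with every
// field named, purely to illustrate the shape of the case class above. PlayerFlagsExample is a
// hypothetical name added for illustration only.
object PlayerFlagsExample {
  val cleanProfile: PlayerFlags = PlayerFlags(
    suspiciousErrorRate = false,
    alwaysHasAdvantage = false,
    highBlurRate = false,
    moderateBlurRate = false,
    highlyConsistentMoveTimes = false,
    moderatelyConsistentMoveTimes = false,
    noFastMoves = true,
    suspiciousHoldAlert = false
  )
}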
|
luanlv/lila
|
modules/evaluation/src/main/PlayerFlags.scala
|
Scala
|
mit
| 319 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.orc
import scala.collection.JavaConverters._
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex
import org.apache.spark.sql.execution.datasources.orc.OrcFilters
import org.apache.spark.sql.execution.datasources.v2.FileScanBuilder
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.sources.v2.DataSourceOptions
import org.apache.spark.sql.sources.v2.reader.Scan
import org.apache.spark.sql.types.StructType
case class OrcScanBuilder(
sparkSession: SparkSession,
fileIndex: PartitioningAwareFileIndex,
schema: StructType,
dataSchema: StructType,
options: DataSourceOptions) extends FileScanBuilder(schema) {
lazy val hadoopConf =
sparkSession.sessionState.newHadoopConfWithOptions(options.asMap().asScala.toMap)
override def build(): Scan = {
OrcScan(sparkSession, hadoopConf, fileIndex, dataSchema, readSchema)
}
private var _pushedFilters: Array[Filter] = Array.empty
override def pushFilters(filters: Array[Filter]): Array[Filter] = {
if (sparkSession.sessionState.conf.orcFilterPushDown) {
OrcFilters.createFilter(schema, filters).foreach { f =>
// The pushed filters will be set in `hadoopConf`. After that, we can simply use the
// changed `hadoopConf` in executors.
OrcInputFormat.setSearchArgument(hadoopConf, f, schema.fieldNames)
}
val dataTypeMap = schema.map(f => f.name -> f.dataType).toMap
_pushedFilters = OrcFilters.convertibleFilters(schema, dataTypeMap, filters).toArray
}
filters
}
override def pushedFilters(): Array[Filter] = _pushedFilters
}
|
WindCanDie/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcScanBuilder.scala
|
Scala
|
apache-2.0
| 2,562 |
/*
*
* * Licensed to the Apache Software Foundation (ASF) under one or more
* * contributor license agreements. See the NOTICE file distributed with
* * this work for additional information regarding copyright ownership.
* * The ASF licenses this file to You under the Apache License, Version 2.0
* * (the "License"); you may not use this file except in compliance with
* * the License. You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.apache.eagle.datastream.utils
import java.util
import org.apache.eagle.alert.dedup.{AlertEmailDeduplicationExecutor, AlertEntityDeduplicationExecutor}
import org.apache.eagle.alert.executor.AlertExecutor
import org.apache.eagle.alert.notification.AlertNotificationExecutor
import org.apache.eagle.alert.persist.AlertPersistExecutor
import org.apache.eagle.datastream.core.{StreamConnector, FlatMapProducer, StreamProducer}
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable.ListBuffer
/**
 * Create alert executors and provide a callback for the programmer to link alert executors to their immediate parent executors
*
* <br/><br/>
* Explanations for programId, alertExecutorId and policy<br/><br/>
* - programId - distributed or single-process program for example one storm topology<br/>
* - alertExecutorId - one process/thread which executes multiple policies<br/>
* - policy - some rules to be evaluated<br/>
*
* <br/>
*
* Normally the mapping is like following:
* <pre>
* programId (1:N) alertExecutorId
* alertExecutorId (1:N) policy
* </pre>
*/
object AlertExecutorConsumerUtils {
private val LOG: Logger = LoggerFactory.getLogger(AlertExecutorConsumerUtils.getClass)
def setupAlertConsumers(toBeAddedEdges: ListBuffer[StreamConnector[Any,Any]], alertStreamProducers: List[StreamProducer[Any]]): Unit = {
val alertExecutorIdList: java.util.List[String] = new util.ArrayList[String]()
alertStreamProducers.map(x =>
alertExecutorIdList.add(x.asInstanceOf[FlatMapProducer[AnyRef, AnyRef]].mapper.asInstanceOf[AlertExecutor].getExecutorId));
val alertDefDao = alertStreamProducers.head.asInstanceOf[FlatMapProducer[AnyRef, AnyRef]].mapper.asInstanceOf[AlertExecutor].getPolicyDefinitionDao
val entityDedupExecutor: AlertEntityDeduplicationExecutor = new AlertEntityDeduplicationExecutor(alertExecutorIdList, alertDefDao)
val emailDedupExecutor: AlertEmailDeduplicationExecutor = new AlertEmailDeduplicationExecutor(alertExecutorIdList, alertDefDao)
val notificationExecutor: AlertNotificationExecutor = new AlertNotificationExecutor(alertExecutorIdList, alertDefDao)
val persistExecutor: AlertPersistExecutor = new AlertPersistExecutor
val entityDedupStreamProducer = FlatMapProducer(entityDedupExecutor)
val persistStreamProducer = FlatMapProducer(persistExecutor)
val emailDedupStreamProducer = FlatMapProducer(emailDedupExecutor)
val notificationStreamProducer = FlatMapProducer(notificationExecutor)
toBeAddedEdges += StreamConnector(entityDedupStreamProducer, persistStreamProducer)
toBeAddedEdges += StreamConnector(emailDedupStreamProducer, notificationStreamProducer)
alertStreamProducers.foreach(sp => {
toBeAddedEdges += StreamConnector(sp, entityDedupStreamProducer)
toBeAddedEdges += StreamConnector(sp, emailDedupStreamProducer)
})
}
}
|
qinzhaokun/incubator-eagle
|
eagle-core/eagle-data-process/eagle-stream-process-api/src/main/scala/org/apache/eagle/datastream/utils/AlertExecutorConsumerUtils.scala
|
Scala
|
apache-2.0
| 3,730 |
package chapter.one
object ExerciseSix {
val twoToThe1024: BigInt = {
BigInt(2).pow(1024)
}
}
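// Hedged usage sketch, not part of the original exercise: BigInt comfortably represents 2^1024,
// which needs 1025 bits and 309 decimal digits. ExerciseSixDemo is a hypothetical name added for
// illustration only.
object ExerciseSixDemo extends App {
  println(ExerciseSix.twoToThe1024)           // the full 309-digit value
  println(ExerciseSix.twoToThe1024.bitLength) // 1025
}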
|
deekim/impatient-scala
|
src/main/scala/chapter/one/ExerciseSix.scala
|
Scala
|
apache-2.0
| 105 |
package zeroformatter
import java.time._
import dog._
import dog.props._
import scalaprops._
import scalaz.std.anyVal._
import scalaz.std.string._
import scalaz.std.option._
object PrimitiveFormatterTest extends Base {
implicit val boolCase = Gen.elements((true, Array(1).map(_.toByte)), (false, Array(0).map(_.toByte)))
val `serialize Boolean` = Prop.property((v: (Boolean, Array[Byte])) =>
assert.eq(v._2, ZeroFormatter.serialize(v._1))
)
val `deserialize Boolean` = Prop.property((v: (Boolean, Array[Byte])) =>
assert.eq(v._1, ZeroFormatter.deserialize[Boolean](v._2))
)
val `serialize Byte` = TestCase {
val value = 123.toByte
val bytes = Array(0x7b.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Byte` = TestCase {
for {
values <- `serialize Byte`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Byte](values._2)).lift
} yield ()
}
val `serialize Option[Byte]` = TestCase {
val value: Option[Byte] = Some(123.toByte)
val bytes = Array(0x01.toByte, 0x7b.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Option[Byte]` = TestCase {
for {
values <- `serialize Option[Byte]`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Option[Byte]](values._2)).lift
} yield ()
}
val `serialize Short` = TestCase {
val value = 123.toShort
val bytes = Array(0x7b, 0x00).map(_.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Short` = TestCase {
for {
values <- `serialize Short`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Short](values._2)).lift
} yield ()
}
val `serialize Int` = TestCase {
val value = 123
val bytes = Array(0x7b, 0x00, 0x00, 0x00).map(_.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Int` = TestCase {
for {
values <- `serialize Int`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Int](values._2)).lift
} yield ()
}
val `serialize Long` = TestCase {
val value = 123.toLong
val bytes = Array(0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00).map(_.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Long` = TestCase {
for {
values <- `serialize Long`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Long](values._2)).lift
} yield ()
}
val `serialize Float` = TestCase {
val value = 123.0.toFloat
val bytes = Array(0x00, 0x00, 0xf6, 0x42).map(_.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Float` = TestCase {
for {
values <- `serialize Float`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Float](values._2)).lift
} yield ()
}
val `serialize Double` = TestCase {
val value = 123.0
val bytes = Array(0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x5e, 0x40).map(_.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Double` = TestCase {
for {
values <- `serialize Double`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Double](values._2)).lift
} yield ()
}
val `serialize Char` = TestCase {
val value = 'γ'
val bytes = Array(0x42, 0x30).map(_.toByte)
for {
_ <- assert.eq(bytes, ZeroFormatter.serialize(value)).lift
} yield (value, bytes)
}
val `deserialize Char` = TestCase {
for {
values <- `serialize Char`
_ <- assert.eq(values._1, ZeroFormatter.deserialize[Char](values._2)).lift
} yield ()
}
val `serialize and deserialize String` = TestCase {
val value = "γγγγγ"
val r = ZeroFormatter.serialize(value)
assert.eq(value, ZeroFormatter.deserialize[String](r))
}
val `serialize and deserialize Option[String]` = TestCase {
val value: Option[String] = Some("γγγγγ")
val r = ZeroFormatter.serialize(value)
assert.eq(value, ZeroFormatter.deserialize[Option[String]](r))
}
val `serialize and deserialize Duration` = TestCase {
val value = Duration.ofSeconds(1234, 56)
val r = ZeroFormatter.serialize(value)
assert.equal(value, ZeroFormatter.deserialize[Duration](r))
}
}
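// Hedged illustration, not part of the original test suite: the serialize/deserialize pair used
// throughout the tests above is symmetric, so a round trip returns the original value.
// RoundTripSketch is a hypothetical name added for illustration only.
object RoundTripSketch {
  val roundTrippedInt: Int = ZeroFormatter.deserialize[Int](ZeroFormatter.serialize(123)) // 123
}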
|
pocketberserker/scala-zero-formatter
|
zero-formatter/src/test/scala/zeroformatter/PrimitiveFormatterTest.scala
|
Scala
|
mit
| 4,574 |
package controller
import console.model.World
import me.mtrupkin.console.control.{Border, Composite}
import me.mtrupkin.console.controller.ControllerStateMachine
import me.mtrupkin.console.layout.{Pos, Layout, Orientation}
import me.mtrupkin.console.screen.{ConsoleKeyModifier, ConsoleKey}
import me.mtrupkin.geometry.Point
import me.mtrupkin.terminal.Input
import me.mtrupkin.widget.IndexListWidget
import model.Saves
/**
* Created by mtrupkin on 11/29/2014.
*/
trait LoadGame { self: ControllerStateMachine =>
class LoadGameController extends ControllerState {
val window = new Composite(name = "window", layoutFlow = Orientation.VERTICAL) {
override def keyPressed(key: ConsoleKey) {
import scala.swing.event.Key._
key.keyValue match {
case Escape => revertState()
case _ => super.keyPressed(key)
}
}
}
val listWidget = new IndexListWidget(Saves.names, slot)
val listBoarder = new Composite(name = "list-border", border = Border.DOUBLE)
listBoarder.layout = Some(Layout(None, Pos.CENTER))
listBoarder.addControl(listWidget)
window.addControl(listBoarder)
override def update(elapsed: Int): Unit = {}
def slot(i: Int): Unit = {
changeState(new GameController(Saves.loadGame(i)))
}
}
}
|
mtrupkin/console-lib
|
src/main/scala/controller/LoadGame.scala
|
Scala
|
mit
| 1,341 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.expressions
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.sql.catalyst.expressions.ScalaUDF
import org.apache.spark.sql.Column
import org.apache.spark.sql.functions
import org.apache.spark.sql.types.DataType
/**
* A user-defined function. To create one, use the `udf` functions in `functions`.
*
* As an example:
* {{{
 * // Define a UDF that returns true or false based on some numeric score.
* val predict = udf((score: Double) => if (score > 0.5) true else false)
*
* // Projects a column that adds a prediction column based on the score column.
* df.select( predict(df("score")) )
* }}}
*
* @note The user-defined functions must be deterministic. Due to optimization,
* duplicate invocations may be eliminated or the function may even be invoked more times than
* it is present in the query.
*
* @since 1.3.0
*/
@InterfaceStability.Stable
case class UserDefinedFunction protected[sql] (
f: AnyRef,
dataType: DataType,
inputTypes: Option[Seq[DataType]]) {
/**
* Returns an expression that invokes the UDF, using the given arguments.
*
* @since 1.3.0
*/
def apply(exprs: Column*): Column = {
Column(ScalaUDF(f, dataType, exprs.map(_.expr), inputTypes.getOrElse(Nil)))
}
}
|
Panos-Bletsos/spark-cost-model-optimizer
|
sql/core/src/main/scala/org/apache/spark/sql/expressions/UserDefinedFunction.scala
|
Scala
|
apache-2.0
| 2,084 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.read.recalibration
import org.bdgenomics.formats.avro.AlignmentRecord
import org.scalatest.FunSuite
class RecalibrationTableSuite extends FunSuite {
val observedCovariates = Map((CovariateKey(0,
(50 + 33).toChar,
2,
'A',
'C') -> new Aggregate(1000000, 1, 10.0)),
(CovariateKey(0,
(40 + 33).toChar,
1,
'N',
'N') -> new Aggregate(100000, 1, 10.0)))
val table = RecalibrationTable(new ObservationTable(
observedCovariates))
test("look up quality scores in table") {
val scores = table(observedCovariates.map(_._1).toArray)
assert(scores.size === 2)
assert(scores(0) === (50 + 33).toChar)
assert(scores(1) === (47 + 33).toChar)
}
}
|
massie/adam
|
adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/recalibration/RecalibrationTableSuite.scala
|
Scala
|
apache-2.0
| 1,531 |
package com.temerev.wontfix
trait FixDictionary {
def getTagsInGroup(msgType: String, groupTag: Int): Seq[Int]
def getTagsInMessage(msgType: String): Seq[Int]
}
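// Hedged sketch, not part of the original wontfix library: a minimal in-memory implementation of
// the trait above, backed by hard-coded data. StaticFixDictionary, the message type and the tag
// numbers used here are illustrative assumptions, not values taken from the repository.
object StaticFixDictionary extends FixDictionary {
  private val groupTags: Map[(String, Int), Seq[Int]] = Map(("D", 453) -> Seq(448, 447, 452))
  private val messageTags: Map[String, Seq[Int]] = Map("D" -> Seq(11, 55, 54, 38, 40))

  def getTagsInGroup(msgType: String, groupTag: Int): Seq[Int] =
    groupTags.getOrElse((msgType, groupTag), Seq.empty)

  def getTagsInMessage(msgType: String): Seq[Int] =
    messageTags.getOrElse(msgType, Seq.empty)
}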
|
atemerev/wontfix
|
src/main/scala/com/temerev/wontfix/FixDictionary.scala
|
Scala
|
bsd-2-clause
| 166 |
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scray.querying.source
import scray.querying.description.{ Column, EmptyRow, Row }
import scray.querying.queries.DomainQuery
import scray.querying.description.internal.{
Domain,
RangeValueDomain,
SingleValueDomain,
ComposedMultivalueDomain,
StringDomainConverter,
BooleanDomainConverter,
IntDomainConverter,
LongDomainConverter,
BigIntDomainConverter,
JBigIntegerDomainConverter,
DoubleDomainConverter,
BigDecimalDomainConverter,
JBigDecimalDomainConverter,
DomainTypeConverter
}
import java.math.{BigInteger => JBigInteger, BigDecimal => JBigDecimal}
import com.twitter.util.Try
import scray.querying.description.WildcardChecker
import com.typesafe.scalalogging.LazyLogging
/**
* Common code for domain checking
*/
object DomainFilterSource extends LazyLogging {
/**
* check if the provided value is compatible with the domains
*/
def domainCheck[T](value: T, domain: Domain[_],
converter: Option[DomainTypeConverter[_]]): Boolean = domain match {
case single: SingleValueDomain[T] => Try {
if(single.isNull) {
true
} else if(single.isWildcard){
!WildcardChecker.checkValueAgainstPredicate(single.value.asInstanceOf[String], value.asInstanceOf[String])
} else {
if(value.getClass().isPrimitive() || value.getClass().isAssignableFrom(single.value.getClass)) {
!single.equiv.equiv(value, single.value)
} else {
converter.map { converter =>
val mapped = converter.mapDomain(domain).asInstanceOf[Option[SingleValueDomain[T]]]
mapped.map(svd => !svd.equiv.equiv(value, svd.value)).getOrElse(true)}.getOrElse(true)
}
}
}.getOrElse(converter.map{converter =>
val mapped = converter.mapDomain(domain).asInstanceOf[Option[SingleValueDomain[T]]]
mapped.map(svd => !svd.equiv.equiv(value, svd.value)).getOrElse(true)}.getOrElse(true))
case range: RangeValueDomain[T] => Try {
!range.valueIsInBounds(value)
}.getOrElse(converter.map{converter =>
val mapped = converter.mapDomain(domain).asInstanceOf[Option[RangeValueDomain[T]]]
mapped.map(rvd => Try(!rvd.valueIsInBounds(value)).getOrElse(true)).getOrElse(true)}.getOrElse(true))
case composed: ComposedMultivalueDomain[T] => composed.domains.find(!domainCheck(value, _, converter)).isEmpty
}
/**
* determine filter converter mappers for the value
*/
@inline def getDomainConverter(value: Any): Option[DomainTypeConverter[_]] = value match {
case s: String => Some(StringDomainConverter)
case b: Boolean => Some(BooleanDomainConverter)
case i: Int => Some(IntDomainConverter)
case l: Long => Some(LongDomainConverter)
case l: Double => Some(DoubleDomainConverter)
case bi: BigInt => Some(BigIntDomainConverter)
case bji: JBigInteger => Some(JBigIntegerDomainConverter)
case db: BigDecimal => Some(BigDecimalDomainConverter)
case dbj: JBigDecimal => Some(JBigDecimalDomainConverter)
case _ => None
}
}
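/**
 * Hedged illustration, not part of the original scray source: getDomainConverter above simply
 * dispatches on the runtime type of the value, e.g. a Long yields the LongDomainConverter and an
 * unsupported type yields None. DomainConverterExample is a hypothetical name added for
 * illustration only.
 */
object DomainConverterExample {
  val longConverter: Option[DomainTypeConverter[_]] = DomainFilterSource.getDomainConverter(42L) // Some(LongDomainConverter)
  val unsupported: Option[DomainTypeConverter[_]] = DomainFilterSource.getDomainConverter(Seq(1, 2)) // None
}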
/**
* used to filter rows according to the domain parameters supplied
* TODO: exclude filters which have already been applied due to usage in database system
*/
class LazyQueryDomainFilterSource[Q <: DomainQuery](source: LazySource[Q])
extends LazyQueryMappingSource[Q](source) with LazyLogging {
override def transformSpoolElement(element: Row, query: Q): Row = {
// if we find a domain which is not matched by this Row we throw it (the Row) away
query.getWhereAST.find { domain =>
element.getColumnValue[Any](domain.column) match {
case None => domain match {
case single: SingleValueDomain[_] if single.isNull => false
case _ => true
}
case Some(value) => DomainFilterSource.domainCheck(value, domain, DomainFilterSource.getDomainConverter(value))
}
} match {
case None => element
case Some(x) => new EmptyRow
}
}
/**
* LazyQueryDomainFilterSource doesn't throw away columns (only rows),
* so we report back all columns from upstream
*/
override def getColumns: Set[Column] = source.getColumns
override def getDiscriminant = "Filter" + source.getDiscriminant
}
/**
* used to filter rows according to the domain parameters supplied
*/
class EagerCollectingDomainFilterSource[Q <: DomainQuery, R](source: Source[Q, R])
extends EagerCollectingQueryMappingSource[Q, R](source) {
override def transformSeq(element: Seq[Row], query: Q): Seq[Row] = {
element.filter { row =>
query.getWhereAST.find { domain =>
row.getColumnValue[Any](domain.column) match {
case None => domain match {
case single: SingleValueDomain[_] if single.isNull => false
case _ => true
}
case Some(value) => DomainFilterSource.domainCheck(value, domain, DomainFilterSource.getDomainConverter(value))
}
} match {
case None => true
case Some(x) => false
}
}
}
override def transformSeqElement(element: Row, query: Q): Row = element
override def getColumns: Set[Column] = source.getColumns
override def getDiscriminant = "Filter" + source.getDiscriminant
}
|
scray/scray
|
scray-querying/modules/scray-querying/src/main/scala/scray/querying/source/queryDomainFilterSources.scala
|
Scala
|
apache-2.0
| 5,852 |
package no.uio.musit.healthcheck
import no.uio.musit.test.MusitSpec
class StopWatchSpec extends MusitSpec {
class ListTicker(var values: List[Long]) extends Ticker {
override def tick() = values match {
case head :: Nil =>
head
case head :: tail =>
values = tail
head
case Nil =>
throw new IllegalStateException()
}
}
"StopWatch" when {
"elapsed is called" should {
"calculate from the first tick" in {
val ticker = new ListTicker(List(2, 44))
val sw = StopWatch(ticker)
sw.elapsed() mustBe 42
}
}
}
}
|
MUSIT-Norway/musit
|
musit-service/src/test/scala/no/uio/musit/healthcheck/StopWatchSpec.scala
|
Scala
|
gpl-2.0
| 623 |
package chandu0101.scalajs.react.components.demo.pages
import chandu0101.scalajs.react.components.demo.components.LeftNavPage
import chandu0101.scalajs.react.components.demo.routes.{LeftRoute, MuiRouteModule}
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.router2.RouterCtl
import scala.scalajs.js.Dynamic.{global => g, literal => json}
object MuiPage {
val component = ReactComponentB[Props]("MuiPage")
.render((P) => {
LeftNavPage(MuiRouteModule.menu, P.selectedPage, P.ctrl)
})
.configureSpec(materialui.installMuiContext)
.build
case class Props(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute])
def apply(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute]) = component(Props(selectedPage, ctrl))
}
|
coreyauger/scalajs-react-components
|
demo/src/main/scala/chandu0101/scalajs/react/components/demo/pages/MuiPage.scala
|
Scala
|
apache-2.0
| 760 |
package rml.args.arg.injector
import java.io.File
import rml.args.arg.Arg
import rml.args.config.FullConfig
import rml.args.util.CsvReader
trait DataFromCsv extends Injector {
val files: Arg[List[File]]
override def inject(config: FullConfig, keyColumn: String, keyValue: String): FullConfig = {
files.apply(config).get.foreach{ file =>
CsvReader.findKey(file, keyColumn, keyValue) match {
case Some(m) => return config.over(m)
case None =>
}
}
config
}
}
|
rml/scala_args
|
src/main/scala/rml/args/arg/injector/DataFromCsv.scala
|
Scala
|
gpl-3.0
| 510 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master.ui
import org.apache.spark.Logging
import org.apache.spark.deploy.master.Master
import org.apache.spark.ui.{SparkUI, WebUI}
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.AkkaUtils
/**
* Web UI server for the standalone master.
*/
private[spark]
class MasterWebUI(val master: Master, requestedPort: Int)
extends WebUI(master.securityMgr, requestedPort, master.conf, name = "MasterUI") with Logging {
val masterActorRef = master.self
val timeout = AkkaUtils.askTimeout(master.conf)
initialize()
/** Initialize all components of the server. */
def initialize() {
attachPage(new ApplicationPage(this))
attachPage(new HistoryNotFoundPage(this))
attachPage(new MasterPage(this))
attachHandler(createStaticHandler(MasterWebUI.STATIC_RESOURCE_DIR, "/static"))
}
/** Attach a reconstructed UI to this Master UI. Only valid after bind(). */
def attachSparkUI(ui: SparkUI) {
assert(serverInfo.isDefined, "Master UI must be bound to a server before attaching SparkUIs")
ui.getHandlers.foreach(attachHandler)
}
/** Detach a reconstructed UI from this Master UI. Only valid after bind(). */
def detachSparkUI(ui: SparkUI) {
assert(serverInfo.isDefined, "Master UI must be bound to a server before detaching SparkUIs")
ui.getHandlers.foreach(detachHandler)
}
}
private[spark] object MasterWebUI {
val STATIC_RESOURCE_DIR = SparkUI.STATIC_RESOURCE_DIR
}
|
Dax1n/spark-core
|
core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala
|
Scala
|
apache-2.0
| 2,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.concurrent.CountDownLatch
import org.apache.commons.lang3.RandomStringUtils
import org.mockito.Mockito._
import org.scalactic.TolerantNumerics
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.mock.MockitoSugar
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.util.{BlockingSource, MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.ManualClock
class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging with MockitoSugar {
import AwaitTerminationTester._
import testImplicits._
// To make === between double tolerate inexact values
implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
sqlContext.streams.active.foreach(_.stop())
}
test("name unique in active queries") {
withTempDir { dir =>
def startQuery(name: Option[String]): StreamingQuery = {
val writer = MemoryStream[Int].toDS.writeStream
name.foreach(writer.queryName)
writer
.foreach(new TestForeachWriter)
.start()
}
// No name by default, multiple active queries can have no name
val q1 = startQuery(name = None)
assert(q1.name === null)
val q2 = startQuery(name = None)
assert(q2.name === null)
// Can be set by user
val q3 = startQuery(name = Some("q3"))
assert(q3.name === "q3")
// Multiple active queries cannot have same name
val e = intercept[IllegalArgumentException] {
startQuery(name = Some("q3"))
}
q1.stop()
q2.stop()
q3.stop()
}
}
test(
"id unique in active queries + persists across restarts, runId unique across start/restarts") {
val inputData = MemoryStream[Int]
withTempDir { dir =>
var cpDir: String = null
def startQuery(restart: Boolean): StreamingQuery = {
if (cpDir == null || !restart) cpDir = s"$dir/${RandomStringUtils.randomAlphabetic(10)}"
MemoryStream[Int].toDS().groupBy().count()
.writeStream
.format("memory")
.outputMode("complete")
.queryName(s"name${RandomStringUtils.randomAlphabetic(10)}")
.option("checkpointLocation", cpDir)
.start()
}
// id and runId unique for new queries
val q1 = startQuery(restart = false)
val q2 = startQuery(restart = false)
assert(q1.id !== q2.id)
assert(q1.runId !== q2.runId)
q1.stop()
q2.stop()
// id persists across restarts, runId unique across restarts
val q3 = startQuery(restart = false)
q3.stop()
val q4 = startQuery(restart = true)
q4.stop()
      assert(q3.id === q4.id)
assert(q3.runId !== q4.runId)
// Only one query with same id can be active
val q5 = startQuery(restart = false)
val e = intercept[IllegalStateException] {
startQuery(restart = true)
}
}
}
testQuietly("isActive, exception, and awaitTermination") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive === true),
AssertOnQuery(_.exception.isEmpty),
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
TestAwaitTermination(ExpectBlocked),
TestAwaitTermination(ExpectBlocked, timeoutMs = 2000),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = false),
StopStream,
AssertOnQuery(_.isActive === false),
AssertOnQuery(_.exception.isEmpty),
TestAwaitTermination(ExpectNotBlocked),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 2000, expectedReturnValue = true),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = true),
StartStream(),
AssertOnQuery(_.isActive === true),
AddData(inputData, 0),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
TestAwaitTermination(ExpectException[SparkException]),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 2000),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 10),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("OneTime trigger, commit log, and exception") {
import Trigger.Once
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive === true),
StopStream,
AddData(inputData, 1, 2),
StartStream(trigger = Once),
CheckAnswer(6, 3),
StopStream, // clears out StreamTest state
AssertOnQuery { q =>
// both commit log and offset log contain the same (latest) batch id
q.batchCommitLog.getLatest().map(_._1).getOrElse(-1L) ==
q.offsetLog.getLatest().map(_._1).getOrElse(-2L)
},
AssertOnQuery { q =>
// blow away commit log and sink result
q.batchCommitLog.purge(1)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(trigger = Once),
CheckAnswer(6, 3), // ensure we fall back to offset log and reprocess batch
StopStream,
AddData(inputData, 3),
StartStream(trigger = Once),
CheckLastBatch(2), // commit log should be back in place
StopStream,
AddData(inputData, 0),
StartStream(trigger = Once),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("status, lastProgress, and recentProgress") {
import StreamingQuerySuite._
clock = new StreamManualClock
/** Custom MemoryStream that waits for manual clock to reach a time */
val inputData = new MemoryStream[Int](0, sqlContext) {
// getOffset should take 50 ms the first time it is called
override def getOffset: Option[Offset] = {
val offset = super.getOffset
if (offset.nonEmpty) {
clock.waitTillTime(1050)
}
offset
}
// getBatch should take 100 ms the first time it is called
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
if (start.isEmpty) clock.waitTillTime(1150)
super.getBatch(start, end)
}
}
// query execution should take 350 ms the first time it is called
val mapped = inputData.toDS.coalesce(1).as[Long].map { x =>
clock.waitTillTime(1500) // this will only wait the first time when clock < 1500
10 / x
}.agg(count("*")).as[Long]
case class AssertStreamExecThreadIsWaitingForTime(targetTime: Long)
extends AssertOnQuery(q => {
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingFor(targetTime))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}, "") {
override def toString: String = s"AssertStreamExecThreadIsWaitingForTime($targetTime)"
}
case class AssertClockTime(time: Long)
extends AssertOnQuery(q => clock.getTimeMillis() === time, "") {
override def toString: String = s"AssertClockTime($time)"
}
var lastProgressBeforeStop: StreamingQueryProgress = null
testStream(mapped, OutputMode.Complete)(
StartStream(ProcessingTime(1000), triggerClock = clock),
AssertStreamExecThreadIsWaitingForTime(1000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while offset is being fetched
AddData(inputData, 1, 2),
AdvanceManualClock(1000), // time = 1000 to start new trigger, will block on getOffset
AssertStreamExecThreadIsWaitingForTime(1050),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === true),
AssertOnQuery(_.status.message.startsWith("Getting offsets from")),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch is being fetched
AdvanceManualClock(50), // time = 1050 to unblock getOffset
AssertClockTime(1050),
AssertStreamExecThreadIsWaitingForTime(1150), // will block on getBatch that needs 1150
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === true),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch is being processed
AdvanceManualClock(100), // time = 1150 to unblock getBatch
AssertClockTime(1150),
AssertStreamExecThreadIsWaitingForTime(1500), // will block in Spark job that needs 1500
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === true),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch processing has completed
AssertOnQuery { _ => clock.getTimeMillis() === 1150 },
AdvanceManualClock(350), // time = 1500 to unblock job
AssertClockTime(1500),
CheckAnswer(2),
AssertStreamExecThreadIsWaitingForTime(2000),
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.lastProgress != null)
assert(query.recentProgress.exists(_.numInputRows > 0))
assert(query.recentProgress.last.eq(query.lastProgress))
val progress = query.lastProgress
assert(progress.id === query.id)
assert(progress.name === query.name)
assert(progress.batchId === 0)
        assert(progress.timestamp === "1970-01-01T00:00:01.000Z") // 1000 ms in UTC
assert(progress.numInputRows === 2)
assert(progress.processedRowsPerSecond === 4.0)
assert(progress.durationMs.get("getOffset") === 50)
assert(progress.durationMs.get("getBatch") === 100)
assert(progress.durationMs.get("queryPlanning") === 0)
assert(progress.durationMs.get("walCommit") === 0)
assert(progress.durationMs.get("triggerExecution") === 500)
assert(progress.sources.length === 1)
assert(progress.sources(0).description contains "MemoryStream")
assert(progress.sources(0).startOffset === null)
assert(progress.sources(0).endOffset !== null)
assert(progress.sources(0).processedRowsPerSecond === 4.0) // 2 rows processed in 500 ms
assert(progress.stateOperators.length === 1)
assert(progress.stateOperators(0).numRowsUpdated === 1)
assert(progress.stateOperators(0).numRowsTotal === 1)
assert(progress.sink.description contains "MemorySink")
true
},
// Test whether input rate is updated after two batches
AssertStreamExecThreadIsWaitingForTime(2000), // blocked waiting for next trigger time
AddData(inputData, 1, 2),
AdvanceManualClock(500), // allow another trigger
AssertClockTime(2000),
AssertStreamExecThreadIsWaitingForTime(3000), // will block waiting for next trigger time
CheckAnswer(4),
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.recentProgress.last.eq(query.lastProgress))
assert(query.lastProgress.batchId === 1)
assert(query.lastProgress.inputRowsPerSecond === 2.0)
assert(query.lastProgress.sources(0).inputRowsPerSecond === 2.0)
true
},
// Test status and progress after data is not available for a trigger
AdvanceManualClock(1000), // allow another trigger
AssertStreamExecThreadIsWaitingForTime(4000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
// Test status and progress after query stopped
AssertOnQuery { query =>
lastProgressBeforeStop = query.lastProgress
true
},
StopStream,
AssertOnQuery(_.lastProgress.json === lastProgressBeforeStop.json),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Stopped"),
// Test status and progress after query terminated with error
StartStream(ProcessingTime(1000), triggerClock = clock),
AdvanceManualClock(1000), // ensure initial trigger completes before AddData
AddData(inputData, 0),
AdvanceManualClock(1000), // allow another trigger
ExpectFailure[SparkException](),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message.startsWith("Terminated with exception"))
)
}
test("lastProgress should be null when recentProgress is empty") {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
// Creating source is blocked so recentProgress is empty and lastProgress should be null
assert(sq.lastProgress === null)
// Release the latch and stop the query
BlockingSource.latch.countDown()
sq.stop()
}
}
test("codahale metrics") {
val inputData = MemoryStream[Int]
/** Whether metrics of a query is registered for reporting */
def isMetricsRegistered(query: StreamingQuery): Boolean = {
val sourceName = s"spark.streaming.${query.id}"
val sources = spark.sparkContext.env.metricsSystem.getSourcesByName(sourceName)
require(sources.size <= 1)
sources.nonEmpty
}
// Disabled by default
assert(spark.conf.get("spark.sql.streaming.metricsEnabled").toBoolean === false)
withSQLConf("spark.sql.streaming.metricsEnabled" -> "false") {
testStream(inputData.toDF)(
AssertOnQuery { q => !isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
// Registered when enabled
withSQLConf("spark.sql.streaming.metricsEnabled" -> "true") {
testStream(inputData.toDF)(
AssertOnQuery { q => isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
}
test("input row calculation with mixed batch and streaming sources") {
val streamingTriggerDF = spark.createDataset(1 to 10).toDF
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF).toDF("value")
val staticInputDF = spark.createDataFrame(Seq(1 -> "1", 2 -> "2")).toDF("value", "anotherValue")
// Trigger input has 10 rows, static input has 2 rows,
// therefore after the first trigger, the calculated input rows should be 10
val progress = getFirstProgress(streamingInputDF.join(staticInputDF, "value"))
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
test("input row calculation with trigger input DF having multiple leaves") {
val streamingTriggerDF =
spark.createDataset(1 to 5).toDF.union(spark.createDataset(6 to 10).toDF)
require(streamingTriggerDF.logicalPlan.collectLeaves().size > 1)
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF)
// After the first trigger, the calculated input rows should be 10
val progress = getFirstProgress(streamingInputDF)
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
testQuietly("StreamExecution metadata garbage collection") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(6 / _)
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "1") {
      // Run 3 batches, and then assert that only 2 metadata files are at the end
// since the first should have been purged.
testStream(mapped)(
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
AddData(inputData, 1, 2),
CheckAnswer(6, 3, 6, 3),
AddData(inputData, 4, 6),
CheckAnswer(6, 3, 6, 3, 1, 1),
AssertOnQuery("metadata log should contain only two files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 2 && toTest.head == "1")
true
}
)
}
val inputData2 = MemoryStream[Int]
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2") {
      // Run 5 batches, and then assert that 3 metadata files are at the end
      // since the first two should have been purged.
testStream(inputData2.toDS())(
AddData(inputData2, 1, 2),
CheckAnswer(1, 2),
AddData(inputData2, 1, 2),
CheckAnswer(1, 2, 1, 2),
AddData(inputData2, 3, 4),
CheckAnswer(1, 2, 1, 2, 3, 4),
AddData(inputData2, 5, 6),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6),
AddData(inputData2, 7, 8),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6, 7, 8),
AssertOnQuery("metadata log should contain three files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 3 && toTest.head == "2")
true
}
)
}
}
testQuietly("StreamingQuery should be Serializable but cannot be used in executors") {
def startQuery(ds: Dataset[Int], queryName: String): StreamingQuery = {
ds.writeStream
.queryName(queryName)
.format("memory")
.start()
}
val input = MemoryStream[Int]
val q1 = startQuery(input.toDS, "stream_serializable_test_1")
val q2 = startQuery(input.toDS.map { i =>
      // Emulate that `StreamingQuery` gets captured with normal usage unintentionally.
// It should not fail the query.
q1
i
}, "stream_serializable_test_2")
val q3 = startQuery(input.toDS.map { i =>
// Emulate that `StreamingQuery` is used in executors. We should fail the query with a clear
// error message.
q1.explain()
i
}, "stream_serializable_test_3")
try {
input.addData(1)
// q2 should not fail since it doesn't use `q1` in the closure
q2.processAllAvailable()
// The user calls `StreamingQuery` in the closure and it should fail
val e = intercept[StreamingQueryException] {
q3.processAllAvailable()
}
assert(e.getCause.isInstanceOf[SparkException])
assert(e.getCause.getCause.isInstanceOf[IllegalStateException])
assert(e.getMessage.contains("StreamingQuery cannot be used in executors"))
} finally {
q1.stop()
q2.stop()
q3.stop()
}
}
test("StreamExecution should call stop() on sources when a stream is stopped") {
var calledStop = false
val source = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(StopStream)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
testQuietly("SPARK-19774: StreamExecution should call stop() on sources when a stream fails") {
var calledStop = false
val source1 = new Source {
override def stop(): Unit = {
throw new RuntimeException("Oh no!")
}
override def getOffset: Option[Offset] = Some(LongOffset(1))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.range(2).toDF(MockSourceProvider.fakeSchema.fieldNames: _*)
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
val source2 = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source1, source2) {
val df1 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
val df2 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
testStream(df1.union(df2).map(i => i / 0))(
AssertOnQuery { sq =>
intercept[StreamingQueryException](sq.processAllAvailable())
sq.exception.isDefined && !sq.isActive
}
)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
test("get the query id in source") {
@volatile var queryId: String = null
val source = new Source {
override def stop(): Unit = {}
override def getOffset: Option[Offset] = {
queryId = spark.sparkContext.getLocalProperty(StreamExecution.QUERY_ID_KEY)
None
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = spark.emptyDataFrame
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(
AssertOnQuery { sq =>
sq.processAllAvailable()
assert(sq.id.toString === queryId)
assert(sq.runId.toString !== queryId)
true
}
)
}
}
  /** Create a streaming DF that only executes one batch, in which it returns the given static DF */
private def createSingleTriggerStreamingDF(triggerDF: DataFrame): DataFrame = {
require(!triggerDF.isStreaming)
    // A streaming Source that generates only one trigger and returns the given DataFrame as its batch
val source = new Source() {
override def schema: StructType = triggerDF.schema
override def getOffset: Option[Offset] = Some(LongOffset(0))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
sqlContext.internalCreateDataFrame(
triggerDF.queryExecution.toRdd, triggerDF.schema, isStreaming = true)
}
override def stop(): Unit = {}
}
StreamingExecutionRelation(source)
}
/** Returns the query progress at the end of the first trigger of streaming DF */
private def getFirstProgress(streamingDF: DataFrame): StreamingQueryProgress = {
try {
val q = streamingDF.writeStream.format("memory").queryName("test").start()
q.processAllAvailable()
q.recentProgress.head
} finally {
spark.streams.active.map(_.stop())
}
}
/**
* A [[StreamAction]] to test the behavior of `StreamingQuery.awaitTermination()`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
case class TestAwaitTermination(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int = -1,
expectedReturnValue: Boolean = false
) extends AssertOnQuery(
TestAwaitTermination.assertOnQueryCondition(expectedBehavior, timeoutMs, expectedReturnValue),
"Error testing awaitTermination behavior"
) {
override def toString(): String = {
s"TestAwaitTermination($expectedBehavior, timeoutMs = $timeoutMs, " +
s"expectedReturnValue = $expectedReturnValue)"
}
}
object TestAwaitTermination {
/**
* Tests the behavior of `StreamingQuery.awaitTermination`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
def assertOnQueryCondition(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int,
expectedReturnValue: Boolean
)(q: StreamExecution): Boolean = {
def awaitTermFunc(): Unit = {
if (timeoutMs <= 0) {
q.awaitTermination()
} else {
val returnedValue = q.awaitTermination(timeoutMs)
assert(returnedValue === expectedReturnValue, "Returned value does not match expected")
}
}
AwaitTerminationTester.test(expectedBehavior, awaitTermFunc)
true // If the control reached here, then everything worked as expected
}
}
}
object StreamingQuerySuite {
// Singleton reference to clock that does not get serialized in task closures
var clock: StreamManualClock = null
}
|
stanzhai/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
|
Scala
|
apache-2.0
| 28,535 |
/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */
package example.proto
sealed trait EnumProtocol
final object Suit extends Enumeration with EnumProtocol {
type Suit = Value
val SPADES, HEARTS, DIAMONDS, CLUBS = Value
}
final case class Card(suit: Suit.Value, number: Int) extends EnumProtocol
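// Hedged usage sketch, not part of the avrohugger-generated file above: constructing a value of
// the generated case class and reading the enumeration back. CardExample is a hypothetical name
// added for illustration only.
object CardExample {
  val aceOfSpades: Card = Card(Suit.SPADES, 1)
  val suitName: String = aceOfSpades.suit.toString // "SPADES"
}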
|
julianpeeters/avrohugger
|
avrohugger-core/src/test/expected/standard/example/proto/EnumProtocol.scala
|
Scala
|
apache-2.0
| 314 |
package ai.agnos.sparql.stream.client
import ai.agnos.sparql._
import java.io.{StringReader, StringWriter}
import java.net.URL
import java.nio.file.Path
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.Accept
import akka.http.scaladsl.model.{HttpEntity, _}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{FileIO, Flow, Framing, Source}
import akka.util.ByteString
import ai.agnos.sparql.api._
import ai.agnos.sparql.util.HttpEndpoint
import org.eclipse.rdf4j.model.{IRI, Model}
import org.eclipse.rdf4j.rio.{RDFFormat, Rio}
import scala.util.{Failure, Success, Try}
import scala.concurrent.duration._
object GraphStoreRequestFlowBuilder {
/**
* A Set of status codes on which the response will always show success = true
*/
val successfulHttpResponseStatusCodes: Set[StatusCode] = {
Set(
StatusCodes.OK,
StatusCodes.Created,
StatusCodes.Accepted,
StatusCodes.NoContent,
StatusCodes.AlreadyReported
)
}
/**
* A Set of status codes which the flow can handle gracefully, even though
   * these all mean that the operation has failed. In all these cases, however,
* the stream remains open. Any codes not shown on the success or failure
* list will cause the stream to fail and complete prematurely.
*/
val failingHttpResponseStatusCodes: Set[StatusCode] = {
Set(
StatusCodes.NotFound,
StatusCodes.Unauthorized,
StatusCodes.PaymentRequired,
StatusCodes.Forbidden,
StatusCodes.NotFound,
StatusCodes.ProxyAuthenticationRequired,
StatusCodes.RequestTimeout ,
StatusCodes.Conflict,
StatusCodes.Gone
)
}
}
trait GraphStoreRequestFlowBuilder extends SparqlClientHelpers with HttpClientFlowBuilder with ErrorHandlerSupport {
import SparqlClientConstants._
import GraphStoreRequestFlowBuilder._
implicit val system: ActorSystem
implicit val materializer: ActorMaterializer
/**
* If this is set to true then the response entity is "strictified", i.e. all chunks are loaded
   * into memory in one go. However, when this is false, the result is processed
   * as a proper stream, but there is a risk that the user might
   * get more than a single response per request.
*/
val useStrictByteStringStrategy = true
/**
* Specifies for how long to wait for a "strict" http entity.
*/
val strictEntityReadTimeout: FiniteDuration = 60 seconds
/**
* Limit the response entity size to 100MB by default
*/
val strictEntityMaximumLengthInBytes: Int = 100 * 1024 * 1024
def graphStoreRequestFlow(
endpointFlow: HttpEndpointFlow[GraphStoreRequest]
): Flow[GraphStoreRequest, GraphStoreResponse, NotUsed] = {
Flow
.fromFunction(graphStoreOpToRequest(endpointFlow.endpoint))
.log("beforeHttpRequest")
.via(endpointFlow.flow)
.log("afterHttpRequest")
.flatMapConcat {
case (Success(response), request) =>
val gsr = GraphStoreResponse(
request,
success = calculateSuccess(response.status),
statusCode = response.status.intValue,
statusText = response.status.reason
)
makeModelSource(response.entity).map( s => gsr.copy(model = s))
case (Failure(error), _) =>
          // the handler can choose to throw, which will collapse the stream, or to ignore the error, in which case
// no response will be returned
errorHandler.handleError(error)
Source.empty
}
}
def makeModelSource(entity: HttpEntity): Source[Option[Model], Any] = {
if ( !entity.isChunked() && (entity.isKnownEmpty() || entity.contentLengthOption.getOrElse(0) == 0)) {
      // if we know there are no bytes in the entity (no graph has been returned)
      // or the response content type is not what we have requested, then no model is emitted.
entity.discardBytes()
Source.single(None)
} else if ( !useStrictByteStringStrategy && entity.isChunked()) {
// NB: mapping over the data bytes stream won't work because the stream will never emit for empty entities
// the trick is to introduce a scan() call, which will emit an empty string even if nothing comes through.
entity.withoutSizeLimit().dataBytes
        .via(Framing.delimiter(ByteString.fromString("\n"), maximumFrameLength = strictEntityMaximumLengthInBytes, allowTruncation = true))
.scan(ByteString.empty)((a,b) => b ++ a)
.filter(_.nonEmpty)
.map { bs =>
if ( !bs.isEmpty ) {
val reader = new StringReader(bs.utf8String)
val mt = Try(Rio.parse(reader, "", mapContentTypeToRdfFormat(entity.contentType)))
mt.toOption
} else {
None
}
}
} else { //i.e. if useStrictByteStringStrategy is true
// this workaround does seem to be alright for smaller graphs that can be
      // converted to a strict in-memory entity - currently this is the default (see useStrictByteStringStrategy)
Source.single(entity.withSizeLimit(strictEntityMaximumLengthInBytes))
.mapAsync(numberOfCpuCores)(_.toStrict(strictEntityReadTimeout))
.map { bs =>
val reader = new StringReader(bs.data.utf8String)
val mt = Try(Rio.parse(reader, "", mapContentTypeToRdfFormat(entity.contentType)))
mt.toOption
}
}
}
/**
* Returns true or false if a supported success or failure code is given. For unsupported
* codes, a SparqlClientRequestFailed is thrown.
*
* @param statusCode
* @return
*/
def calculateSuccess(statusCode: StatusCode): Boolean = {
if (successfulHttpResponseStatusCodes.contains(statusCode)) true
else if (failingHttpResponseStatusCodes.contains(statusCode)) false
else {
throw SparqlClientRequestFailed(s"request failed with status code: $statusCode")
}
}
def graphStoreOpToRequest(endpoint: HttpEndpoint)
(graphStoreRequest: GraphStoreRequest): (HttpRequest, GraphStoreRequest) = {
(makeHttpRequest(endpoint, graphStoreRequest), graphStoreRequest)
}
def makeHttpRequest(endpoint: HttpEndpoint, request: GraphStoreRequest): HttpRequest = {
request match {
case GetGraphM(graphUri, method) =>
HttpRequest(
method, uri = s"${endpoint.path}${mapGraphOptionToPath(graphUri)}"
).withHeaders(
Accept(`application/n-triples`.mediaType)
:: makeRequestHeaders(endpoint)
)
case DropGraphM(graphUri, method) =>
HttpRequest(
method, uri = s"${endpoint.path}${mapGraphOptionToPath(graphUri)}"
).withHeaders(makeRequestHeaders(endpoint))
case InsertGraphFromModelM(model, format, graphUri, method) =>
makeInsertGraphHttpRequest(endpoint, method, graphUri, mapRdfFormatToContentType(format)) {
() => makeGraphSource(model, format)
}
case InsertGraphFromURLM(url, format, graphUri, method) =>
makeInsertGraphHttpRequest(endpoint, method, graphUri, mapRdfFormatToContentType(format)) {
() => makeGraphSource(url, format)
}
case InsertGraphFromPathM(path, format, graphUri, method) =>
makeInsertGraphHttpRequest(endpoint, method, graphUri, mapRdfFormatToContentType(format)) {
() => makeGraphSource(path, format)
}
}
}
private def makeInsertGraphHttpRequest
(
endpoint: HttpEndpoint,
method: HttpMethod,
graphIri: Option[IRI],
contentType: ContentType
)
(
entitySourceCreator: () => Source[ByteString, Any]
): HttpRequest = {
HttpRequest(
method, uri = s"${endpoint.path}${mapGraphOptionToPath(graphIri)}"
)
.withHeaders(makeRequestHeaders(endpoint))
.withEntity(
entity = HttpEntity(
contentType = contentType,
data = entitySourceCreator()
)
)
}
private def mapGraphOptionToPath(graphIri: Option[IRI]): String = graphIri match {
case Some(uri) => s"?$GRAPH_PARAM_NAME=${urlEncode(uri.toString)}"
case None => s"?$DEFAULT_PARAM_NAME"
}
private def makeGraphSource(model: Model, format: RDFFormat): Source[ByteString, Any] = {
Source.single(model)
.map { model =>
val writer = new StringWriter()
Rio.write(model, writer, format)
ByteString(writer.getBuffer.toString, "UTF-8")
}
}
private def makeGraphSource(fileUrl: URL, format: RDFFormat): Source[ByteString, Any] = {
Source.single(Uri(fileUrl.toURI.toString))
.mapAsync(1)(uri => Http().singleRequest {
HttpRequest(uri = uri).withHeaders(Accept(mapRdfFormatToContentType(format).mediaType))
})
.flatMapConcat(res => res.entity.dataBytes)
}
private def makeGraphSource(filePath: Path, format: RDFFormat): Source[ByteString, Any] = {
FileIO.fromPath(filePath)
}
}
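/*
 * Hedged illustration, not part of the original source: a small standalone helper that
 * shows how the two status-code sets in the companion object above are meant to be read
 * by callers of `calculateSuccess` -- success, graceful failure with the stream kept
 * open, or an unsupported code that fails the stream. The `Outcome` names are
 * assumptions made for this sketch only.
 */
object GraphStoreStatusCodeExample {
  import GraphStoreRequestFlowBuilder._

  sealed trait Outcome
  case object Succeeded extends Outcome
  case object FailedButStreamStaysOpen extends Outcome
  case object FailsTheStream extends Outcome

  /** Classifies a status code the same way graphStoreRequestFlow ultimately treats it. */
  def classify(code: StatusCode): Outcome =
    if (successfulHttpResponseStatusCodes.contains(code)) Succeeded
    else if (failingHttpResponseStatusCodes.contains(code)) FailedButStreamStaysOpen
    else FailsTheStream

  // classify(StatusCodes.NoContent)  == Succeeded
  // classify(StatusCodes.NotFound)   == FailedButStreamStaysOpen
  // classify(StatusCodes.BadGateway) == FailsTheStream
}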
|
modelfabric/reactive-sparql
|
src/main/scala/ai/agnos/sparql/stream/client/GraphStoreRequestFlowBuilder.scala
|
Scala
|
mit
| 8,899 |
/*
* Copyright 2010-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package webapptest
import org.specs2.matcher.XmlMatchers
import org.specs2.mutable.Specification
import util._
import http._
import testing._
import Helpers._
import java.net.{URL, InetAddress}
import snippet.Counter
import net.liftweb.common.Full
object OneShot extends Specification with RequestKit with XmlMatchers {
sequential
private def reachableLocalAddress = {
val l = InetAddress.getLocalHost
tryo { l.isReachable(50) } match {
case Full(true) => l.getHostAddress
case _ => "127.0.0.1"
}
}
private val host_ = System.getProperty("net.liftweb.webapptest.oneshot.host", reachableLocalAddress)
private val port_ = System.getProperty("net.liftweb.webapptest.oneshot.port", "8181").toInt
private lazy val baseUrl_ = new URL("http://%s:%s".format(host_, port_))
private lazy val jetty = new JettyTestServer(Full(baseUrl_))
def baseUrl = jetty.baseUrl.toString
step(jetty.start())
"ContainerVars" should {
"have correct int default" in {
val tmp = LiftRules.sessionCreator
try {
LiftRules.sessionCreator = LiftRules.sessionCreatorForMigratorySessions
val bx =
for {
resp <- get("/cv_int")
xml <- resp.xml
} yield xml
bx.openOrThrowException("legacy code") must ==/ (<int>45</int>).when(jetty.running)
} finally {
LiftRules.sessionCreator = tmp
}
}
"be settable as Int" in {
val tmp = LiftRules.sessionCreator
try {
LiftRules.sessionCreator = LiftRules.sessionCreatorForMigratorySessions
val bx =
for {
resp <- get("/cv_int/33")
resp2 <- resp.get("/cv_int")
xml <- resp2.xml
} yield xml
bx.openOrThrowException("legacy code") must ==/ (<int>33</int>).when(jetty.running)
} finally {
LiftRules.sessionCreator = tmp
}
}
"be session aware" in {
val tmp = LiftRules.sessionCreator
try {
LiftRules.sessionCreator = LiftRules.sessionCreatorForMigratorySessions
val bx =
for {
resp <- get("/cv_int/33")
resp2 <- resp.get("/cv_int")
xml <- resp2.xml
resp3 <- get("/cv_int")
xml2 <- resp3.xml
} yield (xml, xml2)
bx.openOrThrowException("legacy code")._1 must ==/ (<int>33</int>).when(jetty.running)
bx.openOrThrowException("legacy code")._2 must ==/ (<int>45</int>).when(jetty.running)
} finally {
LiftRules.sessionCreator = tmp
}
}
"support multiple vars" in {
val tmp = LiftRules.sessionCreator
try {
LiftRules.sessionCreator = LiftRules.sessionCreatorForMigratorySessions
val bx =
for {
resp <- get("/cv_int/33")
resp2 <- resp.get("/cv_int")
respx <- resp.get("/cv_str/meow")
resp3 <- resp.get("/cv_str")
xml <- resp2.xml
xml2 <- resp3.xml
} yield (xml, xml2)
bx.openOrThrowException("legacy code")._1 must ==/(<int>33</int>).when(jetty.running)
bx.openOrThrowException("legacy code")._2 must ==/(<str>meow</str>).when(jetty.running)
} finally {
LiftRules.sessionCreator = tmp
}
}
}
"OneShot" should {
"fire once for oneshot" in {
Counter.x = 0
for {
resp <- get("/oneshot")
xml <- resp.html5AsXml
span <- (xml \\ "span").filter(x => (x \ "@id").text == "one")
in <- (span \\ "input")
name <- in \ "@name"
} {
resp.get("/oneshot?" + urlEncode(name.text) + "=3")
resp.get("/oneshot?" + urlEncode(name.text) + "=3")
}
Counter.x must be_==(1).when(jetty.running)
}
"fire multiple times for normal" in {
Counter.x = 0
for {
resp <- get("/oneshot")
xml <- resp.html5AsXml
span <- (xml \\ "span").filter(x => (x \ "@id").text == "two")
in <- (span \\ "input")
name <- in \ "@name"
} {
resp.get("/oneshot?" + urlEncode(name.text) + "=3")
resp.get("/oneshot?" + urlEncode(name.text) + "=3")
}
Counter.x must be_>=(2).when(jetty.running)
}
}
step {
tryo {
jetty.stop()
}
}
}
|
lzpfmh/framework-2
|
web/webkit/src/test/scala/net/liftweb/webapptest/OneShot.scala
|
Scala
|
apache-2.0
| 4,945 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.history
import org.apache.spark.{SparkFunSuite, Success, TaskState}
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.scheduler._
import org.apache.spark.sql.execution.ui.{SparkListenerDriverAccumUpdates, SparkListenerSQLAdaptiveExecutionUpdate, SparkListenerSQLExecutionEnd, SparkListenerSQLExecutionStart}
import org.apache.spark.status.ListenerEventsTestHelper.{createRddsWithId, createStage, createTasks}
class SQLLiveEntitiesEventFilterSuite extends SparkFunSuite {
test("filter in events for jobs related to live SQL execution") {
// assume finished job 1 with stage 1, task (1, 2), rdds (1, 2) and finished sql execution id 1
// live job 2 with stages 2, tasks (3, 4), rdds (3, 4) and job 2 belongs to the live
// sql execution id 2
val liveSQLExecutions = Set(2L)
val liveJobs = Set(2)
val liveStages = Set(2, 3)
val liveTasks = Set(3L, 4L, 5L, 6L)
val liveRDDs = Set(3, 4, 5, 6)
val liveExecutors: Set[String] = Set("1", "2")
val filter = new SQLLiveEntitiesEventFilter(liveSQLExecutions, liveJobs, liveStages, liveTasks,
liveRDDs)
val acceptFn = filter.acceptFn().lift
// Verifying with finished SQL execution 1
assert(Some(false) === acceptFn(SparkListenerSQLExecutionStart(1, "description1", "details1",
"plan", null, 0)))
assert(Some(false) === acceptFn(SparkListenerSQLExecutionEnd(1, 0)))
assert(Some(false) === acceptFn(SparkListenerSQLAdaptiveExecutionUpdate(1, "plan", null)))
assert(Some(false) === acceptFn(SparkListenerDriverAccumUpdates(1, Seq.empty)))
// Verifying with finished job 1
val rddsForStage1 = createRddsWithId(1 to 2)
val stage1 = createStage(1, rddsForStage1, Nil)
val tasksForStage1 = createTasks(Seq(1L, 2L), liveExecutors.toArray, 0)
tasksForStage1.foreach { task => task.markFinished(TaskState.FINISHED, 5) }
val jobStartEventForJob1 = SparkListenerJobStart(1, 0, Seq(stage1))
val jobEndEventForJob1 = SparkListenerJobEnd(1, 0, JobSucceeded)
val stageSubmittedEventsForJob1 = SparkListenerStageSubmitted(stage1)
val stageCompletedEventsForJob1 = SparkListenerStageCompleted(stage1)
val unpersistRDDEventsForJob1 = (1 to 2).map(SparkListenerUnpersistRDD)
// job events for finished job should be considered as "don't know"
assert(None === acceptFn(jobStartEventForJob1))
assert(None === acceptFn(jobEndEventForJob1))
// stage events for finished job should be considered as "don't know"
assert(None === acceptFn(stageSubmittedEventsForJob1))
assert(None === acceptFn(stageCompletedEventsForJob1))
unpersistRDDEventsForJob1.foreach { event =>
assert(None === acceptFn(event))
}
val taskSpeculativeTaskSubmittedEvent = SparkListenerSpeculativeTaskSubmitted(stage1.stageId,
stageAttemptId = 1)
assert(None === acceptFn(taskSpeculativeTaskSubmittedEvent))
// task events for finished job should be considered as "don't know"
tasksForStage1.foreach { task =>
val taskStartEvent = SparkListenerTaskStart(stage1.stageId, 0, task)
assert(None === acceptFn(taskStartEvent))
val taskGettingResultEvent = SparkListenerTaskGettingResult(task)
assert(None === acceptFn(taskGettingResultEvent))
val taskEndEvent = SparkListenerTaskEnd(stage1.stageId, 0, "taskType",
Success, task, new ExecutorMetrics, null)
assert(None === acceptFn(taskEndEvent))
}
// Verifying with live SQL execution 2
assert(Some(true) === acceptFn(SparkListenerSQLExecutionStart(2, "description2", "details2",
"plan", null, 0)))
assert(Some(true) === acceptFn(SparkListenerSQLExecutionEnd(2, 0)))
assert(Some(true) === acceptFn(SparkListenerSQLAdaptiveExecutionUpdate(2, "plan", null)))
assert(Some(true) === acceptFn(SparkListenerDriverAccumUpdates(2, Seq.empty)))
// Verifying with live job 2
val rddsForStage2 = createRddsWithId(3 to 4)
val stage2 = createStage(2, rddsForStage2, Nil)
val tasksForStage2 = createTasks(Seq(3L, 4L), liveExecutors.toArray, 0)
tasksForStage1.foreach { task => task.markFinished(TaskState.FINISHED, 5) }
val jobStartEventForJob2 = SparkListenerJobStart(2, 0, Seq(stage2))
val stageSubmittedEventsForJob2 = SparkListenerStageSubmitted(stage2)
val stageCompletedEventsForJob2 = SparkListenerStageCompleted(stage2)
val unpersistRDDEventsForJob2 = rddsForStage2.map { rdd => SparkListenerUnpersistRDD(rdd.id) }
// job events for live job should be accepted
assert(Some(true) === acceptFn(jobStartEventForJob2))
// stage events for live job should be accepted
assert(Some(true) === acceptFn(stageSubmittedEventsForJob2))
assert(Some(true) === acceptFn(stageCompletedEventsForJob2))
unpersistRDDEventsForJob2.foreach { event =>
assert(Some(true) === acceptFn(event))
}
val taskSpeculativeTaskSubmittedEvent2 = SparkListenerSpeculativeTaskSubmitted(stage2.stageId,
stageAttemptId = 1)
assert(Some(true) === acceptFn(taskSpeculativeTaskSubmittedEvent2))
// task events for live job should be accepted
tasksForStage2.foreach { task =>
val taskStartEvent = SparkListenerTaskStart(stage2.stageId, 0, task)
assert(Some(true) === acceptFn(taskStartEvent))
val taskGettingResultEvent = SparkListenerTaskGettingResult(task)
assert(Some(true) === acceptFn(taskGettingResultEvent))
val taskEndEvent = SparkListenerTaskEnd(stage1.stageId, 0, "taskType",
Success, task, new ExecutorMetrics, null)
assert(Some(true) === acceptFn(taskEndEvent))
}
}
}
|
goldmedal/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/history/SQLLiveEntitiesEventFilterSuite.scala
|
Scala
|
apache-2.0
| 6,439 |
// A Spritz Cipher driver program to Encrypt/Decrypt files.
// This implementation is copyright 2015 Richard Todd
// The license is GPL; see the license file in the repository.
package rwt.spritz
import com.waywardcode.crypto.{SpritzCipher, SpritzInputStream, SpritzOutputStream}
import java.io.{File,FileInputStream,FileOutputStream}
object Crypt {
/** uses read()/write() to copy one iostream to another.
*/
private def copy(instr: java.io.InputStream,
outstr: java.io.OutputStream): Unit = {
val buffer = new Array[Byte](4096)
var count = instr.read(buffer)
while(count >= 0) {
outstr.write(buffer,0,count)
count = instr.read(buffer)
}
}
/** changes the path of a file, preserving the name */
private def changeDir(infl: String, odir: String): String = odir match {
case "" => infl
case _ => new File(odir, new File(infl).getName).toString
}
/** checks a file to make sure it can be decrypted with the given
* password.
*/
private def checkOne(pw: String)(fname: String): String = {
val instream = fname match {
case "-" => System.in
case _ => new FileInputStream(fname)
}
try {
val insideName = new SpritzInputStream(pw, instream).
originalName.
getOrElse("(no name)")
s"$fname: correct password! File inside is $insideName"
} catch {
case e: IllegalStateException => s"$fname: $e"
}finally {
instream.close()
}
}
private def decryptOne(pw: String, odir: Option[String])(fname: String): String = {
val instream = if( fname == "-" ) System.in else new FileInputStream(fname)
val cipher = new SpritzInputStream(pw, instream)
val outname = cipher.originalName.getOrElse {
if(fname.endsWith(".spritz")) fname.dropRight(7)
else (fname + ".unenc")
}
val outstream = if( fname == "-" ) System.out
else {
var outdir = odir.getOrElse( new File(fname).getParent )
if( outdir == null ) outdir = ""
new FileOutputStream(changeDir(outname,outdir))
}
try {
copy(cipher.inputStream, outstream)
s"$fname -decrypt-> $outname"
} finally {
instream.close()
outstream.close()
}
}
private def encryptOne(pw: String, odir: Option[String])(fname: String): String = {
val outname = changeDir(fname + ".dat", odir.getOrElse(""))
val (instream, outstream, origName) = fname match {
case "-" => (System.in, System.out, None)
case _ => (new FileInputStream(fname),
new FileOutputStream(outname),
Some(fname))
}
val encOut = new SpritzOutputStream(origName, pw, outstream)
try {
copy(instream, encOut.outputStream)
s"$fname -encrypt-> $outname"
} finally {
instream.close()
encOut.close()
}
}
def cmd(args: List[String]): Unit = {
var decrypt = false
var check = false // check supersedes decrypt, if given
var passwd = ""
var odir: Option[String] = None
@annotation.tailrec
def parseArgs(args: List[String]): List[String] = {
args match {
case "-c" :: rest => check = true
parseArgs(rest)
case "-d" :: rest => decrypt = true
parseArgs(rest)
case "-p" :: str :: rest => passwd = str
parseArgs(rest)
case "-o" :: str :: rest => odir = Some(str)
parseArgs(rest)
case rest => rest
}
}
var flist = parseArgs(args)
if(passwd.length == 0) {
passwd = Passwords.getPassword("Password: ", (!decrypt)&&(!check)).getOrElse("")
if (passwd.length == 0) {
throw new Exception("Password Required!")
}
}
val process = if (check) checkOne(passwd)_
else if (decrypt) decryptOne(passwd,odir)_
else encryptOne(passwd,odir)_
val printout: String=>Unit = if (flist.isEmpty) (x) => { } else println
if (flist.isEmpty) { flist = List("-") }
flist.par.foreach { f => printout(process(f)) }
}
}
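// Hedged usage sketch, not part of the original program: drives Crypt.cmd
// programmatically with the flags parsed above. The file names and password are
// made up for illustration; the files must exist for the calls to succeed.
object CryptUsageExample {
  def main(args: Array[String]): Unit = {
    // encrypt notes.txt with an explicit password (writes notes.txt.dat)
    Crypt.cmd(List("-p", "secret", "notes.txt"))
    // verify the password against the encrypted file without decrypting it
    Crypt.cmd(List("-c", "-p", "secret", "notes.txt.dat"))
    // decrypt into /tmp, preferring the original name stored in the encrypted header
    Crypt.cmd(List("-d", "-p", "secret", "-o", "/tmp", "notes.txt.dat"))
  }
}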
|
rwtodd/spritz_cipher
|
scala_version/cmd/crypt.scala
|
Scala
|
gpl-2.0
| 4,526 |
package io.finch.oauth2
import com.twitter.finagle._
import com.twitter.finagle.oauth2.{AuthInfo, GrantHandlerResult}
import com.twitter.util.Await
import io.circe.generic.auto._
import io.finch._
import io.finch.circe._
/**
* A simple example of finch-oauth2 usage
*
* Use the following sbt command to run the application.
*
* {{{
* $ sbt 'examples/runMain io.finch.oauth2.Main'
* }}}
*
* Use the following HTTPie commands to test endpoints.
*
* {{{
* $ http POST :8081/users/auth Authorization:'OAuth dXNlcl9pZDp1c2VyX3NlY3JldA=='\
* grant_type==client_credentials
*
* $ http POST :8081/users/auth grant_type==password username==user_name\
* password==user_password client_id==user_id
*
* $ http POST :8081/users/auth grant_type==authorization_code code==user_auth_code client_id==user_id
*
* $ http GET :8081/users/users/current access_token=='AT-5b0e7e3b-943f-479f-beab-7814814d0315'
*
* $ http POST :8081/users/auth client_id==user_id grant_type==refresh_token\
* refresh_token=='RT-7e1bbf43-e7ba-4a8a-a38e-baf62ce3ceae'
*
* $ http GET :8081/users/unprotected
* }}}
*/
object Main extends App {
case class UnprotectedUser(name: String)
val users: Endpoint[OAuthUser] = get("users" :: "current" :: authorize(InMemoryDataHandler)) {
ai: AuthInfo[OAuthUser] => Ok(ai.user)
}
val tokens: Endpoint[GrantHandlerResult] = post("users" :: "auth" :: issueAccessToken(InMemoryDataHandler))
val unprotected: Endpoint[UnprotectedUser] = get("users" :: "unprotected") {
Ok(UnprotectedUser("unprotected"))
}
Await.ready(Http.server.serve(":8081", (tokens :+: users :+: unprotected).toService))
}
|
travisbrown/finch
|
examples/src/main/scala/io/finch/oauth2/Main.scala
|
Scala
|
apache-2.0
| 1,696 |
package xsbt.boot
import Pre._
object JAnsi {
def uninstall(loader: ClassLoader): Unit = callJAnsi("systemUninstall", loader)
def install(loader: ClassLoader): Unit = callJAnsi("systemInstall", loader)
private[this] def callJAnsi(methodName: String, loader: ClassLoader): Unit = if (isWindows && !isCygwin) callJAnsiMethod(methodName, loader)
private[this] def callJAnsiMethod(methodName: String, loader: ClassLoader): Unit =
try {
val c = Class.forName("org.fusesource.jansi.AnsiConsole", true, loader)
c.getMethod(methodName).invoke(null)
} catch {
case ignore: ClassNotFoundException =>
/* The below code intentionally traps everything. It technically shouldn't trap the
* non-StackOverflowError VirtualMachineErrors and AWTError would be weird, but this is PermGen
* mitigation code that should not render sbt completely unusable if jansi initialization fails.
* [From Mark Harrah, https://github.com/sbt/sbt/pull/633#issuecomment-11957578].
*/
case ex: Throwable => println("Jansi found on class path but initialization failed: " + ex)
}
}
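// Hedged usage sketch, not part of the launcher: brackets a block of console work
// with the reflective install/uninstall pair above, so ANSI handling is always torn
// down again. Which ClassLoader to pass is left to the caller.
private[boot] object JAnsiUsage {
  def withJAnsi[T](loader: ClassLoader)(body: => T): T = {
    JAnsi.install(loader)
    try body
    finally JAnsi.uninstall(loader)
  }
}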
|
xeno-by/old-scalameta-sbt
|
launch/src/main/scala/xsbt/boot/JAnsi.scala
|
Scala
|
bsd-3-clause
| 1,130 |
/**
* @author ven
*/
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package typedef
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.project.Project
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.{PsiClass, PsiClassType, PsiElement}
import org.jetbrains.plugins.scala.caches.CachesUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.PsiTypeParameterExt
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition, ScTrait, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType, ScThisType}
import org.jetbrains.plugins.scala.lang.psi.types.api.{ParameterizedType, TypeParameterType, TypeSystem}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScTypeUtil.AliasType
import org.jetbrains.plugins.scala.macroAnnotations.CachedWithRecursionGuard
import org.jetbrains.plugins.scala.project.ProjectExt
import org.jetbrains.plugins.scala.util.ScEquivalenceUtil
import scala.annotation.tailrec
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
abstract class MixinNodes {
type T
def equiv(t1: T, t2: T): Boolean
def same(t1: T, t2: T): Boolean
def computeHashCode(t: T): Int
def elemName(t: T): String
def isAbstract(t: T): Boolean
def isImplicit(t: T): Boolean
def isPrivate(t: T): Boolean
class Node(val info: T, val substitutor: ScSubstitutor) {
var supers: Seq[Node] = Seq.empty
var primarySuper: Option[Node] = None
}
class Map extends mutable.HashMap[String, ArrayBuffer[(T, Node)]] {
private[Map] val implicitNames: mutable.HashSet[String] = new mutable.HashSet[String]
private val privatesMap: mutable.HashMap[String, ArrayBuffer[(T, Node)]] = mutable.HashMap.empty
def addToMap(key: T, node: Node) {
val name = ScalaPsiUtil.convertMemberName(elemName(key))
(if (!isPrivate(key)) this else privatesMap).
getOrElseUpdate(name, new ArrayBuffer) += ((key, node))
if (isImplicit(key)) implicitNames.add(name)
}
@volatile
private var supersList: List[Map] = List.empty
def setSupersMap(list: List[Map]) {
for (m <- list) {
implicitNames ++= m.implicitNames
}
supersList = list
}
private val calculatedNames: mutable.HashSet[String] = new mutable.HashSet
private val calculated: mutable.HashMap[String, AllNodes] = new mutable.HashMap
private val calculatedSupers: mutable.HashMap[String, AllNodes] = new mutable.HashMap
def forName(name: String): (AllNodes, AllNodes) = {
val convertedName = ScalaPsiUtil.convertMemberName(name)
synchronized {
if (calculatedNames.contains(convertedName)) {
return (calculated(convertedName), calculatedSupers(convertedName))
}
}
val thisMap: NodesMap = toNodesMap(getOrElse(convertedName, new ArrayBuffer))
val maps: List[NodesMap] = supersList.map(sup => toNodesMap(sup.getOrElse(convertedName, new ArrayBuffer)))
val supers = mergeWithSupers(thisMap, mergeSupers(maps))
val list = supersList.flatMap(_.privatesMap.getOrElse(convertedName, new ArrayBuffer[(T, Node)]))
val supersPrivates = toNodesSeq(list)
val thisPrivates = toNodesSeq(privatesMap.getOrElse(convertedName, new ArrayBuffer[(T, Node)]).toList ::: list)
val thisAllNodes = new AllNodes(thisMap, thisPrivates)
val supersAllNodes = new AllNodes(supers, supersPrivates)
synchronized {
calculatedNames.add(convertedName)
calculated.+=((convertedName, thisAllNodes))
calculatedSupers.+=((convertedName, supersAllNodes))
}
(thisAllNodes, supersAllNodes)
}
@volatile
private var forImplicitsCache: List[(T, Node)] = null
def forImplicits(): List[(T, Node)] = {
if (forImplicitsCache != null) return forImplicitsCache
val res = new ArrayBuffer[(T, Node)]()
for (name <- implicitNames) {
val map = forName(name)._1
for (elem <- map) {
if (isImplicit(elem._1)) res += elem
}
}
forImplicitsCache = res.toList
forImplicitsCache
}
def allNames(): mutable.Set[String] = {
val names = new mutable.HashSet[String]
names ++= keySet
names ++= privatesMap.keySet
for (sup <- supersList) {
names ++= sup.keySet
names ++= sup.privatesMap.keySet
}
names
}
private def forAll(): (mutable.HashMap[String, AllNodes], mutable.HashMap[String, AllNodes]) = {
for (name <- allNames()) forName(name)
synchronized {
(calculated, calculatedSupers)
}
}
def allFirstSeq(): Seq[AllNodes] = {
forAll()._1.toSeq.map(_._2)
}
def allSecondSeq(): Seq[AllNodes] = {
      forAll()._2.toSeq.map(_._2)
}
private def toNodesSeq(seq: List[(T, Node)]): NodesSeq = {
val map = new mutable.HashMap[Int, List[(T, Node)]]
for (elem <- seq) {
val key = computeHashCode(elem._1)
val prev = map.getOrElse(key, List.empty)
map.update(key, elem :: prev)
}
new NodesSeq(map)
}
private def toNodesMap(buf: ArrayBuffer[(T, Node)]): NodesMap = {
val res = new NodesMap
res ++= buf
res
}
private class MultiMap extends mutable.HashMap[T, mutable.Set[Node]] with collection.mutable.MultiMap[T, Node] {
override def elemHashCode(t : T) = computeHashCode(t)
override def elemEquals(t1 : T, t2 : T) = equiv(t1, t2)
override def makeSet = new mutable.LinkedHashSet[Node]
}
private object MultiMap {def empty = new MultiMap}
private def mergeSupers(maps: List[NodesMap]) : MultiMap = {
val res = MultiMap.empty
val mapsIterator = maps.iterator
while (mapsIterator.hasNext) {
val currentIterator = mapsIterator.next().iterator
while (currentIterator.hasNext) {
val (k, node) = currentIterator.next()
res.addBinding(k, node)
}
}
res
}
//Return primary selected from supersMerged
private def mergeWithSupers(thisMap: NodesMap, supersMerged: MultiMap): NodesMap = {
val primarySupers = new NodesMap
for ((key, nodes) <- supersMerged) {
val primarySuper = nodes.find {n => !isAbstract(n.info)} match {
case None => nodes.toList.head
case Some(concrete) => concrete
}
primarySupers += ((key, primarySuper))
thisMap.get(key) match {
case Some(node) =>
node.primarySuper = Some(primarySuper)
node.supers = nodes.toSeq
case None =>
nodes -= primarySuper
primarySuper.supers = nodes.toSeq
thisMap += ((key, primarySuper))
}
}
primarySupers
}
}
class AllNodes(publics: NodesMap, privates: NodesSeq) {
def get(s: T): Option[Node] = {
publics.get(s) match {
case res: Some[Node] => res
case _ => privates.get(s)
}
}
def foreach(p: ((T, Node)) => Unit) {
publics.foreach(p)
privates.map.values.flatten.foreach(p)
}
def map[R](p: ((T, Node)) => R): Seq[R] = {
publics.map(p).toSeq ++ privates.map.values.flatten.map(p)
}
def filter(p: ((T, Node)) => Boolean): Seq[(T, Node)] = {
publics.filter(p).toSeq ++ privates.map.values.flatten.filter(p)
}
def withFilter(p: ((T, Node)) => Boolean) = {
(publics.toSeq ++ privates.map.values.flatten).withFilter(p)
}
def flatMap[R](p: ((T, Node)) => Traversable[R]): Seq[R] = {
publics.flatMap(p).toSeq ++ privates.map.values.flatten.flatMap(p)
}
def iterator: Iterator[(T, Node)] = {
new Iterator[(T, Node)] {
private val iter1 = publics.iterator
private val iter2 = privates.map.values.flatten.iterator
def hasNext: Boolean = iter1.hasNext || iter2.hasNext
def next(): (T, Node) = if (iter1.hasNext) iter1.next() else iter2.next()
}
}
def fastPhysicalSignatureGet(key: T): Option[Node] = {
publics.fastPhysicalSignatureGet(key) match {
case res: Some[Node] => res
case _ => privates.get(key)
}
}
def isEmpty: Boolean = publics.isEmpty && privates.map.values.forall(_.isEmpty)
}
class NodesSeq(private[MixinNodes] val map: mutable.HashMap[Int, List[(T, Node)]]) {
def get(s: T): Option[Node] = {
val list = map.getOrElse(computeHashCode(s), Nil)
val iterator = list.iterator
while (iterator.hasNext) {
val next = iterator.next()
if (same(s, next._1)) return Some(next._2)
}
None
}
def fastPhysicalSignatureGet(key: T): Option[Node] = {
val list = map.getOrElse(computeHashCode(key), List.empty)
list match {
case Nil => None
case x :: Nil => Some(x._2)
case e =>
val iterator = e.iterator
while (iterator.hasNext) {
val next = iterator.next()
if (same(key, next._1)) return Some(next._2)
}
None
}
}
}
class NodesMap extends mutable.HashMap[T, Node] {
override def elemHashCode(t: T) = computeHashCode(t)
override def elemEquals(t1 : T, t2 : T) = equiv(t1, t2)
/**
     * Use this method only if you are sure that the map contains the key
*/
def fastGet(key: T): Option[Node] = {
//todo: possible optimization to filter without types first then if only one variant left, get it.
val h = index(elemHashCode(key))
var e = table(h).asInstanceOf[Entry]
if (e != null && e.next == null) return Some(e.value)
while (e != null) {
if (elemEquals(e.key, key)) return Some(e.value)
e = e.next
if (e.next == null) return Some(e.value)
}
None
}
def fastPhysicalSignatureGet(key: T): Option[Node] = {
key match {
case p: PhysicalSignature =>
val h = index(elemHashCode(key))
var e = table(h).asInstanceOf[Entry]
if (e != null && e.next == null) {
e.value.info match {
case p2: PhysicalSignature =>
if (p.method == p2.method) return Some(e.value)
else return None
case _ => return None
}
}
while (e != null) {
e.value.info match {
case p2: PhysicalSignature =>
if (p.method == p2.method) return Some(e.value)
case _ =>
}
e = e.next
}
fastGet(key)
case _ => fastGet(key)
}
}
}
def build(clazz: PsiClass)(implicit typeSystem: TypeSystem): Map = build(ScalaType.designator(clazz))
def build(tp: ScType, compoundThisType: Option[ScType] = None)
(implicit typeSystem: TypeSystem): Map = {
var isPredef = false
var place: Option[PsiElement] = None
val map = new Map
val superTypesBuff = new ListBuffer[Map]
val (superTypes, subst, thisTypeSubst): (Seq[ScType], ScSubstitutor, ScSubstitutor) = tp match {
case cp: ScCompoundType =>
processRefinement(cp, map, place)
val thisTypeSubst = compoundThisType match {
case Some(_) => new ScSubstitutor(Map.empty, Map.empty, compoundThisType)
case _ => new ScSubstitutor(Predef.Map.empty, Predef.Map.empty, Some(tp))
}
(MixinNodes.linearization(cp), ScSubstitutor.empty, thisTypeSubst)
case _ =>
val clazz = tp match {
case ScDesignatorType(clazz: PsiClass) => clazz
case ScProjectionType(_, clazz: PsiClass, _) => clazz
case _ => null
}
if (clazz == null) (Seq.empty, ScSubstitutor.empty, ScSubstitutor.empty)
else
clazz match {
case template: ScTypeDefinition =>
if (template.qualifiedName == "scala.Predef") isPredef = true
place = Option(template.extendsBlock)
processScala(template, ScSubstitutor.empty, map, place, base = true)
val lin = MixinNodes.linearization(template)
var zSubst = new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(template)))
var placer = template.getContext
while (placer != null) {
placer match {
case t: ScTemplateDefinition => zSubst = zSubst.followed(
new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(t)))
)
case _ =>
}
placer = placer.getContext
}
(if (lin.nonEmpty) lin.tail else lin, ScSubstitutor.empty.putAliases(template), zSubst)
case template: ScTemplateDefinition =>
place = Option(template.asInstanceOf[ScalaStubBasedElementImpl[_]].getLastChildStub)
processScala(template, ScSubstitutor.empty, map, place, base = true)
var zSubst = new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(template)))
var placer = template.getContext
while (placer != null) {
placer match {
case t: ScTemplateDefinition => zSubst = zSubst.followed(
new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(t)))
)
case _ =>
}
placer = placer.getContext
}
(MixinNodes.linearization(template),
ScSubstitutor.empty.putAliases(template), zSubst)
case syn: ScSyntheticClass =>
(syn.getSuperTypes.map { psiType => psiType.toScType(syn.getProject) }: Seq[ScType],
ScSubstitutor.empty, ScSubstitutor.empty)
case clazz: PsiClass =>
place = Option(clazz.getLastChild)
processJava(clazz, ScSubstitutor.empty, map, place)
val lin = MixinNodes.linearization(clazz)
(if (lin.nonEmpty) lin.tail else lin,
ScSubstitutor.empty, ScSubstitutor.empty)
case _ =>
(Seq.empty, ScSubstitutor.empty, ScSubstitutor.empty)
}
}
val iter = superTypes.iterator
while (iter.hasNext) {
val superType = iter.next()
superType.extractClassType(place.map(_.getProject).orNull) match {
case Some((superClass, s)) =>
          // Do not include scala.ScalaObject in Predef's base types, to prevent SOE
if (!(superClass.qualifiedName == "scala.ScalaObject" && isPredef)) {
val dependentSubst = superType match {
case p@ScProjectionType(proj, eem, _) => new ScSubstitutor(proj).followed(p.actualSubst)
case ParameterizedType(p@ScProjectionType(proj, _, _), _) => new ScSubstitutor(proj).followed(p.actualSubst)
case _ => ScSubstitutor.empty
}
val newSubst = combine(s, subst, superClass).followed(thisTypeSubst).followed(dependentSubst)
val newMap = new Map
superClass match {
case template: ScTemplateDefinition => processScala(template, newSubst, newMap, place, base = false)
case syn: ScSyntheticClass =>
            // it's required to do it like this so that Synthetic types can be mixed in
val clazz = ScalaPsiManager.instance(syn.getProject).getCachedClass(syn.getQualifiedName,
GlobalSearchScope.allScope(syn.getProject), ScalaPsiManager.ClassCategory.TYPE
)
clazz match {
case template: ScTemplateDefinition => processScala(template, newSubst, newMap, place, base = false)
case _ => //do nothing
}
case _ => processJava(superClass, newSubst, newMap, place)
}
superTypesBuff += newMap
}
case _ =>
}
(superType.isAliasType match {
case Some(AliasType(td: ScTypeAliasDefinition, lower, _)) => lower.getOrElse(superType)
case _ => superType
}) match {
case c: ScCompoundType =>
processRefinement(c, map, place)
case _ =>
}
}
map.setSupersMap(superTypesBuff.toList)
map
}
def combine(superSubst : ScSubstitutor, derived : ScSubstitutor, superClass : PsiClass) = {
var res : ScSubstitutor = ScSubstitutor.empty
for (typeParameter <- superClass.getTypeParameters) {
res = res bindT(typeParameter.nameAndId, derived.subst(superSubst.subst(TypeParameterType(typeParameter, None))))
}
superClass match {
case td : ScTypeDefinition =>
var aliasesMap = res.aliasesMap
for (alias <- td.aliases) {
derived.aliasesMap.get(alias.name) match {
case Some(t) => aliasesMap = aliasesMap + ((alias.name, t))
case None =>
}
}
res = new ScSubstitutor(res.tvMap, aliasesMap, None)
case _ => ()
}
res
}
def processJava(clazz: PsiClass, subst: ScSubstitutor, map: Map, place: Option[PsiElement])(implicit typeSystem: TypeSystem)
def processScala(template: ScTemplateDefinition, subst: ScSubstitutor, map: Map,
place: Option[PsiElement], base: Boolean)(implicit typeSystem: TypeSystem)
def processRefinement(cp: ScCompoundType, map: Map, place: Option[PsiElement])(implicit typeSystem: TypeSystem)
}
object MixinNodes {
def linearization(clazz: PsiClass): Seq[ScType] = {
@CachedWithRecursionGuard[PsiClass](clazz, Seq.empty, CachesUtil.getDependentItem(clazz)())
def inner(): Seq[ScType] = {
clazz match {
case obj: ScObject if obj.isPackageObject && obj.qualifiedName == "scala" =>
return Seq(ScalaType.designator(obj))
case _ =>
}
ProgressManager.checkCanceled()
val project = clazz.getProject
val tp = {
def default =
if (clazz.getTypeParameters.isEmpty) ScalaType.designator(clazz)
else ScParameterizedType(ScalaType.designator(clazz),
clazz.getTypeParameters.map(TypeParameterType(_, None)))
clazz match {
case td: ScTypeDefinition => td.getType(TypingContext.empty).getOrElse(default)
case _ => default
}
}
val supers: Seq[ScType] = {
clazz match {
case td: ScTemplateDefinition => td.superTypes
case clazz: PsiClass => clazz.getSuperTypes.map {
case ctp: PsiClassType =>
val cl = ctp.resolve()
if (cl != null && cl.qualifiedName == "java.lang.Object") ScDesignatorType(cl)
else ctp.toScType(clazz.getProject)
case ctp => ctp.toScType(clazz.getProject)
}.toSeq
}
}
generalLinearization(Some(project), tp, addTp = true, supers = supers)(project.typeSystem)
}
inner()
}
def linearization(compound: ScCompoundType, addTp: Boolean = false): Seq[ScType] = {
val comps = compound.components
generalLinearization(None, compound, addTp = addTp, supers = comps)(ScalaTypeSystem)
}
private def generalLinearization(project: Option[Project], tp: ScType, addTp: Boolean, supers: Seq[ScType])
(implicit typeSystem: TypeSystem): Seq[ScType] = {
val buffer = new ListBuffer[ScType]
    val set: mutable.HashSet[String] = new mutable.HashSet // qualified names (with kind prefix) of classes already added
def classString(clazz: PsiClass): String = {
clazz match {
case obj: ScObject => "Object: " + obj.qualifiedName
case tra: ScTrait => "Trait: " + tra.qualifiedName
case _ => "Class: " + clazz.qualifiedName
}
}
def add(tp: ScType) {
tp.extractClass(project.orNull) match {
case Some(clazz) if clazz.qualifiedName != null && !set.contains(classString(clazz)) =>
tp +=: buffer
set += classString(clazz)
case Some(clazz) if clazz.getTypeParameters.nonEmpty =>
val i = buffer.indexWhere(_.extractClass(clazz.getProject) match {
case Some(newClazz) if ScEquivalenceUtil.areClassesEquivalent(newClazz, clazz) => true
case _ => false
}
)
if (i != -1) {
val newTp = buffer.apply(i)
if (tp.conforms(newTp)) buffer.update(i, tp)
}
case _ =>
(tp.isAliasType match {
case Some(AliasType(td: ScTypeAliasDefinition, lower, _)) => lower.getOrElse(tp)
case _ => tp
}) match {
case c: ScCompoundType => c +=: buffer
case _ =>
}
}
}
val iterator = supers.iterator
while (iterator.hasNext) {
var tp = iterator.next()
@tailrec
def updateTp(tp: ScType): ScType = {
tp.isAliasType match {
case Some(AliasType(_, _, Success(upper, _))) => updateTp(upper)
case _ =>
tp match {
case ex: ScExistentialType => ex.quantified
case tpt: TypeParameterType => tpt.upperType.v
case _ => tp
}
}
}
tp = updateTp(tp)
tp.extractClassType() match {
case Some((clazz, subst)) =>
val lin = linearization(clazz)
val newIterator = lin.reverseIterator
while (newIterator.hasNext) {
val tp = newIterator.next()
add(subst.subst(tp))
}
case _ =>
(tp.isAliasType match {
case Some(AliasType(td: ScTypeAliasDefinition, lower, _)) => lower.getOrElse(tp)
case _ => tp
}) match {
case c: ScCompoundType =>
val lin = linearization(c, addTp = true)
val newIterator = lin.reverseIterator
while (newIterator.hasNext) {
val tp = newIterator.next()
add(tp)
}
case _ =>
}
}
}
if (addTp) add(tp)
buffer
}
}
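/**
 * Hedged illustration, not part of the original file: a toy version of the linearization
 * merge in generalLinearization above, operating on plain class names instead of ScType,
 * to make the "iterate supers, reverse each, prepend unseen entries, finally prepend the
 * type itself" shape easier to follow. It ignores the type-parameter and compound-type
 * handling of the real code and is not the plugin's actual algorithm.
 */
private object LinearizationToyExample {
  /** `supersLinearized` holds each parent's linearization, most-derived first. */
  def linearize(tp: String, supersLinearized: Seq[Seq[String]]): Seq[String] = {
    val buffer = scala.collection.mutable.ListBuffer.empty[String]
    val seen = scala.collection.mutable.HashSet.empty[String]
    def add(name: String): Unit =
      if (!seen.contains(name)) {
        name +=: buffer
        seen += name
      }
    for (lin <- supersLinearized; name <- lin.reverseIterator) add(name)
    add(tp)
    buffer.toList
  }
  // linearize("C", Seq(Seq("B", "A"), Seq("D", "A"))) == List("C", "D", "B", "A")
}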
|
whorbowicz/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/typedef/MixinNodes.scala
|
Scala
|
apache-2.0
| 22,369 |
package breeze.linalg.functions
import breeze.generic.UFunc
import breeze.linalg._
import breeze.linalg.eig.Eig
import breeze.linalg.eigSym.{DenseEigSym, EigSym}
import breeze.numerics._
import breeze.stats.distributions.Rand
import spire.implicits.cforRange
/**
* Approximate truncated randomized EVD
*/
object evdr extends UFunc {
implicit object EVDR_DM_Impl2 extends Impl2[DenseMatrix[Double], Int, DenseEigSym] {
def apply(M: DenseMatrix[Double], s: Int): DenseEigSym =
doEigSymDouble(M, s, nOversamples = 10, nIter = 0)
}
implicit object EVDR_DM_Impl3 extends Impl3[DenseMatrix[Double], Int, Int, DenseEigSym] {
def apply(M: DenseMatrix[Double], s: Int, nOversamples: Int): DenseEigSym =
doEigSymDouble(M, s, nOversamples, nIter = 0)
}
implicit object EVDR_DM_Impl4 extends Impl4[DenseMatrix[Double], Int, Int, Int, DenseEigSym] {
def apply(M: DenseMatrix[Double], s: Int, nOversamples: Int, nIter: Int): DenseEigSym =
doEigSymDouble(M, s, nOversamples, nIter)
}
/**
* Computes an approximate truncated randomized EVD. Fast on large matrices.
*
* @param M Matrix to decompose
* @param s Number of columns in orthonormal matrix (sketch size)
* @param nOversamples Additional number of random vectors to sample the range of M so as
* to ensure proper conditioning. The total number of random vectors
* used to find the range of M is [s + nOversamples]
* @param nIter Number of power iterations (can be used to deal with very noisy problems)
* @return The eigenvalue decomposition (EVD) with the eigenvalues and the eigenvectors
*
* ==References==
*
* Finding structure with randomness: Stochastic algorithms for constructing
* approximate matrix decompositions
* Halko, et al., 2009 [[http://arxiv.org/abs/arXiv:0909.4061]]
*/
private def doEigSymDouble(M: DenseMatrix[Double],
s: Int,
nOversamples: Int = 10,
nIter: Int = 0): DenseEigSym = {
    require(s <= (M.rows min M.cols), "Number of columns in orthonormal matrix should be at most min(M.rows, M.cols)")
    require(s >= 1, "Sketch size should be at least 1")
val nRandom = s + nOversamples
val Q = randomizedStateFinder(M, nRandom, nIter)
val b = Q.t * (M * Q)
val Eig(w, _, v) = eig(b)
val _u = Q * v
val u = flipSigns(_u)
EigSym(w, u)
}
/**
* Computes an orthonormal matrix whose range approximates the range of M
*
* @param M The input data matrix
* @param size Size of the matrix to return
* @param nIter Number of power iterations used to stabilize the result
* @return A size-by-size projection matrix Q
*
* ==Notes==
*
* Algorithm 4.3 of "Finding structure with randomness:
* Stochastic algorithms for constructing approximate matrix decompositions"
   * Halko, et al., 2009 (arXiv:0909.4061) [[http://arxiv.org/pdf/0909.4061]]
*/
private def randomizedStateFinder(M: DenseMatrix[Double],
size: Int,
nIter: Int): DenseMatrix[Double] = {
val R = DenseMatrix.rand(M.cols, size, rand = Rand.gaussian)
val Y = M * R
cforRange(0 until nIter){ _ =>
Y := M * (M.t * Y)
}
val q = qr.reduced.justQ(Y)
q
}
/**
* Resolves the sign ambiguity. Largest in absolute value entries of u columns are always positive
*
* @param u eigenvectors
* @return eigenvectors with resolved sign ambiguity
*/
private def flipSigns(u: DenseMatrix[Double]): DenseMatrix[Double] = {
import DenseMatrix.canMapValues
val abs_u = abs(u)
val max_abs_cols = (0 until u.cols).map(c => argmax(abs_u(::, c)))
val signs = max_abs_cols.zipWithIndex.map(e => signum(u(e._1, e._2)))
signs.zipWithIndex.foreach(s => {
u(::, s._2) :*= s._1
})
u
}
}
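/**
 * Hedged usage sketch, not part of the original file: calls the Impl2 overload above on
 * a small symmetric matrix. The matrix values and the sketch size of 2 are illustrative
 * assumptions only; real inputs would be much larger.
 */
object EvdrUsageExample {
  def main(args: Array[String]): Unit = {
    // A 3x3 symmetric matrix whose spectrum we approximate with sketch size s = 2.
    val m = DenseMatrix(
      (2.0, 1.0, 0.0),
      (1.0, 2.0, 1.0),
      (0.0, 1.0, 2.0))
    val EigSym(lambda, evs) = evdr(m, 2)
    println(s"approximate eigenvalues:\n$lambda")
    println(s"approximate eigenvectors:\n$evs")
  }
}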
|
claydonkey/breeze
|
math/src/main/scala/breeze/linalg/functions/evdr.scala
|
Scala
|
apache-2.0
| 3,943 |
package de.agilecoders.projects.scaspell.util
import com.twitter.finagle.http.{Response, Request}
import com.twitter.finagle.Service
import com.twitter.finagle.http.service.RoutingService
/**
* TODO miha: document class purpose
*
* @author miha
*/
object RequestAwareRoutingService {
def byRequest[REQUEST <: Request](routes: PartialFunction[Request, Service[REQUEST, Response]]) =
new RoutingService(
new PartialFunction[Request, Service[REQUEST, Response]] {
def apply(request: Request) = routes(request)
def isDefinedAt(request: Request) = routes.isDefinedAt(request)
})
}
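/**
 * Hedged usage sketch, not part of the original file: routes on the full Request
 * (headers, params, path, ...) rather than only the path, which is what byRequest
 * enables. `fooService` and `barService` are hypothetical finagle services supplied
 * by the caller.
 */
object RequestAwareRoutingServiceExample {
  def routed(fooService: Service[Request, Response],
             barService: Service[Request, Response]) =
    RequestAwareRoutingService.byRequest[Request] {
      case request if request.path.startsWith("/foo") => fooService
      case _                                          => barService
    }
}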
|
l0rdn1kk0n/scaspell
|
scaspell-service/src/main/scala/de/agilecoders/projects/scaspell/util/RequestAwareRoutingService.scala
|
Scala
|
apache-2.0
| 651 |
package health
import java.util
import akka.actor.Actor
import spray.routing._
import spray.http._
import spray.client.pipelining._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Success, Failure}
class CheckerActor extends Actor with Checker {
def actorRefFactory = context
def receive = runRoute(checkRoute)
}
trait Checker extends HttpService {
val checkRoute = {
path("health") {
get {
dynamic {
onComplete(check) {
case Success(value) => {
value.foreach(println)
complete("OK")
}
case Failure(ex) => failWith(ex)
}
}
}
}
}
def check: Future[List[String]] = {
Future.sequence(endpoints.map(checkIndividual))
}
def httpIfy(endpoint: String): String =
if (endpoint.startsWith("http://")) {
endpoint
} else {
"http://" + endpoint
}
def endpoints: List[String] = rawEndpoints
.map(_.split(","))
.fold(List.empty[String])(_.to)
.map(httpIfy)
def rawEndpoints: Option[String] = {
Environment.endpoints
}
def checkIndividual(endpoint: String): Future[String] = {
pipeline(Get(endpoint)).map(endpointOK(endpoint, _))
}
def pipeline: HttpRequest => Future[HttpResponse] = sendReceive
def endpointOK(endpoint: String, response: HttpResponse): String = {
if (response.status.isFailure) {
throw new Exception(s"Endpoint '$endpoint' is unhealthy and returned ${response.status}")
} else {
s"[healthcheck] Endpoint '$endpoint' returned ${response.status}"
}
}
}
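// Hedged illustration, not part of the original file: replays the endpoint parsing
// performed by `endpoints`/`httpIfy` above on a hypothetical comma-separated value,
// without touching the environment or the HTTP stack. The endpoint names are made up.
object EndpointParsingExample extends App {
  private def httpIfyLocal(endpoint: String): String =
    if (endpoint.startsWith("http://")) endpoint else "http://" + endpoint

  // Normally this string would come from Environment.endpoints.
  private val raw: Option[String] = Some("service-a:8080/health,http://service-b/health")

  private val parsed: List[String] =
    raw.map(_.split(",").toList).getOrElse(Nil).map(httpIfyLocal)

  // Prints: List(http://service-a:8080/health, http://service-b/health)
  println(parsed)
}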
|
ExpatConnect/health
|
src/main/scala/health/Checker.scala
|
Scala
|
mit
| 1,650 |
package org.orbeon.oxf.externalcontext
import java.{util ⇒ ju}
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.externalcontext.ExternalContext.{Session, SessionListener, SessionScope}
import scala.collection.JavaConverters._
import scala.collection.{immutable ⇒ i}
class SimpleSession(sessionId: String) extends Session {
private val creationTime = System.currentTimeMillis
private val sessionListeners = new ju.LinkedHashSet[SessionListener]
private var expired = false
private var sessionAtts = i.HashMap[String, AnyRef]()
def expireSession(): Unit = {
    for (listener ← sessionListeners.asScala)
listener.sessionDestroyed(this)
expired = true
}
def addListener(sessionListener: SessionListener): Unit = {
checkExpired()
sessionListeners.add(sessionListener)
}
def removeListener(sessionListener: SessionListener): Unit = {
checkExpired()
sessionListeners.remove(sessionListener)
}
def getAttribute(name: String, scope: SessionScope): Option[AnyRef] = {
checkExpired()
sessionAtts.get(name)
}
def setAttribute(name: String, value: AnyRef, scope: SessionScope): Unit = {
checkExpired()
    sessionAtts += name → value
}
def removeAttribute(name: String, scope: SessionScope): Unit = {
checkExpired()
sessionAtts -= name
}
def getCreationTime: Long = {
checkExpired()
creationTime
}
def getId: String = {
checkExpired()
sessionId
}
// TODO
def getLastAccessedTime: Long = {
checkExpired()
0L
}
// TODO
def getMaxInactiveInterval: Int = {
checkExpired()
0
}
// TODO
def invalidate(): Unit = {
checkExpired()
}
// TODO
def isNew: Boolean = {
checkExpired()
false
}
// TODO
def setMaxInactiveInterval(interval: Int): Unit = {
checkExpired()
}
private def checkExpired(): Unit =
if (expired)
throw new OXFException("Cannot call methods on expired session.")
}
|
brunobuzzi/orbeon-forms
|
src/main/scala/org/orbeon/oxf/externalcontext/SimpleSession.scala
|
Scala
|
lgpl-2.1
| 1,992 |
package org.meritocracy.lib
import scala.xml._
import net.liftweb._
import common._
import http.NoticeType
import json._
import util.CssSel
import util.Helpers._
import org.bson.types.ObjectId
trait AppHelpers {
/*
   * Allows the following to be used when building snippets; errors are handled
   * according to handleNodeSeqError:
*
* for {
* user <- User.currentUser ?~ "You must be logged in to edit your profile."
* } yield ({
* ...
* }): NodeSeq
*/
implicit protected def boxNodeSeqToNodeSeq(in: Box[NodeSeq]): NodeSeq = in match {
case Full(ns) => ns
case Failure(msg, _, _) => handleNodeSeqError(msg)
case Empty => handleNodeSeqError("Empty snippet")
}
protected def handleNodeSeqError(msg: String): NodeSeq = Comment("ERROR: %s".format(msg))
/*
   * Allows the following to be used when building snippets; errors are handled
   * according to handleCssSelError:
*
* for {
* user <- User.currentUser ?~ "You must be logged in to edit your profile."
* } yield ({
* ...
* }): CssSel
*/
implicit protected def boxCssSelToCssSel(in: Box[CssSel]): CssSel = in match {
case Full(csssel) => csssel
case Failure(msg, _, _) => handleCssSelError(msg)
case Empty => handleCssSelError("Empty snippet")
}
protected def handleCssSelError(msg: String): CssSel = "*" #> Text("ERROR: %s".format(msg))
/*
* For use in for comprehensions
*/
protected def boolToBox(b: Boolean): Box[Boolean] = if (b) Full(b) else Empty
/*
* For RestHelper API classes
*/
implicit def boxJsonToJsonResponse(in: Box[JValue]): JValue = in match {
case Full(jv) => jv
case Failure(msg, _, _) => JsonAlert.error(msg).asJValue
case Empty => JsonAlert.warning("Empty response").asJValue
}
case class JsonAlert(val message: String, val level: NoticeType.Value) {
import JsonDSL._
def asJValue: JValue = ("error" -> ("message" -> message) ~ ("level" -> level.title))
}
object JsonAlert {
def info(msg: String): JsonAlert = JsonAlert(msg, NoticeType.Notice)
def error(msg: String): JsonAlert = JsonAlert(msg, NoticeType.Error)
def warning(msg: String): JsonAlert = JsonAlert(msg, NoticeType.Warning)
}
object AsObjectId {
def unapply(in: String): Option[ObjectId] = asObjectId(in)
private def asObjectId(in: String): Option[ObjectId] =
if (ObjectId.isValid(in)) Some(new ObjectId(in))
else None
}
}
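// Hedged usage sketch, not part of the original file: shows the AsObjectId extractor
// and boolToBox in action. The hex string below is an arbitrary, syntactically valid
// ObjectId chosen for illustration.
object AppHelpersExample extends App with AppHelpers {
  "507f1f77bcf86cd799439011" match {
    case AsObjectId(oid) => println("parsed ObjectId: " + oid)
    case other           => println("not a valid ObjectId: " + other)
  }

  // boolToBox turns a guard into a Box so it can participate in for comprehensions.
  val granted = for {
    _ <- boolToBox(1 + 1 == 2) ?~ "guard failed"
  } yield "ok"
  println(granted) // Full(ok)
}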
|
Rmanolis/meritocracy
|
src/main/scala/org/meritocracy/lib/AddHelpers.scala
|
Scala
|
apache-2.0
| 2,378 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.group
import java.util.UUID
import java.util.concurrent.locks.ReentrantLock
import kafka.common.OffsetAndMetadata
import kafka.utils.{CoreUtils, Logging, nonthreadsafe}
import org.apache.kafka.common.TopicPartition
import scala.collection.{Seq, immutable, mutable}
private[group] sealed trait GroupState
/**
* Group is preparing to rebalance
*
* action: respond to heartbeats with REBALANCE_IN_PROGRESS
* respond to sync group with REBALANCE_IN_PROGRESS
* remove member on leave group request
* park join group requests from new or existing members until all expected members have joined
* allow offset commits from previous generation
* allow offset fetch requests
* transition: some members have joined by the timeout => CompletingRebalance
* all members have left the group => Empty
* group is removed by partition emigration => Dead
*/
private[group] case object PreparingRebalance extends GroupState
/**
* Group is awaiting state assignment from the leader
*
* action: respond to heartbeats with REBALANCE_IN_PROGRESS
* respond to offset commits with REBALANCE_IN_PROGRESS
* park sync group requests from followers until transition to Stable
* allow offset fetch requests
* transition: sync group with state assignment received from leader => Stable
* join group from new member or existing member with updated metadata => PreparingRebalance
* leave group from existing member => PreparingRebalance
* member failure detected => PreparingRebalance
* group is removed by partition emigration => Dead
*/
private[group] case object CompletingRebalance extends GroupState
/**
* Group is stable
*
* action: respond to member heartbeats normally
* respond to sync group from any member with current assignment
* respond to join group from followers with matching metadata with current group metadata
* allow offset commits from member of current generation
* allow offset fetch requests
* transition: member failure detected via heartbeat => PreparingRebalance
* leave group from existing member => PreparingRebalance
* leader join-group received => PreparingRebalance
* follower join-group with new metadata => PreparingRebalance
* group is removed by partition emigration => Dead
*/
private[group] case object Stable extends GroupState
/**
* Group has no more members and its metadata is being removed
*
* action: respond to join group with UNKNOWN_MEMBER_ID
* respond to sync group with UNKNOWN_MEMBER_ID
* respond to heartbeat with UNKNOWN_MEMBER_ID
* respond to leave group with UNKNOWN_MEMBER_ID
* respond to offset commit with UNKNOWN_MEMBER_ID
* allow offset fetch requests
* transition: Dead is a final state before group metadata is cleaned up, so there are no transitions
*/
private[group] case object Dead extends GroupState
/**
* Group has no more members, but lingers until all offsets have expired. This state
* also represents groups which use Kafka only for offset commits and have no members.
*
* action: respond normally to join group from new members
* respond to sync group with UNKNOWN_MEMBER_ID
* respond to heartbeat with UNKNOWN_MEMBER_ID
* respond to leave group with UNKNOWN_MEMBER_ID
* respond to offset commit with UNKNOWN_MEMBER_ID
* allow offset fetch requests
* transition: last offsets removed in periodic expiration task => Dead
* join group from a new member => PreparingRebalance
* group is removed by partition emigration => Dead
* group is removed by expiration => Dead
*/
private[group] case object Empty extends GroupState
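/**
 * Hedged illustration, not part of the original file: a tiny helper that encodes the
 * transitions documented in the state comments above, mirroring the private
 * `validPreviousStates` table defined below. It exists only to make the documented
 * state machine checkable in isolation; it is not the broker's transition logic.
 */
private[group] object GroupStateTransitionSketch {
  private val validPrevious: Map[GroupState, Set[GroupState]] =
    Map(
      Dead -> Set(Stable, PreparingRebalance, CompletingRebalance, Empty, Dead),
      CompletingRebalance -> Set(PreparingRebalance),
      Stable -> Set(CompletingRebalance),
      PreparingRebalance -> Set(Stable, CompletingRebalance, Empty),
      Empty -> Set(PreparingRebalance))

  /** True if a group currently in `from` may legally move to `to`. */
  def canTransition(from: GroupState, to: GroupState): Boolean =
    validPrevious(to).contains(from)

  // e.g. canTransition(Empty, PreparingRebalance)   == true
  //      canTransition(Stable, CompletingRebalance) == false
}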
private object GroupMetadata {
private val validPreviousStates: Map[GroupState, Set[GroupState]] =
Map(Dead -> Set(Stable, PreparingRebalance, CompletingRebalance, Empty, Dead),
CompletingRebalance -> Set(PreparingRebalance),
Stable -> Set(CompletingRebalance),
PreparingRebalance -> Set(Stable, CompletingRebalance, Empty),
Empty -> Set(PreparingRebalance))
def loadGroup(groupId: String,
initialState: GroupState,
generationId: Int,
protocolType: String,
protocol: String,
leaderId: String,
members: Iterable[MemberMetadata]): GroupMetadata = {
val group = new GroupMetadata(groupId, initialState)
group.generationId = generationId
group.protocolType = if (protocolType == null || protocolType.isEmpty) None else Some(protocolType)
group.protocol = Option(protocol)
group.leaderId = Option(leaderId)
members.foreach(group.add)
group
}
}
/**
* Case class used to represent group metadata for the ListGroups API
*/
case class GroupOverview(groupId: String,
protocolType: String)
/**
* Case class used to represent group metadata for the DescribeGroup API
*/
case class GroupSummary(state: String,
protocolType: String,
protocol: String,
members: List[MemberSummary])
/**
* We cache offset commits along with their commit record offset. This enables us to ensure that the latest offset
* commit is always materialized when we have a mix of transactional and regular offset commits. Without preserving
 * information of the commit record offset, compaction of the offsets topic itself may result in the wrong offset commit
* being materialized.
*/
case class CommitRecordMetadataAndOffset(appendedBatchOffset: Option[Long], offsetAndMetadata: OffsetAndMetadata) {
def olderThan(that: CommitRecordMetadataAndOffset) : Boolean = appendedBatchOffset.get < that.appendedBatchOffset.get
}
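/**
 * Illustrative sketch only (not in the original file): pick whichever commit was appended later in
 * the log, mirroring the olderThan check used by onOffsetCommitAppend and
 * completePendingTxnOffsetCommit below. Assumes both commits carry an appendedBatchOffset.
 */
private[group] object CommitMaterializationSketch {
  def latest(cached: Option[CommitRecordMetadataAndOffset],
             incoming: CommitRecordMetadataAndOffset): CommitRecordMetadataAndOffset =
    cached match {
      case Some(current) if !current.olderThan(incoming) => current
      case _ => incoming
    }
}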
/**
* Group contains the following metadata:
*
* Membership metadata:
* 1. Members registered in this group
* 2. Current protocol assigned to the group (e.g. partition assignment strategy for consumers)
* 3. Protocol metadata associated with group members
*
* State metadata:
* 1. group state
* 2. generation id
* 3. leader id
*/
@nonthreadsafe
private[group] class GroupMetadata(val groupId: String, initialState: GroupState) extends Logging {
private[group] val lock = new ReentrantLock
private var state: GroupState = initialState
var protocolType: Option[String] = None
var generationId = 0
private var leaderId: Option[String] = None
private var protocol: Option[String] = None
private val members = new mutable.HashMap[String, MemberMetadata]
private val offsets = new mutable.HashMap[TopicPartition, CommitRecordMetadataAndOffset]
private val pendingOffsetCommits = new mutable.HashMap[TopicPartition, OffsetAndMetadata]
private val pendingTransactionalOffsetCommits = new mutable.HashMap[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]()
private var receivedTransactionalOffsetCommits = false
private var receivedConsumerOffsetCommits = false
var newMemberAdded: Boolean = false
def inLock[T](fun: => T): T = CoreUtils.inLock(lock)(fun)
def is(groupState: GroupState) = state == groupState
def not(groupState: GroupState) = state != groupState
def has(memberId: String) = members.contains(memberId)
def get(memberId: String) = members(memberId)
def isLeader(memberId: String): Boolean = leaderId.contains(memberId)
def leaderOrNull: String = leaderId.orNull
def protocolOrNull: String = protocol.orNull
def add(member: MemberMetadata) {
if (members.isEmpty)
this.protocolType = Some(member.protocolType)
assert(groupId == member.groupId)
assert(this.protocolType.orNull == member.protocolType)
assert(supportsProtocols(member.protocols))
if (leaderId.isEmpty)
leaderId = Some(member.memberId)
members.put(member.memberId, member)
}
def remove(memberId: String) {
members.remove(memberId)
if (isLeader(memberId)) {
leaderId = if (members.isEmpty) {
None
} else {
Some(members.keys.head)
}
}
}
def currentState = state
def notYetRejoinedMembers = members.values.filter(_.awaitingJoinCallback == null).toList
def allMembers = members.keySet
def allMemberMetadata = members.values.toList
def rebalanceTimeoutMs = members.values.foldLeft(0) { (timeout, member) =>
timeout.max(member.rebalanceTimeoutMs)
}
// TODO: decide if ids should be predictable or random
def generateMemberIdSuffix = UUID.randomUUID().toString
def canRebalance = GroupMetadata.validPreviousStates(PreparingRebalance).contains(state)
def transitionTo(groupState: GroupState) {
assertValidTransition(groupState)
state = groupState
}
def selectProtocol: String = {
if (members.isEmpty)
throw new IllegalStateException("Cannot select protocol for empty group")
// select the protocol for this group which is supported by all members
val candidates = candidateProtocols
// let each member vote for one of the protocols and choose the one with the most votes
val votes: List[(String, Int)] = allMemberMetadata
.map(_.vote(candidates))
.groupBy(identity)
.mapValues(_.size)
.toList
votes.maxBy(_._2)._1
}
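  // Illustrative walk-through (not part of the original class) of the plurality vote above: with
  // member votes List("range", "roundrobin", "range"), grouping by identity yields range -> 2 and
  // roundrobin -> 1, and maxBy picks "range". Requires a non-empty vote list.
  private def selectProtocolSketch(memberVotes: List[String]): String =
    memberVotes.groupBy(identity).mapValues(_.size).toList.maxBy(_._2)._1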
private def candidateProtocols = {
// get the set of protocols that are commonly supported by all members
allMemberMetadata
.map(_.protocols)
.reduceLeft((commonProtocols, protocols) => commonProtocols & protocols)
}
def supportsProtocols(memberProtocols: Set[String]) = {
members.isEmpty || (memberProtocols & candidateProtocols).nonEmpty
}
def initNextGeneration() = {
assert(notYetRejoinedMembers == List.empty[MemberMetadata])
if (members.nonEmpty) {
generationId += 1
protocol = Some(selectProtocol)
transitionTo(CompletingRebalance)
} else {
generationId += 1
protocol = None
transitionTo(Empty)
}
receivedConsumerOffsetCommits = false
receivedTransactionalOffsetCommits = false
}
def currentMemberMetadata: Map[String, Array[Byte]] = {
if (is(Dead) || is(PreparingRebalance))
throw new IllegalStateException("Cannot obtain member metadata for group in state %s".format(state))
members.map{ case (memberId, memberMetadata) => (memberId, memberMetadata.metadata(protocol.get))}.toMap
}
def summary: GroupSummary = {
if (is(Stable)) {
val protocol = protocolOrNull
if (protocol == null)
throw new IllegalStateException("Invalid null group protocol for stable group")
val members = this.members.values.map { member => member.summary(protocol) }
GroupSummary(state.toString, protocolType.getOrElse(""), protocol, members.toList)
} else {
val members = this.members.values.map{ member => member.summaryNoMetadata() }
GroupSummary(state.toString, protocolType.getOrElse(""), GroupCoordinator.NoProtocol, members.toList)
}
}
def overview: GroupOverview = {
GroupOverview(groupId, protocolType.getOrElse(""))
}
def initializeOffsets(offsets: collection.Map[TopicPartition, CommitRecordMetadataAndOffset],
pendingTxnOffsets: Map[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]) {
this.offsets ++= offsets
this.pendingTransactionalOffsetCommits ++= pendingTxnOffsets
}
def onOffsetCommitAppend(topicPartition: TopicPartition, offsetWithCommitRecordMetadata: CommitRecordMetadataAndOffset) {
if (pendingOffsetCommits.contains(topicPartition)) {
if (offsetWithCommitRecordMetadata.appendedBatchOffset.isEmpty)
throw new IllegalStateException("Cannot complete offset commit write without providing the metadata of the record " +
"in the log.")
if (!offsets.contains(topicPartition) || offsets(topicPartition).olderThan(offsetWithCommitRecordMetadata))
offsets.put(topicPartition, offsetWithCommitRecordMetadata)
}
pendingOffsetCommits.get(topicPartition) match {
case Some(stagedOffset) if offsetWithCommitRecordMetadata.offsetAndMetadata == stagedOffset =>
pendingOffsetCommits.remove(topicPartition)
case _ =>
// The pendingOffsetCommits for this partition could be empty if the topic was deleted, in which case
// its entries would be removed from the cache by the `removeOffsets` method.
}
}
def failPendingOffsetWrite(topicPartition: TopicPartition, offset: OffsetAndMetadata): Unit = {
pendingOffsetCommits.get(topicPartition) match {
case Some(pendingOffset) if offset == pendingOffset => pendingOffsetCommits.remove(topicPartition)
case _ =>
}
}
def prepareOffsetCommit(offsets: Map[TopicPartition, OffsetAndMetadata]) {
receivedConsumerOffsetCommits = true
pendingOffsetCommits ++= offsets
}
def prepareTxnOffsetCommit(producerId: Long, offsets: Map[TopicPartition, OffsetAndMetadata]) {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $offsets is pending")
receivedTransactionalOffsetCommits = true
val producerOffsets = pendingTransactionalOffsetCommits.getOrElseUpdate(producerId,
mutable.Map.empty[TopicPartition, CommitRecordMetadataAndOffset])
offsets.foreach { case (topicPartition, offsetAndMetadata) =>
producerOffsets.put(topicPartition, CommitRecordMetadataAndOffset(None, offsetAndMetadata))
}
}
def hasReceivedConsistentOffsetCommits : Boolean = {
!receivedConsumerOffsetCommits || !receivedTransactionalOffsetCommits
}
/* Remove a pending transactional offset commit if the actual offset commit record was not written to the log.
* We will return an error and the client will retry the request, potentially to a different coordinator.
*/
def failPendingTxnOffsetCommit(producerId: Long, topicPartition: TopicPartition): Unit = {
pendingTransactionalOffsetCommits.get(producerId) match {
case Some(pendingOffsets) =>
val pendingOffsetCommit = pendingOffsets.remove(topicPartition)
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $pendingOffsetCommit failed " +
s"to be appended to the log")
if (pendingOffsets.isEmpty)
pendingTransactionalOffsetCommits.remove(producerId)
case _ =>
// We may hit this case if the partition in question has emigrated already.
}
}
def onTxnOffsetCommitAppend(producerId: Long, topicPartition: TopicPartition,
commitRecordMetadataAndOffset: CommitRecordMetadataAndOffset) {
pendingTransactionalOffsetCommits.get(producerId) match {
case Some(pendingOffset) =>
if (pendingOffset.contains(topicPartition)
&& pendingOffset(topicPartition).offsetAndMetadata == commitRecordMetadataAndOffset.offsetAndMetadata)
pendingOffset.update(topicPartition, commitRecordMetadataAndOffset)
case _ =>
// We may hit this case if the partition in question has emigrated.
}
}
/* Complete a pending transactional offset commit. This is called after a commit or abort marker is fully written
* to the log.
*/
def completePendingTxnOffsetCommit(producerId: Long, isCommit: Boolean): Unit = {
val pendingOffsetsOpt = pendingTransactionalOffsetCommits.remove(producerId)
if (isCommit) {
pendingOffsetsOpt.foreach { pendingOffsets =>
pendingOffsets.foreach { case (topicPartition, commitRecordMetadataAndOffset) =>
if (commitRecordMetadataAndOffset.appendedBatchOffset.isEmpty)
throw new IllegalStateException(s"Trying to complete a transactional offset commit for producerId $producerId " +
s"and groupId $groupId even though the offset commit record itself hasn't been appended to the log.")
val currentOffsetOpt = offsets.get(topicPartition)
if (currentOffsetOpt.forall(_.olderThan(commitRecordMetadataAndOffset))) {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offset $commitRecordMetadataAndOffset " +
"committed and loaded into the cache.")
offsets.put(topicPartition, commitRecordMetadataAndOffset)
} else {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offset $commitRecordMetadataAndOffset " +
s"committed, but not loaded since its offset is older than current offset $currentOffsetOpt.")
}
}
}
} else {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $pendingOffsetsOpt aborted")
}
}
def activeProducers = pendingTransactionalOffsetCommits.keySet
def hasPendingOffsetCommitsFromProducer(producerId: Long) =
pendingTransactionalOffsetCommits.contains(producerId)
def removeOffsets(topicPartitions: Seq[TopicPartition]): immutable.Map[TopicPartition, OffsetAndMetadata] = {
topicPartitions.flatMap { topicPartition =>
pendingOffsetCommits.remove(topicPartition)
pendingTransactionalOffsetCommits.foreach { case (_, pendingOffsets) =>
pendingOffsets.remove(topicPartition)
}
val removedOffset = offsets.remove(topicPartition)
removedOffset.map(topicPartition -> _.offsetAndMetadata)
}.toMap
}
def removeExpiredOffsets(startMs: Long) : Map[TopicPartition, OffsetAndMetadata] = {
val expiredOffsets = offsets
.filter {
case (topicPartition, commitRecordMetadataAndOffset) =>
commitRecordMetadataAndOffset.offsetAndMetadata.expireTimestamp < startMs && !pendingOffsetCommits.contains(topicPartition)
}
.map {
case (topicPartition, commitRecordOffsetAndMetadata) =>
(topicPartition, commitRecordOffsetAndMetadata.offsetAndMetadata)
}
offsets --= expiredOffsets.keySet
expiredOffsets.toMap
}
def allOffsets = offsets.map { case (topicPartition, commitRecordMetadataAndOffset) =>
(topicPartition, commitRecordMetadataAndOffset.offsetAndMetadata)
}.toMap
def offset(topicPartition: TopicPartition): Option[OffsetAndMetadata] = offsets.get(topicPartition).map(_.offsetAndMetadata)
// visible for testing
private[group] def offsetWithRecordMetadata(topicPartition: TopicPartition): Option[CommitRecordMetadataAndOffset] = offsets.get(topicPartition)
def numOffsets = offsets.size
def hasOffsets = offsets.nonEmpty || pendingOffsetCommits.nonEmpty || pendingTransactionalOffsetCommits.nonEmpty
private def assertValidTransition(targetState: GroupState) {
if (!GroupMetadata.validPreviousStates(targetState).contains(state))
throw new IllegalStateException("Group %s should be in the %s states before moving to %s state. Instead it is in %s state"
.format(groupId, GroupMetadata.validPreviousStates(targetState).mkString(","), targetState, state))
}
override def toString: String = {
"GroupMetadata(" +
s"groupId=$groupId, " +
s"generation=$generationId, " +
s"protocolType=$protocolType, " +
s"currentState=$currentState, " +
s"members=$members)"
}
}
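/**
 * Hypothetical usage sketch (not part of the original file): drive a group through the legal
 * Empty -> PreparingRebalance -> CompletingRebalance -> Stable path using only transitionTo, as
 * documented in the state comments above. The group id used here is made up.
 */
private[group] object GroupMetadataUsageSketch {
  def walkLegalPath(): GroupState = {
    val group = new GroupMetadata("sketch-group", Empty)
    group.transitionTo(PreparingRebalance)
    group.transitionTo(CompletingRebalance)
    group.transitionTo(Stable)
    group.currentState
  }
}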
|
MyPureCloud/kafka
|
core/src/main/scala/kafka/coordinator/group/GroupMetadata.scala
|
Scala
|
apache-2.0
| 20,071 |
/*
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
package org.infinispan.server.core
import org.infinispan.factories.components.ModuleMetadataFileFinder
class ServerCoreMetadataFileFinder extends ModuleMetadataFileFinder {
def getMetadataFilename = "infinispan-server-core-component-metadata.dat"
}
|
nmldiegues/stibt
|
infinispan/server/core/src/main/scala/org/infinispan/server/core/ServerCoreMetadataFileFinder.scala
|
Scala
|
apache-2.0
| 1,054 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.global
import org.scalatest.{FunSpec, Matchers}
class ExecutionCounterSpec extends FunSpec with Matchers {
describe("ExecutionCounter") {
describe("#increment( String )"){
it("should increment value when key is not present"){
ExecutionCounter incr "foo" should be(1)
}
it("should increment value for key when it is present"){
ExecutionCounter incr "bar" should be(1)
ExecutionCounter incr "bar" should be(2)
}
}
}
}
|
yeghishe/spark-kernel
|
kernel/src/test/scala/com/ibm/spark/global/ExecutionCounterSpec.scala
|
Scala
|
apache-2.0
| 1,095 |
package graphql.resolvers
import java.nio.file.{Files, Path, Paths}
import actors.FileActor
import actors.FileActor.SaveFileMetadata
import akka.actor.ActorRef
import akka.http.scaladsl.model.Multipart.FormData
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{FileIO, Keep, Sink, Source}
import com.byteslounge.slickrepo.repository.Repository
import com.google.inject.name.Named
import common.implicits.RichDBIO._
import common.errors._
import common.Logger
import common.actors.ActorMessageDelivering
import graphql.resolvers.FileUploadResolverImpl._
import javax.inject.Inject
import models.FileMetadata
import services.HashAppender
import slick.dbio.DBIO
import scala.concurrent.{ExecutionContext, Future}
class FileUploadResolverImpl @Inject()(
@Named(FileActor.name) fileActor: ActorRef,
fileRepository: Repository[FileMetadata, Int],
hashAppender: HashAppender
)(implicit executionContext: ExecutionContext, materializer: ActorMaterializer)
extends FileUploadResolver
with Logger
with ActorMessageDelivering {
override def uploadFiles(parts: Source[FormData.BodyPart, Any]): Future[Boolean] = {
parts
.filter(_.filename.nonEmpty)
.mapAsync(1) {
part =>
{
val hashedFilename = hashAppender.append(part.filename.get)
if (!publicDirPath.toFile.exists) Files.createDirectory(publicDirPath)
part.entity.dataBytes.runWith(FileIO.toPath(publicDirPath.resolve(hashedFilename))).map {
ioResult =>
FileMetadata(
name = part.filename.get,
contentType = part.entity.contentType.toString,
size = ioResult.count,
path = s"public/$hashedFilename"
)
}
}
}
.mapAsync(1) {
fileMetadata =>
sendMessageWithFunc[FileMetadata](actorRef => fileActor ! SaveFileMetadata(fileMetadata, actorRef))
}
.toMat(Sink.ignore)(Keep.right)
.run
.map(_ => true)
}.recover {
case error: Error =>
log.error(s"Failed to upload files. Reason: [$error")
false
}
override def files: Future[List[FileMetadata]] = fileRepository.findAll.run.map(_.toList)
override def removeFile(id: Int): Future[Boolean] = {
fileRepository
.executeTransactionally(
for {
fileMetadataOption <- fileRepository.findOne(id)
fileMetadata <- if (fileMetadataOption.nonEmpty) DBIO.successful(fileMetadataOption.get)
else DBIO.failed(NotFound(s"FileMetadata(id: $id)"))
deletedFileMetadata <- fileRepository.delete(fileMetadata)
} yield deletedFileMetadata
)
.run
.map {
deletedFileMetadata =>
Files.deleteIfExists(resourcesDirPath.resolve(deletedFileMetadata.path))
}
}.recover {
case error: Error =>
log.error(s"Failed to upload files. Reason: [$error")
false
}
}
object FileUploadResolverImpl {
val resourcesDirPath: Path = Paths.get(getClass.getResource("/").getPath)
val publicDirPath: Path = resourcesDirPath.resolve("public")
}
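/**
 * Small illustration (not part of the original resolver): the path layout uploadFiles relies on.
 * Uploaded bytes are streamed to publicDirPath/<hashed-name>, while the persisted
 * FileMetadata.path is the relative "public/<hashed-name>" that removeFile later resolves against
 * resourcesDirPath.
 */
object UploadPathSketch {
  def storedRelativePath(hashedFilename: String): String = s"public/$hashedFilename"
  def absolutePath(hashedFilename: String): java.nio.file.Path =
    FileUploadResolverImpl.publicDirPath.resolve(hashedFilename)
}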
|
sysgears/apollo-universal-starter-kit
|
modules/upload/server-scala/src/main/scala/graphql/resolvers/FileUploadResolverImpl.scala
|
Scala
|
mit
| 3,138 |
package uk.gov.dvla.vehicles.presentation.common.model
import play.api.data.Forms.mapping
import play.api.libs.json.Json
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.CacheKey
import uk.gov.dvla.vehicles.presentation.common.mappings.DropDown
final case class NewKeeperChooseYourAddressFormModel(addressSelected: String)
object NewKeeperChooseYourAddressFormModel {
implicit val JsonFormat = Json.format[NewKeeperChooseYourAddressFormModel]
implicit def key(implicit prefix: CacheKeyPrefix) =
CacheKey[NewKeeperChooseYourAddressFormModel](value = newKeeperChooseYourAddressCacheKey)
def newKeeperChooseYourAddressCacheKey(implicit prefix: CacheKeyPrefix) = s"${prefix}newKeeperChooseYourAddress"
object Form {
final val AddressSelectId = "newKeeperChooseYourAddress_addressSelect"
final val Mapping = mapping(
AddressSelectId -> DropDown.addressDropDown
)(NewKeeperChooseYourAddressFormModel.apply)(NewKeeperChooseYourAddressFormModel.unapply)
}
}
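/**
 * Hypothetical usage sketch (not part of the original model): wrap the mapping above in a Play
 * Form and bind a posted value. Whether the bound value satisfies DropDown.addressDropDown's own
 * constraints depends on that mapping; nothing is asserted about the result here.
 */
object NewKeeperChooseYourAddressFormSketch {
  import play.api.data.Form
  def bind(selected: String): Form[NewKeeperChooseYourAddressFormModel] =
    Form(NewKeeperChooseYourAddressFormModel.Form.Mapping)
      .bind(Map(NewKeeperChooseYourAddressFormModel.Form.AddressSelectId -> selected))
}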
|
dvla/vehicles-presentation-common
|
app/uk/gov/dvla/vehicles/presentation/common/model/NewKeeperChooseYourAddressFormModel.scala
|
Scala
|
mit
| 1,005 |
package net.sansa_stack.inference.spark.utils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
/**
* @author Lorenz Buehmann
*/
object SparkManager {
/**
* Spark context
*/
private var context: SparkContext = null
/**
* Creates a new Spark context
*
* @see https://spark.apache.org/docs/1.6.1/configuration.html
*/
def createSparkContext() {
val conf = new SparkConf()
// Use the Kryo serializer, because it is faster than Java serializing
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
conf.set("spark.kryo.registrator", "de.tf.uni.freiburg.sparkrdf.sparql.serialization.Registrator")
conf.set("spark.core.connection.ack.wait.timeout", "5000");
conf.set("spark.shuffle.consolidateFiles", "true");
conf.set("spark.rdd.compress", "true");
conf.set("spark.kryoserializer.buffer.max.mb", "512");
if (SparkSettings.locale) {
conf.setMaster("local")
}
if (SparkSettings.executorMem != null) {
conf.set("spark.executor.memory", SparkSettings.executorMem)
}
if (SparkSettings.parallelism != null) {
conf.set("spark.default.parallelism", SparkSettings.parallelism)
}
if (SparkSettings.memoryFraction != null) {
conf.set("spark.storage.memoryFraction", SparkSettings.memoryFraction)
}
if (SparkSettings.jobName != null) {
conf.setAppName(SparkSettings.jobName)
}
context = new SparkContext(conf)
Logger.getLogger("org").setLevel(Level.WARN)
Logger.getLogger("akka").setLevel(Level.WARN)
}
/**
* Close the Spark context
*/
def closeContext() {
context.stop()
}
}
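/**
 * Hypothetical usage sketch (not part of the original object), assuming SparkSettings has already
 * been populated elsewhere: create the shared context, run the job, and always close the context.
 */
object SparkManagerUsageSketch {
  def run(job: => Unit): Unit = {
    SparkManager.createSparkContext()
    try job finally SparkManager.closeContext()
  }
}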
|
SANSA-Stack/SANSA-RDF
|
sansa-inference/sansa-inference-spark/src/main/scala/net/sansa_stack/inference/spark/utils/SparkManager.scala
|
Scala
|
apache-2.0
| 1,705 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal
import java.util.concurrent.TimeUnit
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.metrics.GarbageCollectionMetrics
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.{EventLoggingListener, SchedulingMode}
import org.apache.spark.storage.{DefaultTopologyMapper, RandomBlockReplicationPolicy}
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.unsafe.sort.UnsafeSorterSpillReader.MAX_BUFFER_SIZE_BYTES
package object config {
private[spark] val DRIVER_CLASS_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val DRIVER_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS).stringConf.createOptional
private[spark] val DRIVER_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val DRIVER_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.driver.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val DRIVER_CORES = ConfigBuilder("spark.driver.cores")
.doc("Number of cores to use for the driver process, only in cluster mode.")
.intConf
.createWithDefault(1)
private[spark] val DRIVER_MEMORY = ConfigBuilder(SparkLauncher.DRIVER_MEMORY)
.doc("Amount of memory to use for the driver process, in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.driver.memoryOverhead")
.doc("The amount of off-heap memory to be allocated per driver in cluster mode, " +
"in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val DRIVER_LOG_DFS_DIR =
ConfigBuilder("spark.driver.log.dfsDir").stringConf.createOptional
private[spark] val DRIVER_LOG_LAYOUT =
ConfigBuilder("spark.driver.log.layout")
.stringConf
.createOptional
private[spark] val DRIVER_LOG_PERSISTTODFS =
ConfigBuilder("spark.driver.log.persistToDfs.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ENABLED = ConfigBuilder("spark.eventLog.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_DIR = ConfigBuilder("spark.eventLog.dir")
.stringConf
.createWithDefault(EventLoggingListener.DEFAULT_LOG_DIR)
private[spark] val EVENT_LOG_COMPRESS =
ConfigBuilder("spark.eventLog.compress")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_BLOCK_UPDATES =
ConfigBuilder("spark.eventLog.logBlockUpdates.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ALLOW_EC =
ConfigBuilder("spark.eventLog.allowErasureCoding")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_TESTING =
ConfigBuilder("spark.eventLog.testing")
.internal()
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_OUTPUT_BUFFER_SIZE = ConfigBuilder("spark.eventLog.buffer.kb")
.doc("Buffer size to use when writing to output streams, in KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("100k")
private[spark] val EVENT_LOG_STAGE_EXECUTOR_METRICS =
ConfigBuilder("spark.eventLog.logStageExecutorMetrics.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_PROCESS_TREE_METRICS =
ConfigBuilder("spark.eventLog.logStageExecutorProcessTreeMetrics.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_GC_METRICS_YOUNG_GENERATION_GARBAGE_COLLECTORS =
ConfigBuilder("spark.eventLog.gcMetrics.youngGenerationGarbageCollectors")
.doc("Names of supported young generation garbage collector. A name usually is " +
" the return of GarbageCollectorMXBean.getName. The built-in young generation garbage " +
s"collectors are ${GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
.stringConf
.toSequence
.createWithDefault(GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
private[spark] val EVENT_LOG_GC_METRICS_OLD_GENERATION_GARBAGE_COLLECTORS =
ConfigBuilder("spark.eventLog.gcMetrics.oldGenerationGarbageCollectors")
.doc("Names of supported old generation garbage collector. A name usually is " +
"the return of GarbageCollectorMXBean.getName. The built-in old generation garbage " +
s"collectors are ${GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
.stringConf
.toSequence
.createWithDefault(GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
private[spark] val EVENT_LOG_OVERWRITE =
ConfigBuilder("spark.eventLog.overwrite").booleanConf.createWithDefault(false)
private[spark] val EVENT_LOG_CALLSITE_LONG_FORM =
ConfigBuilder("spark.eventLog.longForm.enabled").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_ID =
ConfigBuilder("spark.executor.id").stringConf.createOptional
private[spark] val EXECUTOR_CLASS_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val EXECUTOR_HEARTBEAT_DROP_ZERO_ACCUMULATOR_UPDATES =
ConfigBuilder("spark.executor.heartbeat.dropZeroAccumulatorUpdates")
.internal()
.booleanConf
.createWithDefault(true)
private[spark] val EXECUTOR_HEARTBEAT_INTERVAL =
ConfigBuilder("spark.executor.heartbeatInterval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10s")
private[spark] val EXECUTOR_HEARTBEAT_MAX_FAILURES =
ConfigBuilder("spark.executor.heartbeat.maxFailures").internal().intConf.createWithDefault(60)
private[spark] val EXECUTOR_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS).stringConf.createOptional
private[spark] val EXECUTOR_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val EXECUTOR_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.executor.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_CORES = ConfigBuilder(SparkLauncher.EXECUTOR_CORES)
.intConf
.createWithDefault(1)
private[spark] val EXECUTOR_MEMORY = ConfigBuilder(SparkLauncher.EXECUTOR_MEMORY)
.doc("Amount of memory to use per executor process, in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead")
.doc("The amount of off-heap memory to be allocated per executor in cluster mode, " +
"in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val CORES_MAX = ConfigBuilder("spark.cores.max")
.doc("When running on a standalone deploy cluster or a Mesos cluster in coarse-grained " +
"sharing mode, the maximum amount of CPU cores to request for the application from across " +
"the cluster (not from each machine). If not set, the default will be " +
"`spark.deploy.defaultCores` on Spark's standalone cluster manager, or infinite " +
"(all available cores) on Mesos.")
.intConf
.createOptional
private[spark] val MEMORY_OFFHEAP_ENABLED = ConfigBuilder("spark.memory.offHeap.enabled")
.doc("If true, Spark will attempt to use off-heap memory for certain operations. " +
"If off-heap memory use is enabled, then spark.memory.offHeap.size must be positive.")
.withAlternative("spark.unsafe.offHeap")
.booleanConf
.createWithDefault(false)
private[spark] val MEMORY_OFFHEAP_SIZE = ConfigBuilder("spark.memory.offHeap.size")
.doc("The absolute amount of memory in bytes which can be used for off-heap allocation. " +
"This setting has no impact on heap memory usage, so if your executors' total memory " +
"consumption must fit within some hard limit then be sure to shrink your JVM heap size " +
"accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ >= 0, "The off-heap memory size must not be negative")
.createWithDefault(0)
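  // Illustrative sketch only (not part of Spark's config registry): how a user-facing SparkConf
  // would toggle the two off-heap entries above. The size shown is arbitrary; it must be positive
  // whenever spark.memory.offHeap.enabled is true.
  private[spark] def offHeapConfSketch: org.apache.spark.SparkConf =
    new org.apache.spark.SparkConf()
      .set(MEMORY_OFFHEAP_ENABLED.key, "true")
      .set(MEMORY_OFFHEAP_SIZE.key, "2g")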
private[spark] val MEMORY_STORAGE_FRACTION = ConfigBuilder("spark.memory.storageFraction")
.doc("Amount of storage memory immune to eviction, expressed as a fraction of the " +
"size of the region set aside by spark.memory.fraction. The higher this is, the " +
"less working memory may be available to execution and tasks may spill to disk more " +
"often. Leaving this at the default value is recommended. ")
.doubleConf
.checkValue(v => v >= 0.0 && v < 1.0, "Storage fraction must be in [0,1)")
.createWithDefault(0.5)
private[spark] val MEMORY_FRACTION = ConfigBuilder("spark.memory.fraction")
.doc("Fraction of (heap space - 300MB) used for execution and storage. The " +
"lower this is, the more frequently spills and cached data eviction occur. " +
"The purpose of this config is to set aside memory for internal metadata, " +
"user data structures, and imprecise size estimation in the case of sparse, " +
"unusually large records. Leaving this at the default value is recommended. ")
.doubleConf
.createWithDefault(0.6)
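  // A worked sketch (not used by Spark itself) of the arithmetic the two entries above describe:
  // usable unified memory is (heap - 300MB) * spark.memory.fraction, and the storage region within
  // it is that amount * spark.memory.storageFraction. With a 4096 MiB heap and the defaults this
  // is roughly 2277 MiB of unified memory, half of it immune to eviction.
  private[spark] def unifiedMemorySketchMiB(
      heapMiB: Long,
      memoryFraction: Double = 0.6,
      storageFraction: Double = 0.5): (Double, Double) = {
    val unified = (heapMiB - 300) * memoryFraction
    (unified, unified * storageFraction)
  }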
private[spark] val STORAGE_SAFETY_FRACTION = ConfigBuilder("spark.storage.safetyFraction")
.doubleConf
.createWithDefault(0.9)
private[spark] val STORAGE_UNROLL_MEMORY_THRESHOLD =
ConfigBuilder("spark.storage.unrollMemoryThreshold")
.doc("Initial memory to request before unrolling any block")
.longConf
.createWithDefault(1024 * 1024)
private[spark] val STORAGE_REPLICATION_PROACTIVE =
ConfigBuilder("spark.storage.replication.proactive")
.doc("Enables proactive block replication for RDD blocks. " +
"Cached RDD block replicas lost due to executor failures are replenished " +
"if there are any existing available replicas. This tries to " +
"get the replication level of the block to the initial number")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_MEMORY_MAP_THRESHOLD =
ConfigBuilder("spark.storage.memoryMapThreshold")
.doc("Size in bytes of a block above which Spark memory maps when " +
"reading a block from disk. " +
"This prevents Spark from memory mapping very small blocks. " +
"In general, memory mapping has high overhead for blocks close to or below " +
"the page size of the operating system.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("2m")
private[spark] val STORAGE_REPLICATION_POLICY =
ConfigBuilder("spark.storage.replication.policy")
.stringConf
.createWithDefaultString(classOf[RandomBlockReplicationPolicy].getName)
private[spark] val STORAGE_REPLICATION_TOPOLOGY_MAPPER =
ConfigBuilder("spark.storage.replication.topologyMapper")
.stringConf
.createWithDefaultString(classOf[DefaultTopologyMapper].getName)
private[spark] val STORAGE_CACHED_PEERS_TTL = ConfigBuilder("spark.storage.cachedPeersTtl")
.intConf.createWithDefault(60 * 1000)
private[spark] val STORAGE_MAX_REPLICATION_FAILURE =
ConfigBuilder("spark.storage.maxReplicationFailures")
.intConf.createWithDefault(1)
private[spark] val STORAGE_REPLICATION_TOPOLOGY_FILE =
ConfigBuilder("spark.storage.replication.topologyFile").stringConf.createOptional
private[spark] val STORAGE_EXCEPTION_PIN_LEAK =
ConfigBuilder("spark.storage.exceptionOnPinLeak")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_BLOCKMANAGER_TIMEOUTINTERVAL =
ConfigBuilder("spark.storage.blockManagerTimeoutIntervalMs")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("60s")
private[spark] val STORAGE_BLOCKMANAGER_SLAVE_TIMEOUT =
ConfigBuilder("spark.storage.blockManagerSlaveTimeoutMs")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString(Network.NETWORK_TIMEOUT.defaultValueString)
private[spark] val STORAGE_CLEANUP_FILES_AFTER_EXECUTOR_EXIT =
ConfigBuilder("spark.storage.cleanupFilesAfterExecutorExit")
.doc("Whether or not cleanup the non-shuffle files on executor exits.")
.booleanConf
.createWithDefault(true)
private[spark] val DISKSTORE_SUB_DIRECTORIES =
ConfigBuilder("spark.diskStore.subDirectories")
.doc("Number of subdirectories inside each path listed in spark.local.dir for " +
"hashing Block files into.")
.intConf
.createWithDefault(64)
private[spark] val BLOCK_FAILURES_BEFORE_LOCATION_REFRESH =
ConfigBuilder("spark.block.failures.beforeLocationRefresh")
.doc("Max number of failures before this block manager refreshes " +
"the block locations from the driver.")
.intConf
.createWithDefault(5)
private[spark] val IS_PYTHON_APP = ConfigBuilder("spark.yarn.isPython").internal()
.booleanConf.createWithDefault(false)
private[spark] val CPUS_PER_TASK = ConfigBuilder("spark.task.cpus").intConf.createWithDefault(1)
private[spark] val DYN_ALLOCATION_ENABLED =
ConfigBuilder("spark.dynamicAllocation.enabled").booleanConf.createWithDefault(false)
private[spark] val DYN_ALLOCATION_TESTING =
ConfigBuilder("spark.dynamicAllocation.testing").booleanConf.createWithDefault(false)
private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.minExecutors").intConf.createWithDefault(0)
private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.initialExecutors")
.fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.maxExecutors").intConf.createWithDefault(Int.MaxValue)
private[spark] val DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO =
ConfigBuilder("spark.dynamicAllocation.executorAllocationRatio")
.doubleConf.createWithDefault(1.0)
private[spark] val DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.cachedExecutorIdleTimeout")
.timeConf(TimeUnit.SECONDS).createWithDefault(Integer.MAX_VALUE)
private[spark] val DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.executorIdleTimeout")
.timeConf(TimeUnit.SECONDS).createWithDefault(60)
private[spark] val DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.schedulerBacklogTimeout")
.timeConf(TimeUnit.SECONDS).createWithDefault(1)
private[spark] val DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout")
.fallbackConf(DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT)
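  // Illustrative only (not part of Spark's config registry): the fallbackConf wiring above means
  // that, unless set explicitly, spark.dynamicAllocation.initialExecutors resolves to
  // spark.dynamicAllocation.minExecutors, so with no other settings this sketch reads back 3.
  private[spark] def dynAllocationFallbackSketch: Int =
    new org.apache.spark.SparkConf()
      .set(DYN_ALLOCATION_MIN_EXECUTORS.key, "3")
      .get(DYN_ALLOCATION_INITIAL_EXECUTORS)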
private[spark] val LOCALITY_WAIT = ConfigBuilder("spark.locality.wait")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("3s")
private[spark] val SHUFFLE_SERVICE_ENABLED =
ConfigBuilder("spark.shuffle.service.enabled").booleanConf.createWithDefault(false)
private[spark] val SHUFFLE_SERVICE_PORT =
ConfigBuilder("spark.shuffle.service.port").intConf.createWithDefault(7337)
private[spark] val KEYTAB = ConfigBuilder("spark.kerberos.keytab")
.doc("Location of user's keytab.")
.stringConf.createOptional
private[spark] val PRINCIPAL = ConfigBuilder("spark.kerberos.principal")
.doc("Name of the Kerberos principal.")
.stringConf.createOptional
private[spark] val KERBEROS_RELOGIN_PERIOD = ConfigBuilder("spark.kerberos.relogin.period")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1m")
private[spark] val KERBEROS_RENEWAL_CREDENTIALS =
ConfigBuilder("spark.kerberos.renewal.credentials")
.doc(
"Which credentials to use when renewing delegation tokens for executors. Can be either " +
"'keytab', the default, which requires a keytab to be provided, or 'ccache', which uses " +
"the local credentials cache.")
.stringConf
.checkValues(Set("keytab", "ccache"))
.createWithDefault("keytab")
private[spark] val KERBEROS_FILESYSTEMS_TO_ACCESS =
ConfigBuilder("spark.kerberos.access.hadoopFileSystems")
.doc("Extra Hadoop filesystem URLs for which to request delegation tokens. The filesystem " +
"that hosts fs.defaultFS does not need to be listed here.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
.intConf
.createOptional
private[spark] val PY_FILES = ConfigBuilder("spark.yarn.dist.pyFiles")
.internal()
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val TASK_MAX_DIRECT_RESULT_SIZE =
ConfigBuilder("spark.task.maxDirectResultSize")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(1L << 20)
private[spark] val TASK_MAX_FAILURES =
ConfigBuilder("spark.task.maxFailures")
.intConf
.createWithDefault(4)
private[spark] val TASK_REAPER_ENABLED =
ConfigBuilder("spark.task.reaper.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val TASK_REAPER_KILL_TIMEOUT =
ConfigBuilder("spark.task.reaper.killTimeout")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(-1)
private[spark] val TASK_REAPER_POLLING_INTERVAL =
ConfigBuilder("spark.task.reaper.pollingInterval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10s")
private[spark] val TASK_REAPER_THREAD_DUMP =
ConfigBuilder("spark.task.reaper.threadDump")
.booleanConf
.createWithDefault(true)
// Blacklist confs
private[spark] val BLACKLIST_ENABLED =
ConfigBuilder("spark.blacklist.enabled")
.booleanConf
.createOptional
private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor")
.intConf
.createWithDefault(1)
private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC =
ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE =
ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val BLACKLIST_TIMEOUT_CONF =
ConfigBuilder("spark.blacklist.timeout")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_KILL_ENABLED =
ConfigBuilder("spark.blacklist.killBlacklistedExecutors")
.booleanConf
.createWithDefault(false)
private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF =
ConfigBuilder("spark.scheduler.executorTaskBlacklistTime")
.internal()
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_FETCH_FAILURE_ENABLED =
ConfigBuilder("spark.blacklist.application.fetchFailure.enabled")
.booleanConf
.createWithDefault(false)
// End blacklist confs
private[spark] val UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE =
ConfigBuilder("spark.files.fetchFailure.unRegisterOutputOnHost")
.doc("Whether to un-register all the outputs on the host in condition that we receive " +
" a FetchFailure. This is set default to false, which means, we only un-register the " +
" outputs related to the exact executor(instead of the host) on a FetchFailure.")
.booleanConf
.createWithDefault(false)
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
.createWithDefault(10000)
private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
ConfigBuilder("spark.scheduler.listenerbus.metrics.maxListenerClassesTimed")
.internal()
.intConf
.createWithDefault(128)
// This property sets the root namespace for metrics reporting
private[spark] val METRICS_NAMESPACE = ConfigBuilder("spark.metrics.namespace")
.stringConf
.createOptional
private[spark] val METRICS_CONF = ConfigBuilder("spark.metrics.conf")
.stringConf
.createOptional
private[spark] val PYSPARK_DRIVER_PYTHON = ConfigBuilder("spark.pyspark.driver.python")
.stringConf
.createOptional
private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
.stringConf
.createOptional
// To limit how many applications are shown in the History Server summary ui
private[spark] val HISTORY_UI_MAX_APPS =
ConfigBuilder("spark.history.ui.maxApplications").intConf.createWithDefault(Integer.MAX_VALUE)
private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM =
ConfigBuilder("spark.io.encryption.keygen.algorithm")
.stringConf
.createWithDefault("HmacSHA1")
private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits")
.intConf
.checkValues(Set(128, 192, 256))
.createWithDefault(128)
private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION =
ConfigBuilder("spark.io.crypto.cipher.transformation")
.internal()
.stringConf
.createWithDefaultString("AES/CTR/NoPadding")
private[spark] val DRIVER_HOST_ADDRESS = ConfigBuilder("spark.driver.host")
.doc("Address of driver endpoints.")
.stringConf
.createWithDefault(Utils.localCanonicalHostName())
private[spark] val DRIVER_PORT = ConfigBuilder("spark.driver.port")
.doc("Port of driver endpoints.")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_SUPERVISE = ConfigBuilder("spark.driver.supervise")
.doc("If true, restarts the driver automatically if it fails with a non-zero exit status. " +
"Only has effect in Spark standalone mode or Mesos cluster deploy mode.")
.booleanConf
.createWithDefault(false)
private[spark] val DRIVER_BIND_ADDRESS = ConfigBuilder("spark.driver.bindAddress")
.doc("Address where to bind network listen sockets on the driver.")
.fallbackConf(DRIVER_HOST_ADDRESS)
private[spark] val BLOCK_MANAGER_PORT = ConfigBuilder("spark.blockManager.port")
.doc("Port to use for the block manager when a more specific setting is not provided.")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_BLOCK_MANAGER_PORT = ConfigBuilder("spark.driver.blockManager.port")
.doc("Port to use for the block manager on the driver.")
.fallbackConf(BLOCK_MANAGER_PORT)
private[spark] val IGNORE_CORRUPT_FILES = ConfigBuilder("spark.files.ignoreCorruptFiles")
.doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
"encountering corrupted or non-existing files and contents that have been read will still " +
"be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val IGNORE_MISSING_FILES = ConfigBuilder("spark.files.ignoreMissingFiles")
.doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " +
"encountering missing files and the contents that have been read will still be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val APP_CALLER_CONTEXT = ConfigBuilder("spark.log.callerContext")
.stringConf
.createOptional
private[spark] val FILES_MAX_PARTITION_BYTES = ConfigBuilder("spark.files.maxPartitionBytes")
.doc("The maximum number of bytes to pack into a single partition when reading files.")
.longConf
.createWithDefault(128 * 1024 * 1024)
private[spark] val FILES_OPEN_COST_IN_BYTES = ConfigBuilder("spark.files.openCostInBytes")
.doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" +
" the same time. This is used when putting multiple files into a partition. It's better to" +
" over estimate, then the partitions with small files will be faster than partitions with" +
" bigger files.")
.longConf
.createWithDefault(4 * 1024 * 1024)
private[spark] val HADOOP_RDD_IGNORE_EMPTY_SPLITS =
ConfigBuilder("spark.hadoopRDD.ignoreEmptySplits")
.internal()
.doc("When true, HadoopRDD/NewHadoopRDD will not create partitions for empty input splits.")
.booleanConf
.createWithDefault(false)
private[spark] val SECRET_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.regex")
.doc("Regex to decide which Spark configuration properties and environment variables in " +
"driver and executor environments contain sensitive information. When this regex matches " +
"a property key or value, the value is redacted from the environment UI and various logs " +
"like YARN and event logs.")
.regexConf
.createWithDefault("(?i)secret|password|token".r)
private[spark] val STRING_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.string.regex")
.doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
"information. When this regex matches a string part, that string part is replaced by a " +
"dummy value. This is currently used to redact the output of SQL explain commands.")
.regexConf
.createOptional
private[spark] val AUTH_SECRET =
ConfigBuilder("spark.authenticate.secret")
.stringConf
.createOptional
private[spark] val AUTH_SECRET_BIT_LENGTH =
ConfigBuilder("spark.authenticate.secretBitLength")
.intConf
.createWithDefault(256)
private[spark] val NETWORK_AUTH_ENABLED =
ConfigBuilder("spark.authenticate")
.booleanConf
.createWithDefault(false)
private[spark] val SASL_ENCRYPTION_ENABLED =
ConfigBuilder("spark.authenticate.enableSaslEncryption")
.booleanConf
.createWithDefault(false)
private[spark] val AUTH_SECRET_FILE =
ConfigBuilder("spark.authenticate.secret.file")
.doc("Path to a file that contains the authentication secret to use. The secret key is " +
"loaded from this path on both the driver and the executors if overrides are not set for " +
"either entity (see below). File-based secret keys are only allowed when using " +
"Kubernetes.")
.stringConf
.createOptional
private[spark] val AUTH_SECRET_FILE_DRIVER =
ConfigBuilder("spark.authenticate.secret.driver.file")
.doc("Path to a file that contains the authentication secret to use. Loaded by the " +
"driver. In Kubernetes client mode it is often useful to set a different secret " +
"path for the driver vs. the executors, since the driver may not be running in " +
"a pod unlike the executors. If this is set, an accompanying secret file must " +
"be specified for the executors. The fallback configuration allows the same path to be " +
"used for both the driver and the executors when running in cluster mode. File-based " +
"secret keys are only allowed when using Kubernetes.")
.fallbackConf(AUTH_SECRET_FILE)
private[spark] val AUTH_SECRET_FILE_EXECUTOR =
ConfigBuilder("spark.authenticate.secret.executor.file")
.doc("Path to a file that contains the authentication secret to use. Loaded by the " +
"executors only. In Kubernetes client mode it is often useful to set a different " +
"secret path for the driver vs. the executors, since the driver may not be running " +
"in a pod unlike the executors. If this is set, an accompanying secret file must be " +
"specified for the executors. The fallback configuration allows the same path to be " +
"used for both the driver and the executors when running in cluster mode. File-based " +
"secret keys are only allowed when using Kubernetes.")
.fallbackConf(AUTH_SECRET_FILE)
private[spark] val BUFFER_WRITE_CHUNK_SIZE =
ConfigBuilder("spark.buffer.write.chunkSize")
.internal()
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
"The chunk size during writing out the bytes of ChunkedByteBuffer should" +
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(64 * 1024 * 1024)
private[spark] val CHECKPOINT_COMPRESS =
ConfigBuilder("spark.checkpoint.compress")
.doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_ACCURATE_BLOCK_THRESHOLD =
ConfigBuilder("spark.shuffle.accurateBlockThreshold")
.doc("Threshold in bytes above which the size of shuffle blocks in " +
"HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM " +
"by avoiding underestimating shuffle block size when fetch shuffle blocks.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(100 * 1024 * 1024)
private[spark] val SHUFFLE_REGISTRATION_TIMEOUT =
ConfigBuilder("spark.shuffle.registration.timeout")
.doc("Timeout in milliseconds for registration to the external shuffle service.")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(5000)
private[spark] val SHUFFLE_REGISTRATION_MAX_ATTEMPTS =
ConfigBuilder("spark.shuffle.registration.maxAttempts")
.doc("When we fail to register to the external shuffle service, we will " +
"retry for maxAttempts times.")
.intConf
.createWithDefault(3)
private[spark] val REDUCER_MAX_BLOCKS_IN_FLIGHT_PER_ADDRESS =
ConfigBuilder("spark.reducer.maxBlocksInFlightPerAddress")
.doc("This configuration limits the number of remote blocks being fetched per reduce task " +
"from a given host port. When a large number of blocks are being requested from a given " +
"address in a single fetch or simultaneously, this could crash the serving executor or " +
"Node Manager. This is especially useful to reduce the load on the Node Manager when " +
"external shuffle is enabled. You can mitigate the issue by setting it to a lower value.")
.intConf
.checkValue(_ > 0, "The max no. of blocks in flight cannot be non-positive.")
.createWithDefault(Int.MaxValue)
private[spark] val MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM =
ConfigBuilder("spark.maxRemoteBlockSizeFetchToMem")
.doc("Remote block will be fetched to disk when size of the block is above this threshold " +
"in bytes. This is to avoid a giant request takes too much memory. Note this " +
"configuration will affect both shuffle fetch and block manager remote block fetch. " +
"For users who enabled external shuffle service, this feature can only work when " +
"external shuffle service is at least 2.3.0.")
.bytesConf(ByteUnit.BYTE)
// fetch-to-mem is guaranteed to fail if the message is bigger than 2 GB, so we might
// as well use fetch-to-disk in that case. The message includes some metadata in addition
// to the block data itself (in particular UploadBlock has a lot of metadata), so we leave
// extra room.
.checkValue(
_ <= Int.MaxValue - 512,
"maxRemoteBlockSizeFetchToMem cannot be larger than (Int.MaxValue - 512) bytes.")
.createWithDefaultString("200m")
private[spark] val TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES =
ConfigBuilder("spark.taskMetrics.trackUpdatedBlockStatuses")
.doc("Enable tracking of updatedBlockStatuses in the TaskMetrics. Off by default since " +
"tracking the block statuses can use a lot of memory and its not used anywhere within " +
"spark.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_FILE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.file.buffer")
.doc("Size of the in-memory buffer for each shuffle file output stream, in KiB unless " +
"otherwise specified. These buffers reduce the number of disk seeks and system calls " +
"made in creating intermediate shuffle files.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The file buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")
private[spark] val SHUFFLE_UNSAFE_FILE_OUTPUT_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.unsafe.file.output.buffer")
.doc("The file system for this buffer size after each partition " +
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")
private[spark] val SHUFFLE_DISK_WRITE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.spill.diskWriteBufferSize")
.doc("The buffer size, in bytes, to use when writing the sorted records to an on-disk file.")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v > 12 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
s"The buffer size must be greater than 12 and less than or equal to " +
s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(1024 * 1024)
private[spark] val UNROLL_MEMORY_CHECK_PERIOD =
ConfigBuilder("spark.storage.unrollMemoryCheckPeriod")
.internal()
.doc("The memory check period is used to determine how often we should check whether "
+ "there is a need to request more memory when we try to unroll the given block in memory.")
.longConf
.createWithDefault(16)
private[spark] val UNROLL_MEMORY_GROWTH_FACTOR =
ConfigBuilder("spark.storage.unrollMemoryGrowthFactor")
.internal()
.doc("Memory to request as a multiple of the size that used to unroll the block.")
.doubleConf
.createWithDefault(1.5)
private[spark] val FORCE_DOWNLOAD_SCHEMES =
ConfigBuilder("spark.yarn.dist.forceDownloadSchemes")
.doc("Comma-separated list of schemes for which resources will be downloaded to the " +
"local disk prior to being added to YARN's distributed cache. For use in cases " +
"where the YARN service does not support schemes that are supported by Spark, like http, " +
"https and ftp, or jars required to be in the local YARN client's classpath. Wildcard " +
"'*' is denoted to download resources for all the schemes.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val EXTRA_LISTENERS = ConfigBuilder("spark.extraListeners")
.doc("Class names of listeners to add to SparkContext during initialization.")
.stringConf
.toSequence
.createOptional
private[spark] val SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.numElementsForceSpillThreshold")
.internal()
.doc("The maximum number of elements in memory before forcing the shuffle sorter to spill. " +
"By default it's Integer.MAX_VALUE, which means we never force the sorter to spill, " +
"until we reach some limitations, like the max page size limitation for the pointer " +
"array in the sorter.")
.intConf
.createWithDefault(Integer.MAX_VALUE)
private[spark] val SHUFFLE_MAP_OUTPUT_PARALLEL_AGGREGATION_THRESHOLD =
ConfigBuilder("spark.shuffle.mapOutput.parallelAggregationThreshold")
.internal()
.doc("Multi-thread is used when the number of mappers * shuffle partitions is greater than " +
"or equal to this threshold. Note that the actual parallelism is calculated by number of " +
"mappers * shuffle partitions / this threshold + 1, so this threshold should be positive.")
.intConf
.checkValue(v => v > 0, "The threshold should be positive.")
.createWithDefault(10000000)
private[spark] val MAX_RESULT_SIZE = ConfigBuilder("spark.driver.maxResultSize")
.doc("Size limit for results.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("1g")
private[spark] val CREDENTIALS_RENEWAL_INTERVAL_RATIO =
ConfigBuilder("spark.security.credentials.renewalRatio")
.doc("Ratio of the credential's expiration time when Spark should fetch new credentials.")
.doubleConf
.createWithDefault(0.75d)
private[spark] val CREDENTIALS_RENEWAL_RETRY_WAIT =
ConfigBuilder("spark.security.credentials.retryWait")
.doc("How long to wait before retrying to fetch new credentials after a failure.")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1h")
private[spark] val SHUFFLE_SORT_INIT_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.sort.initialBufferSize")
.internal()
.intConf
.checkValue(v => v > 0, "The value should be a positive integer.")
.createWithDefault(4096)
private[spark] val SHUFFLE_COMPRESS =
ConfigBuilder("spark.shuffle.compress")
.doc("Whether to compress shuffle output. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SPILL_COMPRESS =
ConfigBuilder("spark.shuffle.spill.compress")
.doc("Whether to compress data spilled during shuffles. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SPILL_INITIAL_MEM_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.initialMemoryThreshold")
.internal()
.doc("Initial threshold for the size of a collection before we start tracking its " +
"memory usage.")
.longConf
.createWithDefault(5 * 1024 * 1024)
private[spark] val SHUFFLE_SPILL_BATCH_SIZE =
ConfigBuilder("spark.shuffle.spill.batchSize")
.internal()
.doc("Size of object batches when reading/writing from serializers.")
.longConf
.createWithDefault(10000)
private[spark] val SHUFFLE_SORT_BYPASS_MERGE_THRESHOLD =
ConfigBuilder("spark.shuffle.sort.bypassMergeThreshold")
.doc("In the sort-based shuffle manager, avoid merge-sorting data if there is no " +
"map-side aggregation and there are at most this many reduce partitions")
.intConf
.createWithDefault(200)
private[spark] val SHUFFLE_MANAGER =
ConfigBuilder("spark.shuffle.manager")
.stringConf
.createWithDefault("sort")
private[spark] val SHUFFLE_REDUCE_LOCALITY_ENABLE =
ConfigBuilder("spark.shuffle.reduceLocality.enabled")
.doc("Whether to compute locality preferences for reduce tasks")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_MAPOUTPUT_MIN_SIZE_FOR_BROADCAST =
ConfigBuilder("spark.shuffle.mapOutput.minSizeForBroadcast")
.doc("The size at which we use Broadcast to send the map output statuses to the executors.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("512k")
private[spark] val SHUFFLE_MAPOUTPUT_DISPATCHER_NUM_THREADS =
ConfigBuilder("spark.shuffle.mapOutput.dispatcher.numThreads")
.intConf
.createWithDefault(8)
private[spark] val SHUFFLE_DETECT_CORRUPT =
ConfigBuilder("spark.shuffle.detectCorrupt")
.doc("Whether to detect any corruption in fetched blocks.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SYNC =
ConfigBuilder("spark.shuffle.sync")
.doc("Whether to force outstanding writes to disk.")
.booleanConf
.createWithDefault(false)
  private[spark] val SHUFFLE_UNSAFE_FAST_MERGE_ENABLE =
ConfigBuilder("spark.shuffle.unsafe.fastMergeEnabled")
.doc("Whether to perform a fast spill merge.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SORT_USE_RADIXSORT =
ConfigBuilder("spark.shuffle.sort.useRadixSort")
.doc("Whether to use radix sort for sorting in-memory partition ids. Radix sort is much " +
"faster, but requires additional memory to be reserved memory as pointers are added.")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS =
ConfigBuilder("spark.shuffle.minNumPartitionsToHighlyCompress")
.internal()
.doc("Number of partitions to determine if MapStatus should use HighlyCompressedMapStatus")
.intConf
.checkValue(v => v > 0, "The value should be a positive integer.")
.createWithDefault(2000)
private[spark] val MEMORY_MAP_LIMIT_FOR_TESTS =
ConfigBuilder("spark.storage.memoryMapLimitForTests")
.internal()
.doc("For testing only, controls the size of chunks when memory mapping a file")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
private[spark] val BARRIER_SYNC_TIMEOUT =
ConfigBuilder("spark.barrier.sync.timeout")
.doc("The timeout in seconds for each barrier() call from a barrier task. If the " +
"coordinator didn't receive all the sync messages from barrier tasks within the " +
"configed time, throw a SparkException to fail all the tasks. The default value is set " +
"to 31536000(3600 * 24 * 365) so the barrier() call shall wait for one year.")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v > 0, "The value should be a positive time value.")
.createWithDefaultString("365d")
private[spark] val UNSCHEDULABLE_TASKSET_TIMEOUT =
ConfigBuilder("spark.scheduler.blacklist.unschedulableTaskSetTimeout")
.doc("The timeout in seconds to wait to acquire a new executor and schedule a task " +
"before aborting a TaskSet which is unschedulable because of being completely blacklisted.")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v >= 0, "The value should be a non negative time value.")
.createWithDefault(120)
private[spark] val BARRIER_MAX_CONCURRENT_TASKS_CHECK_INTERVAL =
ConfigBuilder("spark.scheduler.barrier.maxConcurrentTasksCheck.interval")
.doc("Time in seconds to wait between a max concurrent tasks check failure and the next " +
"check. A max concurrent tasks check ensures the cluster can launch more concurrent " +
"tasks than required by a barrier stage on job submitted. The check can fail in case " +
"a cluster has just started and not enough executors have registered, so we wait for a " +
"little while and try to perform the check again. If the check fails more than a " +
"configured max failure times for a job then fail current job submission. Note this " +
"config only applies to jobs that contain one or more barrier stages, we won't perform " +
"the check on non-barrier jobs.")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("15s")
private[spark] val BARRIER_MAX_CONCURRENT_TASKS_CHECK_MAX_FAILURES =
ConfigBuilder("spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures")
.doc("Number of max concurrent tasks check failures allowed before fail a job submission. " +
"A max concurrent tasks check ensures the cluster can launch more concurrent tasks than " +
"required by a barrier stage on job submitted. The check can fail in case a cluster " +
"has just started and not enough executors have registered, so we wait for a little " +
"while and try to perform the check again. If the check fails more than a configured " +
"max failure times for a job then fail current job submission. Note this config only " +
"applies to jobs that contain one or more barrier stages, we won't perform the check on " +
"non-barrier jobs.")
.intConf
.checkValue(v => v > 0, "The max failures should be a positive value.")
.createWithDefault(40)
private[spark] val UNSAFE_EXCEPTION_ON_MEMORY_LEAK =
ConfigBuilder("spark.unsafe.exceptionOnMemoryLeak")
.internal()
.booleanConf
.createWithDefault(false)
private[spark] val UNSAFE_SORTER_SPILL_READ_AHEAD_ENABLED =
ConfigBuilder("spark.unsafe.sorter.spill.read.ahead.enabled")
.internal()
.booleanConf
.createWithDefault(true)
private[spark] val UNSAFE_SORTER_SPILL_READER_BUFFER_SIZE =
ConfigBuilder("spark.unsafe.sorter.spill.reader.buffer.size")
.internal()
.bytesConf(ByteUnit.BYTE)
.checkValue(v => 1024 * 1024 <= v && v <= MAX_BUFFER_SIZE_BYTES,
s"The value must be in allowed range [1,048,576, ${MAX_BUFFER_SIZE_BYTES}].")
.createWithDefault(1024 * 1024)
private[spark] val EXECUTOR_PLUGINS =
ConfigBuilder("spark.executor.plugins")
.doc("Comma-separated list of class names for \"plugins\" implementing " +
"org.apache.spark.ExecutorPlugin. Plugins have the same privileges as any task " +
"in a Spark executor. They can also interfere with task execution and fail in " +
"unexpected ways. So be sure to only use this for trusted plugins.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val CLEANER_PERIODIC_GC_INTERVAL =
ConfigBuilder("spark.cleaner.periodicGC.interval")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("30min")
private[spark] val CLEANER_REFERENCE_TRACKING =
ConfigBuilder("spark.cleaner.referenceTracking")
.booleanConf
.createWithDefault(true)
private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING =
ConfigBuilder("spark.cleaner.referenceTracking.blocking")
.booleanConf
.createWithDefault(true)
private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING_SHUFFLE =
ConfigBuilder("spark.cleaner.referenceTracking.blocking.shuffle")
.booleanConf
.createWithDefault(false)
private[spark] val CLEANER_REFERENCE_TRACKING_CLEAN_CHECKPOINTS =
ConfigBuilder("spark.cleaner.referenceTracking.cleanCheckpoints")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_LOGS_ROLLING_STRATEGY =
ConfigBuilder("spark.executor.logs.rolling.strategy").stringConf.createWithDefault("")
private[spark] val EXECUTOR_LOGS_ROLLING_TIME_INTERVAL =
ConfigBuilder("spark.executor.logs.rolling.time.interval").stringConf.createWithDefault("daily")
private[spark] val EXECUTOR_LOGS_ROLLING_MAX_SIZE =
ConfigBuilder("spark.executor.logs.rolling.maxSize")
.stringConf
.createWithDefault((1024 * 1024).toString)
private[spark] val EXECUTOR_LOGS_ROLLING_MAX_RETAINED_FILES =
ConfigBuilder("spark.executor.logs.rolling.maxRetainedFiles").intConf.createWithDefault(-1)
private[spark] val EXECUTOR_LOGS_ROLLING_ENABLE_COMPRESSION =
ConfigBuilder("spark.executor.logs.rolling.enableCompression")
.booleanConf
.createWithDefault(false)
private[spark] val MASTER_REST_SERVER_ENABLED = ConfigBuilder("spark.master.rest.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val MASTER_REST_SERVER_PORT = ConfigBuilder("spark.master.rest.port")
.intConf
.createWithDefault(6066)
private[spark] val MASTER_UI_PORT = ConfigBuilder("spark.master.ui.port")
.intConf
.createWithDefault(8080)
private[spark] val IO_COMPRESSION_SNAPPY_BLOCKSIZE =
ConfigBuilder("spark.io.compression.snappy.blockSize")
.doc("Block size in bytes used in Snappy compression, in the case when " +
"Snappy compression codec is used. Lowering this block size " +
"will also lower shuffle memory usage when Snappy is used")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_LZ4_BLOCKSIZE =
ConfigBuilder("spark.io.compression.lz4.blockSize")
.doc("Block size in bytes used in LZ4 compression, in the case when LZ4 compression" +
"codec is used. Lowering this block size will also lower shuffle memory " +
"usage when LZ4 is used.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_CODEC =
ConfigBuilder("spark.io.compression.codec")
.doc("The codec used to compress internal data such as RDD partitions, event log, " +
"broadcast variables and shuffle outputs. By default, Spark provides four codecs: " +
"lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
"the codec")
.stringConf
.createWithDefaultString("lz4")
private[spark] val IO_COMPRESSION_ZSTD_BUFFERSIZE =
ConfigBuilder("spark.io.compression.zstd.bufferSize")
.doc("Buffer size in bytes used in Zstd compression, in the case when Zstd " +
"compression codec is used. Lowering this size will lower the shuffle " +
"memory usage when Zstd is used, but it might increase the compression " +
"cost because of excessive JNI call overhead")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_ZSTD_LEVEL =
ConfigBuilder("spark.io.compression.zstd.level")
.doc("Compression level for Zstd compression codec. Increasing the compression " +
"level will result in better compression at the expense of more CPU and memory")
.intConf
.createWithDefault(1)
private[spark] val BUFFER_SIZE =
ConfigBuilder("spark.buffer.size")
.intConf
.createWithDefault(65536)
private[spark] val LOCALITY_WAIT_PROCESS = ConfigBuilder("spark.locality.wait.process")
.fallbackConf(LOCALITY_WAIT)
private[spark] val LOCALITY_WAIT_NODE = ConfigBuilder("spark.locality.wait.node")
.fallbackConf(LOCALITY_WAIT)
private[spark] val LOCALITY_WAIT_RACK = ConfigBuilder("spark.locality.wait.rack")
.fallbackConf(LOCALITY_WAIT)
private[spark] val REDUCER_MAX_SIZE_IN_FLIGHT = ConfigBuilder("spark.reducer.maxSizeInFlight")
.doc("Maximum size of map outputs to fetch simultaneously from each reduce task, " +
"in MiB unless otherwise specified. Since each output requires us to create a " +
"buffer to receive it, this represents a fixed memory overhead per reduce task, " +
"so keep it small unless you have a large amount of memory")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("48m")
private[spark] val REDUCER_MAX_REQS_IN_FLIGHT = ConfigBuilder("spark.reducer.maxReqsInFlight")
.doc("This configuration limits the number of remote requests to fetch blocks at " +
"any given point. When the number of hosts in the cluster increase, " +
"it might lead to very large number of inbound connections to one or more nodes, " +
"causing the workers to fail under load. By allowing it to limit the number of " +
"fetch requests, this scenario can be mitigated")
.intConf
.createWithDefault(Int.MaxValue)
private[spark] val BROADCAST_COMPRESS = ConfigBuilder("spark.broadcast.compress")
.doc("Whether to compress broadcast variables before sending them. " +
"Generally a good idea. Compression will use spark.io.compression.codec")
.booleanConf.createWithDefault(true)
private[spark] val BROADCAST_BLOCKSIZE = ConfigBuilder("spark.broadcast.blockSize")
.doc("Size of each piece of a block for TorrentBroadcastFactory, in " +
"KiB unless otherwise specified. Too large a value decreases " +
"parallelism during broadcast (makes it slower); however, " +
"if it is too small, BlockManager might take a performance hit")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("4m")
private[spark] val BROADCAST_CHECKSUM = ConfigBuilder("spark.broadcast.checksum")
.doc("Whether to enable checksum for broadcast. If enabled, " +
"broadcasts will include a checksum, which can help detect " +
"corrupted blocks, at the cost of computing and sending a little " +
"more data. It's possible to disable it if the network has other " +
"mechanisms to guarantee data won't be corrupted during broadcast")
.booleanConf.createWithDefault(true)
private[spark] val RDD_COMPRESS = ConfigBuilder("spark.rdd.compress")
.doc("Whether to compress serialized RDD partitions " +
"(e.g. for StorageLevel.MEMORY_ONLY_SER in Scala " +
"or StorageLevel.MEMORY_ONLY in Python). Can save substantial " +
"space at the cost of some extra CPU time. " +
"Compression will use spark.io.compression.codec")
.booleanConf.createWithDefault(false)
private[spark] val RDD_PARALLEL_LISTING_THRESHOLD =
ConfigBuilder("spark.rdd.parallelListingThreshold")
.intConf
.createWithDefault(10)
private[spark] val RDD_LIMIT_SCALE_UP_FACTOR =
ConfigBuilder("spark.rdd.limit.scaleUpFactor")
.intConf
.createWithDefault(4)
private[spark] val SERIALIZER = ConfigBuilder("spark.serializer")
.stringConf
.createWithDefault("org.apache.spark.serializer.JavaSerializer")
private[spark] val SERIALIZER_OBJECT_STREAM_RESET =
ConfigBuilder("spark.serializer.objectStreamReset")
.intConf
.createWithDefault(100)
private[spark] val SERIALIZER_EXTRA_DEBUG_INFO = ConfigBuilder("spark.serializer.extraDebugInfo")
.booleanConf
.createWithDefault(true)
private[spark] val JARS = ConfigBuilder("spark.jars")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val FILES = ConfigBuilder("spark.files")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val SUBMIT_DEPLOY_MODE = ConfigBuilder("spark.submit.deployMode")
.stringConf
.createWithDefault("client")
private[spark] val SUBMIT_PYTHON_FILES = ConfigBuilder("spark.submit.pyFiles")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val SCHEDULER_ALLOCATION_FILE =
ConfigBuilder("spark.scheduler.allocation.file")
.stringConf
.createOptional
private[spark] val SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO =
ConfigBuilder("spark.scheduler.minRegisteredResourcesRatio")
.doubleConf
.createOptional
private[spark] val SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME =
ConfigBuilder("spark.scheduler.maxRegisteredResourcesWaitingTime")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("30s")
private[spark] val SCHEDULER_MODE =
ConfigBuilder("spark.scheduler.mode")
.stringConf
.createWithDefault(SchedulingMode.FIFO.toString)
private[spark] val SCHEDULER_REVIVE_INTERVAL =
ConfigBuilder("spark.scheduler.revive.interval")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val SPECULATION_ENABLED =
ConfigBuilder("spark.speculation")
.booleanConf
.createWithDefault(false)
private[spark] val SPECULATION_INTERVAL =
ConfigBuilder("spark.speculation.interval")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(100)
private[spark] val SPECULATION_MULTIPLIER =
ConfigBuilder("spark.speculation.multiplier")
.doubleConf
.createWithDefault(1.5)
private[spark] val SPECULATION_QUANTILE =
ConfigBuilder("spark.speculation.quantile")
.doubleConf
.createWithDefault(0.75)
private[spark] val STAGING_DIR = ConfigBuilder("spark.yarn.stagingDir")
.doc("Staging directory used while submitting applications.")
.stringConf
.createOptional
}
|
yanboliang/spark
|
core/src/main/scala/org/apache/spark/internal/config/package.scala
|
Scala
|
apache-2.0
| 57,215 |
package ch.epfl.lamp.odds
import org.scalatest.FlatSpecLike
import org.scalatest.Matchers
import inference._
trait CListOddsLang extends OddsLang {
import Rand.ToScalaMonadic
// A list where the tail itself is a random var.
sealed abstract class CList[+A] {
def length: Rand[Int] = this match {
case CNil => 0
case CCons(hd, tl) => tl.length + 1
}
def isEmpty: Boolean = this match {
case CNil => true
case CCons(hd, tl) => false
}
def splitAt(n: Int): Rand[(CList[A], CList[A])] = {
if (n==0) always((CNil, this)) else this match {
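        // Only the non-empty case is matched for n > 0; callers such as uniformSplit
        // keep n within the list length, so a CNil branch is never needed here.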
case CCons(head, tail) =>
for (tl <- tail; (s1, s2) <- tl.splitAt(n-1))
yield (CCons(head, always(s1)), s2)
}
}
def uniformSplit: Rand[(CList[A], CList[A])] =
for (l <- length; i <- uniform(0 to l: _*); r <- splitAt(i))
yield r
}
case object CNil extends CList[Nothing]
case class CCons[+A](head: A, taill: Rand[CList[A]]) extends CList[A]
def infix_head[A](rs: Rand[CList[A]]): Rand[A] =
for (s <- rs) yield (s match {
case CCons(head, tail) => head
})
def infix_tail[A](rs: Rand[CList[A]]): Rand[CList[A]] =
for (s <- rs; tl <- s match {
case CCons(head, tail) => tail
}) yield tl
def nil[A]: Rand[CList[A]] = always(CNil)
type PTransform[A] = CList[A] => Rand[CList[A]]
def lmap[A](f: A => Rand[A]): PTransform[A] = x => x match {
case CNil => nil
case CCons(head, tail) =>
for (hd <- f(head); tl <- tail) yield CCons(hd, lmap(f)(tl))
}
def lappend[A](xs: Rand[CList[A]], ys: Rand[CList[A]]): Rand[CList[A]] = xs flatMap {
case CNil => ys
case CCons(head, tail) => always(CCons(head, lappend(tail, ys)))
}
def lobserve[A](s: Rand[CList[A]], l: List[A]): Rand[Boolean] = l match {
case Nil => true when s.isEmpty
case hd::tl =>
s flatMap {
case CNil => never //always(false)
case CCons(rhd, rtail) =>
if (rhd == hd) lobserve(rtail, tl)
else never //always(false)
}
}
def asCList[A](x: List[A]): CList[A] = x match {
case Nil => CNil
case x::xs => CCons(x, always(asCList(xs)))
}
def asLists[A](x: Rand[CList[A]]): Rand[List[A]] = x flatMap {
case CNil => always(Nil)
case CCons(x, xs) => asLists(xs).map(xs=>x::xs)
}
}
// Translated from
// http://okmij.org/ftp/kakuritu/index.html#music
// http://okmij.org/ftp/kakuritu/music1a.ml
trait CMusicWarmUpModel extends CListOddsLang with Notes {
import Rand.ToScalaMonadic
// Note Transformations
// Transpose a note by 1 interval
def transpose1(n: Note) = n match {
case C => choose(Csharp -> 0.3, D -> 0.6, Dsharp -> 0.1)
case Csharp => choose(D -> 0.4, Dsharp -> 0.6)
case D => choose(Dsharp -> 0.3, E -> 0.7)
case Dsharp => choose(E -> 0.7, F -> 0.3)
case E => choose(F -> 0.6, Fsharp -> 0.4)
case F => choose(Fsharp -> 0.3, G -> 0.6, Gsharp -> 0.1)
case Fsharp => choose(G -> 0.4, Gsharp -> 0.6)
case G => choose(Gsharp -> 0.3, A -> 0.6, Asharp -> 0.1)
case Gsharp => choose(A -> 0.4, Asharp -> 0.6)
case A => choose(Asharp -> 0.3, B -> 0.7)
case Asharp => choose(B -> 0.7, C -> 0.3)
case B => choose(C -> 0.6, Csharp -> 0.4)
}
// Transpose a note by 5 intervals
def transpose5(n: Note) = n match {
case C => choose(F -> 0.3, Fsharp -> 0.1, G -> 0.55, Gsharp -> 0.05)
case Csharp => choose(Fsharp -> 0.3, G -> 0.4, Gsharp -> 0.3)
case D => choose(G -> 0.3, Gsharp -> 0.1, A -> 0.55, Asharp -> 0.05)
case Dsharp => choose(Gsharp -> 0.3, A -> 0.4, Asharp -> 0.3)
case E => choose(A -> 0.3, Asharp -> 0.1, B -> 0.55, C -> 0.05)
case F => choose(Asharp -> 0.1, B -> 0.2, C -> 0.6, Csharp -> 0.1)
case Fsharp => choose(B -> 0.3, C -> 0.4, Csharp -> 0.3)
case G => choose(C -> 0.3, Csharp -> 0.1, D -> 0.55, Dsharp -> 0.05)
case Gsharp => choose(Csharp -> 0.3, D -> 0.4, Dsharp -> 0.3)
case A => choose(D -> 0.3, Dsharp -> 0.1, E -> 0.55, F -> 0.05)
case Asharp => choose(Dsharp -> 0.3, E -> 0.3, F -> 0.4)
case B => choose(E -> 0.3, F -> 0.3, Fsharp -> 0.3, G -> 0.1)
}
val f_ide: PTransform[Note] = x => always(x)
val f_del: PTransform[Note] = x => nil
val f_tr1: PTransform[Note] = lmap(transpose1)
val f_tr5: PTransform[Note] = lmap(transpose5)
val transform: PTransform[Note] = x => x match {
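    // Randomly pick two of the four note transformations, split the input melody at a
    // uniformly chosen point, apply one transformation to each half and append the results.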
case CNil => nil
case CCons(headd, tail) =>
for (
input <- always(x);
f1 <- choose(
f_ide -> 0.5,
f_del -> 0.2,
f_tr1 -> 0.2,
f_tr5 -> 0.1);
f2 <- choose(
f_ide -> 0.5,
f_del -> 0.2,
f_tr1 -> 0.2,
f_tr5 -> 0.1);
s <- input.uniformSplit;
r <- lappend(f1(s._1), f2(s._2)))
yield r
}
def main = {
val input = asCList(List(A, B, C))
val x = transform(input)
x.tail.head when (x.length == 3 && x.head == Asharp)
}
}
class CMusicWarmUpModelExactTest
extends CMusicWarmUpModel
with ExactInference
with OddsPrettyPrint
with FlatSpecLike
with Matchers {
behavior of "CMusicWarmUpModel with Exact Inference"
it should "show the results of exactly inferring main" in {
val r = normalize(reify(main))
show(r, "exact main")
r.toMap foreach {
case (G, p) => p should be (0.002777777 +- 1e-9)
case (Fsharp, p) => p should be (0.008333333 +- 1e-9)
case (F, p) => p should be (0.008333333 +- 1e-9)
case (E, p) => p should be (0.008333333 +- 1e-9)
case (C, p) => p should be (0.5 +- 1e-9)
case (Csharp, p) => p should be (0.333333333 +- 1e-9)
case (B, p) => p should be (0.138888888 +- 1e-9)
case x => throw new Exception("unexpected note: " + x)
}
}
}
class CMusicWarmUpModelLISTest
extends CMusicWarmUpModel
with LocalImportanceSampling
with OddsPrettyPrint
with FlatSpecLike {
behavior of "CMusicWarmUpModel with LIS"
it should "show the results of LIS inferring main" in {
val r = sample(1000, 3)(main)
show(r, "LIS exact main")
}
}
|
sstucki/odds
|
core/src/test/scala/odds/CMusicWarmUpModelTest.scala
|
Scala
|
bsd-3-clause
| 6,206 |
/*
Copyright (c) 2016, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.pilar.test.ast
import org.sireum.test.{UTestTestFramework, TestDefProvider}
import utest._
object AstJsTest extends UTestTestFramework {
def main(args: Array[String]): Unit = {
generate()
}
override lazy val provider: TestDefProvider =
new AstTestDefProvider(this)
def tests = utest.Tests {
// This uTest list is auto-generated from data in
// AstTestDefProvider
"EmptyModel" - {
test("EmptyModel")
}
"PicklingEmptyModel" - {
test("PicklingEmptyModel")
}
"PicklingModel" - {
test("PicklingModel")
}
"PicklingModelFastParsed1" - {
test("PicklingModelFastParsed1")
}
"PicklingModelFastParsed1Loc" - {
test("PicklingModelFastParsed1Loc")
}
"PicklingModelFastParsed2" - {
test("PicklingModelFastParsed2")
}
"PicklingModelFastParsed2Loc" - {
test("PicklingModelFastParsed2Loc")
}
"PicklingModelFastParsed3" - {
test("PicklingModelFastParsed3")
}
"PicklingModelFastParsed3Loc" - {
test("PicklingModelFastParsed3Loc")
}
"RewriteAnnotationId" - {
test("RewriteAnnotationId")
}
}
}
|
sireum/v3
|
pilar/js/src/test/scala/org/sireum/pilar/test/ast/AstJsTest.scala
|
Scala
|
bsd-2-clause
| 2,518 |
import org.scalatestplus.play._
import play.api.test._
import play.api.test.Helpers._
/**
* Add your spec here.
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
class ApplicationSpec extends PlaySpec with OneAppPerTest {
"Routes" should {
"send 404 on a bad request" in {
route(app, FakeRequest(GET, "/boum")).map(status(_)) mustBe Some(NOT_FOUND)
}
}
"HomeController" should {
"render the index page" in {
val home = route(app, FakeRequest(GET, "/")).get
status(home) mustBe OK
contentType(home) mustBe Some("text/html")
contentAsString(home) must include ("Your new application is ready.")
}
}
"CountController" should {
"return an increasing count" in {
contentAsString(route(app, FakeRequest(GET, "/count")).get) mustBe "0"
contentAsString(route(app, FakeRequest(GET, "/count")).get) mustBe "1"
contentAsString(route(app, FakeRequest(GET, "/count")).get) mustBe "2"
}
}
}
|
eatcodenet/trainwatch
|
tw-webapp/_archived/test/ApplicationSpec.scala
|
Scala
|
mit
| 1,045 |
package nl.codecentric.assumption.dsl.core.definition
import scala.concurrent.duration._
/**
* Created by hylke on 06/07/15.
*/
class SampleDefinition extends BaseDefinition {
"this is baseline" baseline (() => {
println("x")
})
"this is assume" assumes (() => {
println("y")
})
"this is failure" failure (() => {
println("y")
})
"this is success" success (() => {
println("y")
})
"this is time" time (60 seconds)
}
|
craftsmenlabs/gareth-poc
|
dsl/core/src/test/scala/nl/codecentric/assumption/dsl/core/definition/SampleDefinition.scala
|
Scala
|
gpl-2.0
| 460 |
import scala.collection.mutable.Map
// !< Can be created in a way similar to Array
val treasureMap = Map[Int, String]()
treasureMap += (1 -> "Go to island")
treasureMap += (2 -> "Find big X on ground")
treasureMap += (3 -> "Dig.")
println(treasureMap)
// !< Can also be created by enumerating the entries explicitly!
val romanNumeral = Map(1 -> "I", 2 -> "II", 3 -> "III", 4 -> "IV", 5 -> "V")
|
fangguanya/study
|
Java/scala_shell/scalamap.scala
|
Scala
|
mit
| 352 |
package com.googlecode.kanbanik.builders
import org.bson.types.ObjectId
import com.googlecode.kanbanik.model._
import com.googlecode.kanbanik.commands.TaskManipulation
import com.googlecode.kanbanik.dtos.TaskDto
class TaskBuilder extends TaskManipulation {
lazy val classOfServiceBuilder = new ClassOfServiceBuilder
def buildDto(task: Task): TaskDto = {
TaskDto(
Some(task.id.get.toString),
task.name,
{
if (task.description == null || task.description == "") {
None
} else {
Some(task.description)
}
},
{
if (task.classOfService.isDefined) {
Some(classOfServiceBuilder.buildDto(task.classOfService.get))
} else {
None
}
},
Some(task.ticketId),
task.workflowitemId.toString,
task.version,
task.projectId.toString,
{
if (task.assignee.isDefined) {
Some(UserBuilder.buildDto(task.assignee.get, ""))
} else {
None
}
},
Some(task.order),
if (task.dueData == null || task.dueData == "") {
None
} else {
Some(task.dueData)
},
{
if (task.boardId == null) {
null
} else {
task.boardId.toString
}
},
task.taskTags
)
}
def buildEntity(taskDto: TaskDto): Task = {
new Task(
{
if (!taskDto.id.isDefined) {
None
} else {
Some(new ObjectId(taskDto.id.get))
}
},
taskDto.name,
taskDto.description.getOrElse(""),
{
if (!taskDto.classOfService.isDefined) {
None
} else {
Some(classOfServiceBuilder.buildEntity(taskDto.classOfService.get))
}
},
determineTicketId(taskDto),
taskDto.version,
taskDto.order.orNull,
{
if (!taskDto.assignee.isDefined) {
None
} else {
Some(UserBuilder.buildEntity(taskDto.assignee.get))
}
},
taskDto.dueDate.getOrElse(""),
new ObjectId(taskDto.workflowitemId),
new ObjectId(taskDto.boardId),
new ObjectId(taskDto.projectId),
taskDto.taskTags
)
}
private def determineTicketId(taskDto: TaskDto): String = {
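    // New tasks (no id yet) get a freshly generated ticket id; an existing task
    // must already carry one, otherwise its state is inconsistent.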
if (!taskDto.id.isDefined) {
return generateUniqueTicketId()
}
if (taskDto.id.isDefined && !taskDto.ticketId.isDefined) {
throw new IllegalStateException("The task " + taskDto.id.get + " has not set a ticket id!")
}
taskDto.ticketId.get
}
}
|
kanbanik/kanbanik
|
kanbanik-server/src/main/scala/com/googlecode/kanbanik/builders/TaskBuilder.scala
|
Scala
|
apache-2.0
| 2,492 |
package db
import anorm._
import io.flow.common.v0.models.UserReference
import io.flow.delta.v0.models.{Publication, Subscription, SubscriptionForm}
import io.flow.postgresql.{OrderBy, Query}
import io.flow.util.IdGenerator
import play.api.db._
import scala.util.{Failure, Success, Try}
@javax.inject.Singleton
class SubscriptionsDao @javax.inject.Inject() (
db: Database,
usersDao: UsersDao,
delete: Delete
) {
private[this] val BaseQuery = Query(s"""
select subscriptions.id,
subscriptions.user_id,
subscriptions.publication
from subscriptions
""")
private[this] val InsertQuery = """
insert into subscriptions
(id, user_id, publication, updated_by_user_id)
values
({id}, {user_id}, {publication}, {updated_by_user_id})
"""
private[db] def validate(
form: SubscriptionForm
): Seq[String] = {
val userErrors = usersDao.findById(form.userId) match {
case None => Seq("User not found")
case Some(_) => Nil
}
val publicationErrors = form.publication match {
case Publication.UNDEFINED(_) => Seq("Invalid publication")
case _ => Nil
}
userErrors ++ publicationErrors
}
def upsert(createdBy: UserReference, form: SubscriptionForm): Subscription = {
findByUserIdAndPublication(form.userId, form.publication).getOrElse {
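      // The create below can race with a concurrent upsert for the same user and
      // publication; if it fails, look the subscription up again before giving up.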
Try {
create(createdBy, form) match {
case Left(errors) => sys.error(errors.mkString(", "))
case Right(sub) => sub
}
} match {
case Success(sub) => sub
case Failure(ex) => {
findByUserIdAndPublication(form.userId, form.publication).getOrElse {
throw new Exception("Failed to upsert subscription", ex)
}
}
}
}
}
def create(createdBy: UserReference, form: SubscriptionForm): Either[Seq[String], Subscription] = {
validate(form) match {
case Nil => {
val id = IdGenerator("sub").randomId()
db.withConnection { implicit c =>
SQL(InsertQuery).on(
Symbol("id") ->id,
Symbol("user_id") ->form.userId,
Symbol("publication") ->form.publication.toString,
Symbol("updated_by_user_id") ->createdBy.id
).execute()
}
Right(
findById(id).getOrElse {
sys.error("Failed to create subscription")
}
)
}
case errors => Left(errors)
}
}
def delete(deletedBy: UserReference, subscription: Subscription): Unit = {
delete.delete("subscriptions", deletedBy.id, subscription.id)
}
def findByUserIdAndPublication(
userId: String,
publication: Publication
): Option[Subscription] = {
findAll(
userId = Some(userId),
publication = Some(publication),
limit = Some(1)
).headOption
}
def findById(id: String): Option[Subscription] = {
findAll(id = Some(id), limit = Some(1)).headOption
}
def findAll(
id: Option[String] = None,
ids: Option[Seq[String]] = None,
userId: Option[String] = None,
identifier: Option[String] = None,
publication: Option[Publication] = None,
orderBy: OrderBy = OrderBy("subscriptions.created_at"),
limit: Option[Long],
offset: Long = 0
): Seq[Subscription] = {
db.withConnection { implicit c =>
Standards.query(
BaseQuery,
tableName = "subscriptions",
auth = Clause.True, // TODO
id = id,
ids = ids,
orderBy = orderBy.sql,
limit = limit,
offset = offset
).
equals("subscriptions.user_id", userId).
optionalText("subscriptions.publication", publication).
and(
identifier.map { _ =>
"subscriptions.user_id in (select user_id from user_identifiers where value = trim({identifier}))"
}
).bind("identifier", identifier).
as(
io.flow.delta.v0.anorm.parsers.Subscription.parser().*
)
}
}
}
|
flowcommerce/delta
|
api/app/db/SubscriptionsDao.scala
|
Scala
|
mit
| 3,985 |
package usbinstall.controllers
import java.net.URL
import java.util.ResourceBundle
import javafx.event.ActionEvent
import javafx.fxml.{FXML, Initializable}
import javafx.scene.control.{Button, ComboBox}
import javafx.stage.{Stage, Window}
import scala.annotation.unused
import suiryc.scala.log.LogLevel
import suiryc.scala.settings.SettingsSnapshot
import usbinstall.settings.{ErrorAction, Settings}
class OptionsController extends Initializable {
@FXML
protected var logInstallThreshold: ComboBox[LogLevel.LevelValue] = _
@FXML
protected var componentInstallError: ComboBox[ErrorAction.Value] = _
@FXML
protected var clearButton: Button = _
@FXML
protected var cancelButton: Button = _
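  // Snapshot of the settings taken when the pane is initialized; used to detect
  // pending changes (enabling/disabling Cancel) and to restore values on cancel.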
protected val snapshot = new SettingsSnapshot()
override def initialize(fxmlFileLocation: URL, resources: ResourceBundle): Unit = {
logInstallThreshold.getItems.setAll(LogLevel.levels.toList:_*)
componentInstallError.getItems.setAll(ErrorAction.values.toList:_*)
Settings.core.snapshot(snapshot)
update()
}
protected def updateCancelButton(): Unit = {
cancelButton.setDisable(!snapshot.changed())
}
protected def update(): Unit = {
logInstallThreshold.getSelectionModel.select(Settings.core.logInstallThreshold.get)
componentInstallError.getSelectionModel.select(Settings.core.componentInstallError.get)
updateCancelButton()
}
def onLogInstallThreshold(@unused event: ActionEvent): Unit = {
Settings.core.logInstallThreshold.set(logInstallThreshold.getValue)
updateCancelButton()
}
def onComponentInstallError(@unused event: ActionEvent): Unit = {
Settings.core.componentInstallError.set(componentInstallError.getValue)
updateCancelButton()
}
def onReset(@unused event: ActionEvent): Unit = {
Settings.core.logInstallThreshold.reset()
Settings.core.componentInstallError.reset()
    // Note: we need to update the pane; alternatively we could make persistent
    // properties out of these settings and update the controls when the values change.
update()
}
def onCancel(@unused event: ActionEvent): Unit = {
snapshot.reset()
update()
}
def onDone(@unused event: ActionEvent): Unit = {
window.asInstanceOf[Stage].close()
}
protected def window: Window =
logInstallThreshold.getScene.getWindow
}
|
suiryc/usbinstall
|
src/main/scala/usbinstall/controllers/OptionsController.scala
|
Scala
|
gpl-3.0
| 2,344 |
package com.googlecode.warikan.infrastructure.inject
import com.google.inject._
/**
* Injector.
*
* @author yukei
*/
object Injector {
private var _config:AbstractModule = _
/**
* Set configuration.
*
* @param config Configuration
*/
def config_= (config:AbstractModule) { _config = config }
/**
* Get instance of specified class.
*
* @param clazz Class to get instance
* @return T instance of specified class
*/
def getInstance[T](clazz:Class[T]):T = {
Guice.createInjector(_config).getInstance(clazz)
}
}
|
digitalsoul0124/warikan
|
src/main/scala/com/googlecode/warikan/infrastructure/inject/Injector.scala
|
Scala
|
mit
| 596 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2007-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.swing
import Swing._
/**
* A container with exactly two children. Arranges them side by side, either
* horizontally or vertically. Displays a draggable divider component between
* them that lets the user adjust the size ratio of the children.
*
* @see javax.swing.JSplitPane
*/
class SplitPane(o: Orientation.Value, left: Component, right: Component) extends Component with Container with Orientable.Wrapper {
override lazy val peer: javax.swing.JSplitPane =
new javax.swing.JSplitPane(o.id, left.peer, right.peer) with SuperMixin
def this(o: Orientation.Value) = this(o, new Component {}, new Component {})
def this() = this(Orientation.Horizontal)
def contents: Seq[Component] = List(leftComponent, rightComponent)
def contents_=(left: Component, right: Component) {
peer.setLeftComponent(nullPeer(left))
peer.setRightComponent(nullPeer(right))
}
def topComponent: Component =
UIElement.cachedWrapper[Component](peer.getTopComponent.asInstanceOf[javax.swing.JComponent])
def topComponent_=(c: Component) { peer.setTopComponent(nullPeer(c)) }
def bottomComponent: Component =
UIElement.cachedWrapper[Component](peer.getBottomComponent.asInstanceOf[javax.swing.JComponent])
def bottomComponent_=(c: Component) { peer.setBottomComponent(nullPeer(c)) }
def leftComponent: Component = topComponent
def leftComponent_=(c: Component) { topComponent = c }
def rightComponent: Component = bottomComponent
def rightComponent_=(c: Component) { bottomComponent = c }
def dividerLocation: Int = peer.getDividerLocation
def dividerLocation_=(n: Int) { peer.setDividerLocation(n) }
/*def proportionalDividerLocation: Double =
if (orientation == Orientation.Vertical) dividerLocation / (size.height - dividerSize)
else dividerLocation / (size.width - dividerSize)*/
def dividerLocation_=(f: Double) { peer.setDividerLocation(f) }
def dividerSize: Int = peer.getDividerSize
def dividerSize_=(n: Int) { peer.setDividerSize(n) }
def resizeWeight: Double = peer.getResizeWeight
def resizeWeight_=(n: Double) { peer.setResizeWeight(n) }
def resetToPreferredSizes() { peer.resetToPreferredSizes() }
def oneTouchExpandable: Boolean = peer.isOneTouchExpandable
def oneTouchExpandable_=(b: Boolean) { peer.setOneTouchExpandable(b) }
def continuousLayout: Boolean = peer.isContinuousLayout
def continuousLayout_=(b: Boolean) { peer.setContinuousLayout(b) }
}
|
SethTisue/scala-swing
|
src/main/scala/scala/swing/SplitPane.scala
|
Scala
|
bsd-3-clause
| 2,973 |
package me.lachlanap.oldtoby.server.helpers
import com.fasterxml.jackson.core.JsonProcessingException
import com.ning.http.client.AsyncHttpClientConfig
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.exceptions.TestFailedException
import org.scalatest.time.SpanSugar._
import play.api.libs.json.{JsNull, JsResultException, JsValue, Json}
import play.api.libs.ws.WSResponse
import play.api.libs.ws.ning.NingWSClient
import scala.concurrent.Future
/**
 * Performs the HTTP calls to the server.
*/
class ServerInterface(address: String) extends ScalaFutures {
import scala.concurrent.ExecutionContext.Implicits.global
val timeLimit = 500.millis
implicit override val patienceConfig = PatienceConfig(timeout = timeLimit, interval = 5.millis)
private val client = {
val builder = new AsyncHttpClientConfig.Builder()
new NingWSClient(builder.build())
}
def status: Status =
get("/status") {
case (200, _) => Up()
case _ => Down
}
def createJob(name: String, pipeline: Id): Either[Int, Job] = {
val json = Json.obj("name" -> name,
"pipeline" -> pipeline.value)
post("/job", json) {
case (201, result) =>
Right(jobFrom(result))
case (e, _) => Left(e)
}
}
def getJobs: List[Job] =
get("/job") {
case (200, result) =>
result.as[List[JsValue]].map(jobFrom)
case _ => List.empty
}
def ingest(job: Id, pages: Int): List[Id] = {
val json = Json.obj("pages" -> pages)
post(s"/job/${job.value }/page", json) {
case (201, result) =>
result.as[List[String]].map(Id)
case _ => List.empty
}
}
def pushMetadata(page: Id, stage: String, metadata: Map[String, String]) = {
pushBulkMetadata((page, stage, metadata) :: Nil)
}
def pushBulkMetadata(batch: List[(Id, String, Map[String, String])]) = {
val json = Json.toJson(
batch.map { case (page, stage, metadata) =>
Json.obj("page" -> page.value,
"stage" -> stage,
"metadata" -> Json.toJson(metadata))
}
)
post(s"/metadata", json) {
case (201, _) =>
case (e, _) => throw new Exception(s"Failed to push metadata; got HTTP $e")
}
}
def getMetadata(page: Id): Either[Int, Map[String, String]] =
get(s"/metadata/${page.value }") {
case (200, result) =>
Right(result.as[Map[String, String]])
case (e, _) => Left(e)
}
private def jobFrom(js: JsValue) = Job(Id((js \ "id").as[String]),
(js \ "name").as[String],
Id((js \ "pipeline").as[String]))
private def get[A](u: String)(mapper: (Int, JsValue) => A) =
healF(() => client.url(url(u)).get().map(response(u, "GET", mapper)))
private def post[A](u: String, data: JsValue)(mapper: (Int, JsValue) => A) =
healF(() => client.url(url(u)).post(data).map(response(u, "POST", mapper)))
private def response[A](u: String, method: String, mapper: (Int, JsValue) => A) = (resp: WSResponse) => {
if (resp.body.isEmpty)
mapper(resp.status, JsNull)
else try {
mapper(resp.status, resp.json)
} catch {
case e: JsonProcessingException =>
throw new TestFailedException(s"When ${method }ing $u, failed to parse body of response:\n${resp.body }", e, 0)
case e@JsResultException(errs) =>
throw new TestFailedException(s"When ${method }ing $u, failed to parse body of response:\n${resp.body }\n$errs", e, 0)
}
}
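  // Runs the request and, when the future fails with a nested TestFailedException,
  // rethrows the cause so test reports point at the underlying failure.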
private def healF[A](f: () => Future[A]) = {
try {
f().futureValue
} catch {
case e: TestFailedException if e.getCause.isInstanceOf[TestFailedException] => throw e.getCause
}
}
private def url(u: String) = {
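    // Normalize the slashes so the base address and the relative path join cleanly.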
val root = if (address.endsWith("/")) address else address + "/"
val sub = if (u.startsWith("/")) u.substring(1) else u
root + sub
}
}
sealed trait Status
case class DownError(why: Exception) extends Status
case object Down extends Status
case class Up() extends Status
case class Id(value: String)
case class Job(id: Id, name: String, pipeline: Id)
|
thorinii/oldtoby-server
|
acceptance/src/test/scala/me/lachlanap/oldtoby/server/helpers/ServerInterface.scala
|
Scala
|
mit
| 4,705 |
/*
* Copyright 2010 Michael Fortin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.webapp.action.args
/**
* Document Me..
*
* @author Michael Fortin
*/
class UnknownActionArgException(msg:String) extends RuntimeException(msg)
|
m410/brzy
|
src/main/scala/org/brzy/webapp/action/args/UnknownActionArgException.scala
|
Scala
|
apache-2.0
| 772 |
package fbSpark
import org.apache.spark.SparkConf
import org.joda.time.format.DateTimeFormat
import org.joda.time.format.DateTimeFormatter
object Common{
val timeFormatter: DateTimeFormatter = DateTimeFormat
.forPattern("yyyy-MM-dd HH:mm:ss.S")
// Get default Spark configuration.
def getSparkConf(appName: String): SparkConf = {
new SparkConf()
.setAppName(appName)
.set("spark.storage.memoryFraction", "0.05")
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
.set("spark.executor.cores", "2")
.set("spark.executor.memory", "3G")
.set("spark.default.parallelism", "24")
}
def printWrapper(a: Any): Unit = {
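    // Print the value between separator lines and pause briefly, presumably so it
    // stands out in the interleaved Spark driver output.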
println()
println("----------")
println(a)
println("----------")
println()
Thread.sleep(1000)
}
}
|
PredictionIO/open-academy
|
MayuriSridhar/fb_spark/src/main/scala/fbSpark/Common.scala
|
Scala
|
apache-2.0
| 815 |
package org.example1_1.declaration.eta
class EtaClass {
}
|
JetBrains/intellij-scala
|
scala/scala-impl/testdata/move/allInOne_1_MoveXYZ/before/org/example1_1/declaration/eta/EtaClass.scala
|
Scala
|
apache-2.0
| 60 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package io.snappydata.benchmark.snappy.tpch
import java.io.{File, FileOutputStream, PrintStream}
import java.sql.{DriverManager, PreparedStatement}
import io.snappydata.benchmark.snappy.TPCH_Snappy
object QueryExecutionJdbc {
def main(args: Array[String]) {
val avgFileStream: FileOutputStream = new FileOutputStream(new File(s"Snappy_Average.out"))
val avgPrintStream: PrintStream = new PrintStream(avgFileStream)
val host = args(0)
val port = args(1)
val dbName = "TPCH"
val dbAddress = "jdbc:snappydata://" + host + ":" + port + "/"
val conn = DriverManager.getConnection(dbAddress)
val queries: Array[String] = args(2).split(",")
// scalastyle:off println
println(queries.length)
    val isResultCollection: Boolean = args(3).toBoolean
    val warmup: Integer = args(4).toInt
    val runsForAverage: Integer = args(5).toInt
    val isDynamic: Boolean = args(6).toBoolean
for (query <- queries) {
var prepStatement: PreparedStatement = null
query match {
case "1" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery1)
var parameters = TPCH_Queries.getQ1Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
}
case "2" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery2)
var parameters = TPCH_Queries.getQ2Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
}
case "3" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery3)
var parameters = TPCH_Queries.getQ3Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
}
case "4" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery4)
var parameters = TPCH_Queries.getQ4Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
}
case "5" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery5)
var parameters = TPCH_Queries.getQ5Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
}
case "6" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery6)
var parameters = TPCH_Queries.getQ6Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
prepStatement.setString(5, parameters(4))
}
case "7" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery7)
var parameters = TPCH_Queries.getQ7Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
}
case "8" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery8)
var parameters = TPCH_Queries.getQ8Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
}
case "9" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery9)
var parameters = TPCH_Queries.getQ9Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
}
case "10" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery10)
var parameters = TPCH_Queries.getQ10Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
}
case "11" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery11)
var parameters = TPCH_Queries.getQ11Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
}
case "12" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery12)
var parameters = TPCH_Queries.getQ12Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
}
case "13" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery13)
var parameters = TPCH_Queries.getQ13Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
}
case "14" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery14)
var parameters = TPCH_Queries.getQ14Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
}
case "15" => {
}
case "16" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery16)
var parameters = TPCH_Queries.getQ16Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
prepStatement.setString(5, parameters(4))
prepStatement.setString(6, parameters(5))
prepStatement.setString(7, parameters(6))
prepStatement.setString(8, parameters(7))
prepStatement.setString(9, parameters(8))
prepStatement.setString(10, parameters(9))
prepStatement.setString(11, parameters(10))
}
case "17" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery17)
var parameters = TPCH_Queries.getQ17Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
}
case "18" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery18)
var parameters = TPCH_Queries.getQ18Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
}
case "19" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery19)
var parameters = TPCH_Queries.getQ19Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
prepStatement.setString(5, parameters(4))
prepStatement.setString(6, parameters(5))
prepStatement.setString(7, parameters(6))
prepStatement.setString(8, parameters(7))
prepStatement.setString(9, parameters(8))
}
case "20" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery20)
var parameters = TPCH_Queries.getQ20Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
}
case "21" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery21)
var parameters = TPCH_Queries.getQ21Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
}
case "22" => {
prepStatement = conn.prepareStatement(TPCH_Queries.getQuery22)
var parameters = TPCH_Queries.getQ22Parameter(isDynamic)
prepStatement.setString(1, parameters(0))
prepStatement.setString(2, parameters(1))
prepStatement.setString(3, parameters(2))
prepStatement.setString(4, parameters(3))
prepStatement.setString(5, parameters(4))
prepStatement.setString(6, parameters(5))
prepStatement.setString(7, parameters(6))
prepStatement.setString(8, parameters(7))
prepStatement.setString(9, parameters(8))
prepStatement.setString(10, parameters(9))
prepStatement.setString(11, parameters(10))
prepStatement.setString(12, parameters(11))
prepStatement.setString(13, parameters(12))
prepStatement.setString(14, parameters(13))
}
}
QueryExecutor.execute_statement(query, isResultCollection, prepStatement, warmup,
runsForAverage, avgPrintStream)
prepStatement.close()
}
/* //code for SNAP- 1296
println("----------------------------------Use of Statement-------------------------------")
val stmt = conn.createStatement()
var rs = stmt.executeQuery(TPCH_Snappy.getQuery10)
var rsmd = rs.getMetaData()
println(s"KBKB : rsmd : $rsmd")
var columnsNumber = rsmd.getColumnCount();
println(s"KBKB : columnsNumber : $columnsNumber")
var count : Int = 0
while (rs.next()) {
count += 1
for (i <- 1 to columnsNumber) {
if (i > 1) print(",")
print(rs.getString(i))
}
println()
}
println(s"NUmber of results : $count")
stmt.close()
println("----------------------------------Use of PreparedStatement-------------------------------")
var prepStatement = conn.prepareStatement(TPCH_Snappy.getQuery10)
rs = prepStatement.executeQuery
rsmd = rs.getMetaData()
println(s"KBKB : rsmd : $rsmd")
columnsNumber = rsmd.getColumnCount();
println(s"KBKB : columnsNumber : $columnsNumber")
// rs.last()
// println(s"KBKBKB : totoal result size : ${rs.getRow}")
count = 0
while (rs.next()) {
count += 1
for (i <- 1 to columnsNumber) {
if (i > 1) print(",")
print(rs.getString(i))
}
println()
}
println(s"NUmber of results : $count")
prepStatement.close()
*/
avgPrintStream.close()
avgFileStream.close()
TPCH_Snappy.close
}
}
|
vjr/snappydata
|
cluster/src/test/scala/io/snappydata/benchmark/snappy/tpch/QueryExecutionJdbc.scala
|
Scala
|
apache-2.0
| 11,338 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.scalastream
// Akka and Spray
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
// Java
import java.io.File
// Argot
import org.clapper.argot._
// Config
import com.typesafe.config.{ConfigFactory,Config,ConfigException}
// Logging
import org.slf4j.LoggerFactory
// Snowplow
import sinks._
// Main entry point of the Scala collector.
object ScalaCollector extends App {
lazy val log = LoggerFactory.getLogger(getClass())
import log.{error, debug, info, trace}
import ArgotConverters._ // Argument specifications
val parser = new ArgotParser(
programName = generated.Settings.name,
compactUsage = true,
preUsage = Some("%s: Version %s. Copyright (c) 2013, %s.".format(
generated.Settings.name,
generated.Settings.version,
generated.Settings.organization)
)
)
// Optional config argument
val config = parser.option[Config](List("config"), "filename",
"Configuration file. Defaults to \"resources/application.conf\" " +
"(within .jar) if not set") { (c, opt) =>
val file = new File(c)
if (file.exists) {
ConfigFactory.parseFile(file)
} else {
parser.usage("Configuration file \"%s\" does not exist".format(c))
ConfigFactory.empty()
}
}
parser.parse(args)
val rawConf = config.value.getOrElse(ConfigFactory.load("application"))
implicit val system = ActorSystem.create("scala-stream-collector", rawConf)
val collectorConfig = new CollectorConfig(rawConf)
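  // Only the Kinesis and Stdout sinks are constructed here; a "test" sink setting
  // is not handled by this match.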
val sink = collectorConfig.sinkEnabled match {
case Sink.Kinesis => new KinesisSink(collectorConfig)
case Sink.Stdout => new StdoutSink
}
// The handler actor replies to incoming HttpRequests.
val handler = system.actorOf(
Props(classOf[CollectorServiceActor], collectorConfig, sink),
name = "handler"
)
IO(Http) ! Http.Bind(handler,
interface=collectorConfig.interface, port=collectorConfig.port)
}
// Return Options from the configuration.
object Helper {
implicit class RichConfig(val underlying: Config) extends AnyVal {
def getOptionalString(path: String): Option[String] = try {
Some(underlying.getString(path))
} catch {
case e: ConfigException.Missing => None
}
}
}
// Instead of comparing strings and validating every time
// the sink is accessed, validate the string here and
// store this enumeration.
object Sink extends Enumeration {
type Sink = Value
val Kinesis, Stdout, Test = Value
}
// Rigidly load the configuration file here to error when
// the collector process starts rather than later.
class CollectorConfig(config: Config) {
import Helper.RichConfig
private val collector = config.getConfig("collector")
val interface = collector.getString("interface")
val port = collector.getInt("port")
val production = collector.getBoolean("production")
private val p3p = collector.getConfig("p3p")
val p3pPolicyRef = p3p.getString("policyref")
val p3pCP = p3p.getString("CP")
private val cookie = collector.getConfig("cookie")
val cookieExpiration = cookie.getMilliseconds("expiration")
var cookieDomain = cookie.getOptionalString("domain")
private val sink = collector.getConfig("sink")
// TODO: either change this to ADTs or switch to withName generation
val sinkEnabled = sink.getString("enabled") match {
case "kinesis" => Sink.Kinesis
case "stdout" => Sink.Stdout
case "test" => Sink.Test
case _ => throw new RuntimeException("collector.sink.enabled unknown.")
}
private val kinesis = sink.getConfig("kinesis")
private val aws = kinesis.getConfig("aws")
val awsAccessKey = aws.getString("access-key")
val awsSecretKey = aws.getString("secret-key")
private val stream = kinesis.getConfig("stream")
val streamName = stream.getString("name")
val streamSize = stream.getInt("size")
val threadpoolSize = kinesis.hasPath("thread-pool-size") match {
case true => kinesis.getInt("thread-pool-size")
case _ => 10
}
}
|
pkallos/snowplow
|
2-collectors/scala-stream-collector/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/ScalaCollectorApp.scala
|
Scala
|
apache-2.0
| 4,729 |
/*
* (C) Copyright 2015 Atomic BITS (http://atomicbits.io).
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Affero General Public License
* (AGPL) version 3.0 which accompanies this distribution, and is available in
* the LICENSE file or at http://www.gnu.org/licenses/agpl-3.0.en.html
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* Contributors:
* Peter Rigole
*
*/
package io.atomicbits.scraml
import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.client.WireMock
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.core.WireMockConfiguration._
import io.atomicbits.scraml.dsl.Response
import io.atomicbits.scraml.dsl.support.StringPart
import io.atomicbits.scraml.examples.TestClient01
import io.atomicbits.scraml.examples.TestClient01._
import org.scalatest.{BeforeAndAfterAll, GivenWhenThen, FeatureSpec}
import play.api.libs.json.Format
import play.libs.Json
import scala.concurrent.{Await, Future}
import scala.language.{postfixOps, reflectiveCalls}
import scala.concurrent.duration._
/**
* Created by peter on 17/05/15, Atomic BITS bvba (http://atomicbits.io).
*/
class FooRamlModelGeneratorTest extends FeatureSpec with GivenWhenThen with BeforeAndAfterAll {
val port = 8281
val host = "localhost"
val wireMockServer = new WireMockServer(wireMockConfig().port(port))
override def beforeAll() = {
wireMockServer.start()
WireMock.configureFor(host, port)
}
override def afterAll() = {
wireMockServer.stop()
}
feature("Use the DSL based on a RAML specification") {
val client = TestClient01(host = host, port = port,
defaultHeaders = Map("Accept" -> "application/vnd-v1.0+json"))
val userResource = client.rest.user
val userFoobarResource = userResource.userid("foobar")
scenario("test a GET request") {
Given("a matching web service")
stubFor(
get(urlEqualTo(s"/rest/user?age=51.0&firstName=John&organization=ESA&organization=NASA"))
.withHeader("Accept", equalTo("application/vnd-v1.0+json"))
.willReturn(
aResponse()
.withBody( """{"address": {"streetAddress": "Mulholland Drive", "city": "LA", "state": "California"}, "firstName":"John", "lastName": "Doe", "age": 21, "id": "1"}""")
.withStatus(200)))
When("execute a GET request")
val eventualUserResponse: Future[User] =
userResource
.get(age = Some(51), firstName = Some("John"), lastName = None, organization = List("ESA", "NASA"))
.call().asType
Then("we should get the correct user object")
val user = User(
homePage = None,
address = Some(UserAddress("Mulholland Drive", "LA", "California")),
age = 21,
firstName = "John",
lastName = "Doe",
id = "1"
)
val userResponse = Await.result(eventualUserResponse, 2 seconds)
assertResult(user)(userResponse)
}
scenario("test a form POST request") {
Given("a matching web service")
stubFor(
post(urlEqualTo(s"/rest/user/foobar"))
.withHeader("Content-Type", equalTo("application/x-www-form-urlencoded"))
.withHeader("Accept", equalTo("application/vnd-v1.0+json"))
.withRequestBody(equalTo( """text=Hello%20Foobar"""))
.willReturn(
aResponse()
.withBody("Post OK")
.withStatus(200)
)
)
When("execute a form POST request")
val eventualPostResponse: Future[String] =
userFoobarResource
.post(text = "Hello Foobar", value = None).call().asString
Then("we should get the correct response")
val postResponse = Await.result(eventualPostResponse, 2 seconds)
assertResult("Post OK")(postResponse)
}
scenario("test a PUT request") {
Given("a matching web service")
val user = User(
homePage = Some(Link("http://foo.bar", "GET", None)),
address = Some(UserAddress("Mulholland Drive", "LA", "California")),
age = 21,
firstName = "John",
lastName = "Doe",
id = "1"
)
val link = Link("http://foo.bar", "GET", None)
import User._
import Link._
def userToJson()(implicit formatter: Format[User]) = {
formatter.writes(user).toString()
}
def linkToJson()(implicit formatter: Format[Link]) = {
formatter.writes(link).toString()
}
stubFor(
put(urlEqualTo(s"/rest/user/foobar"))
.withHeader("Content-Type", equalTo("application/vnd-v1.0+json"))
.withHeader("Accept", equalTo("application/vnd-v1.0+json"))
.withRequestBody(equalTo(userToJson()))
.willReturn(
aResponse()
.withBody(linkToJson())
.withStatus(200)
)
)
When("execute a PUT request")
val eventualPutResponse: Future[Link] =
userFoobarResource
.put(user)
.headers(
"Content-Type" -> "application/vnd-v1.0+json",
"Accept" -> "application/vnd-v1.0+json"
)
.call().asType
Then("we should get the correct response")
val putResponse = Await.result(eventualPutResponse, 2 seconds)
assertResult(link)(putResponse)
}
scenario("test a DELETE request") {
Given("a matching web service")
stubFor(
delete(urlEqualTo(s"/rest/user/foobar"))
.withHeader("Accept", equalTo("application/vnd-v1.0+json"))
.willReturn(
aResponse()
.withBody("Delete OK")
.withStatus(200)
)
)
When("execute a DELETE request")
val eventualPutResponse: Future[String] = userFoobarResource.delete().call().asString
Then("we should get the correct response")
val putResponse = Await.result(eventualPutResponse, 2 seconds)
assertResult("Delete OK")(putResponse)
}
scenario("test a multipart/form-data POST request") {
Given("a form upload web service")
stubFor(
post(urlEqualTo(s"/rest/user/upload"))
.withHeader("Content-Type", equalTo("multipart/form-data"))
.willReturn(
aResponse()
.withBody("Post OK")
.withStatus(200)
)
)
When("a multipart/form-data POST request happens")
val multipartFormPostResponse =
client.rest.user.upload.post(List(StringPart(name = "test", value = "string part value"))).call().asType
Then("we should get the correct response")
}
scenario("test Lists as request and response body") {
Given("a form upload web service")
val user = User(
homePage = Some(Link("http://foo.bar", "GET", None)),
address = Some(UserAddress("Mulholland Drive", "LA", "California")),
age = 21,
firstName = "John",
lastName = "Doe",
id = "1"
)
// Imports needed to get the implicit JSON formatters for both types.
import User._
def userToJson()(implicit formatter: Format[List[User]]) = {
formatter.writes(List(user)).toString()
}
println(s"user: ${userToJson()}")
stubFor(
put(urlEqualTo(s"/rest/user/activate"))
.withHeader("Content-Type", equalTo("application/vnd-v1.0+json"))
.withHeader("Accept", equalTo("application/vnd-v1.0+json"))
.withRequestBody(equalTo(userToJson()))
.willReturn(
aResponse()
.withBody(userToJson())
.withStatus(200)
)
)
When("a request with list body happens")
val listBodyResponse =
client.rest.user.activate
.put(List(user))
.headers("Content-Type" -> "application/vnd-v1.0+json")
.call().asType
Then("we should get the correct response")
val listBody = Await.result(listBodyResponse, 2 seconds)
assertResult(List(user))(listBody)
}
}
}
|
rcavalcanti/scraml
|
modules/scraml-test/src/test/scala/io/atomicbits/scraml/FooRamlModelGeneratorTest.scala
|
Scala
|
agpl-3.0
| 8,367 |
package example
import common.messages.{WeatherResponse, Weather, User, UserResponse}
import example.JsParser._
import org.scalajs.dom.ext.Ajax
import org.scalajs.jquery.{jQuery => $}
import upickle._
import scala.concurrent.Future
import scala.scalajs.concurrent.JSExecutionContext.Implicits.runNow
import scala.scalajs.js.annotation.JSExport
@JSExport
object ForComprehension {
@JSExport
def main(): Unit = {
implicit val userReader = createUserReader
implicit val userPrinter = new BasicUserPrinter
getUsers map (_.foreach(user => {
getWeather(user.postalCode).map(w => especial(user.firstName, w.description))
}))
}
def getUsers(implicit reader: Reader[UserResponse]): Future[Seq[User]] =
Ajax.get("/api/user/20") map { request =>
read[UserResponse](request.responseText)
} map (_.users)
def getWeather(postalCode: String): Future[Weather] =
Ajax.get("/api/weather/es/"+postalCode) map { request =>
read[WeatherResponse](request.responseText)
} map (_.weathers.head)
def printUser(user: User)(implicit printer: UserPrinter) = printer.print(user)
def printWeather(weather: Weather) = {
val weatherNode = $("<div></div>").text(weather.description)
$("body").append(weatherNode)
}
def especial(name: String, weather: String) = {
val weatherNode = $("<div></div>").text(name + " - " + weather)
$("body").append(weatherNode)
}
}
|
47deg/scalajs-intro
|
example-client/src/main/scala/example/ForComprehension.scala
|
Scala
|
apache-2.0
| 1,429 |
/**
* Copyright Β© 2013-2015 Uncharted Software Inc.
*
* Property of Unchartedβ’, formerly Oculus Info Inc.
* http://uncharted.software/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.datasets
import org.apache.spark.{AccumulableParam, Accumulable}
import scala.collection.mutable.ListBuffer
import scala.util.matching.Regex
/**
* An Accumulator used for recording the errors associated with lines rejected by CSVReader
* Instantiated and used here: com.oculusinfo.tilegen.pipeline.PipelineOperations$#loadCsvDataWithErrorsOp
*
* ErrorCollectorAccumulable
*
* For more info on these accumulators see:
* https://spark.apache.org/docs/1.0.0/api/scala/index.html#org.apache.spark.Accumulable
* https://spark.apache.org/docs/1.0.0/api/scala/index.html#org.apache.spark.AccumulableParam
*
* Created by llay on 28/7/2015.
*/
object ErrorAccumulator {
/**
* Accumulator for bad data characterization
*
* To add a new custom collector to this accumulator:
 * - create a concrete class that extends CustomCollector
 * - when instantiating an ErrorCollectorAccumulable, ensure an instance of your class is passed into the initialValue parameter
*
* See https://spark.apache.org/docs/1.0.0/api/scala/index.html#org.apache.spark.Accumulable (/AccumulableParam) for more info
*/
class ErrorCollectorAccumulable(val initialValue: ListBuffer[CustomCollector]) extends Accumulable[ListBuffer[CustomCollector], (String, Throwable)](initialValue, new ErrorCollectorAccumulableParam) {
}
/**
* A helper for ErrorCollectorAccumulable.
*/
class ErrorCollectorAccumulableParam extends AccumulableParam[ListBuffer[CustomCollector], (String, Throwable)]() {
// Add additional data to the accumulator value.
override def addAccumulator(r: ListBuffer[CustomCollector], t: (String, Throwable)): ListBuffer[CustomCollector] = {
r.foreach(s => s.addRow(t))
r
}
// Merge two accumulated values together.
override def addInPlace(r1: ListBuffer[CustomCollector], r2: ListBuffer[CustomCollector]): ListBuffer[CustomCollector] = {
for (i <- 0 until r1.length) {
r1(i).merge(r2(i))
}
r1
}
// Return the "zero" (identity) value for an accumulator type, given its initial value.
override def zero(initialValue: ListBuffer[CustomCollector]): ListBuffer[CustomCollector] = {
initialValue
}
}
/**
* The base class for custom error info generation.
* Accumulated by ErrorCollectorAccumulable
*/
abstract class CustomCollector extends Serializable {
// Add a row of the rejected-rows RDD into this collector
def addRow(r: (String, Throwable)): Unit
// Merge two collectors of the same concrete type together (for parallel processing)
def merge(accum: CustomCollector): Unit
// Return the aggregated error description -> count map
def getError: collection.mutable.Map[String, Int]
}
/**
 * Custom error collector: aggregates the errors and provides a count for each one
*/
class ErrorCollector extends CustomCollector {
val errors = collection.mutable.Map[String, Int]().withDefaultValue(0)
override def addRow(r: (String, Throwable)): Unit = {
// strip source line from number exception
val prefix = """^java.lang.NumberFormatException: For input string: (.*)""".r
try {
val prefix(suffix) = r._2.toString
errors("java.lang.NumberFormatException") += 1
} catch {
case e: scala.MatchError =>
errors(r._2.toString) += 1
}
}
override def getError = {
errors
}
override def merge(accum: CustomCollector) = {
// Since scala won't let you pass in a subclass, we have to do this ugly casting
// Would much prefer "accum: NumRecordsError"
val a: ErrorCollector = accum.asInstanceOf[ErrorCollector]
a.errors.foreach { case (error, count) =>
errors(error) += count
}
}
}
}
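// A hedged usage sketch (assumed, not taken from the original pipeline code): the accumulable is
// created on the driver, rejected rows are added as (line, throwable) pairs inside the RDD
// computation, and the aggregated error counts are read back on the driver afterwards.
object ErrorAccumulatorUsageSketch {
  import org.apache.spark.rdd.RDD
  import ErrorAccumulator._

  def countParseErrors(lines: RDD[String]): collection.mutable.Map[String, Int] = {
    val accum = new ErrorCollectorAccumulable(ListBuffer[CustomCollector](new ErrorCollector))
    lines.foreach { line =>
      try { line.toDouble }                              // stand-in for the real CSV field parsing
      catch { case e: Exception => accum += ((line, e)) }
    }
    accum.value.head.getError                            // e.g. Map("java.lang.NumberFormatException" -> n)
  }
}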
|
unchartedsoftware/aperture-tiles
|
tile-generation/src/main/scala/com/oculusinfo/tilegen/datasets/ErrorAccumulator.scala
|
Scala
|
mit
| 4,833 |
package es.own3dh2so4
import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import scala.io.Source
/**
* Created by david on 1/04/17.
*/
object Properties {
def apply(): Properties = new Properties("/conf.properties")
def apply(file : String): Properties = new Properties(file)
}
class Properties (file: String) {
val properties = new java.util.Properties()
properties.load(new ByteArrayInputStream(Source.fromInputStream( getClass.getResourceAsStream(file) ).
getLines.mkString("\n").getBytes(StandardCharsets.UTF_8)))
def apply(key: String): Option[String] = {
Option( properties.getProperty(key))
}
}
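// Hedged usage sketch: the property file name is the class default ("/conf.properties" on the
// classpath) and the "spark.master" key is an assumption chosen only for illustration.
object PropertiesUsageSketch extends App {
  val props = Properties()                           // same as Properties("/conf.properties")
  val master: Option[String] = props("spark.master")
  println(master.getOrElse("spark.master is not set"))
}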
|
own3dh2so4/spark2-fast-data-processing-book
|
src/main/scala/es/own3dh2so4/Properties.scala
|
Scala
|
apache-2.0
| 659 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.controllers
import cats.instances.future._
import java.util.UUID
import play.api.libs.json.Json
import play.api.mvc.{ Action, Request, RequestHeader }
import scala.concurrent.Future
import uk.gov.hmrc.bforms.models.{ Form, FormData, FormId, FormTypeId }
import uk.gov.hmrc.bforms.repositories.{ FormRepository, FormTemplateRepository, SubmissionRepository }
import uk.gov.hmrc.bforms.services.{ FormService, SubmissionService, MongoOperation, SaveOperation, SaveTolerantOperation, UpdateOperation, UpdateTolerantOperation }
import uk.gov.hmrc.bforms.typeclasses.{ FusFeUrl, FusUrl, ServiceUrl }
import uk.gov.hmrc.play.microservice.controller.BaseController
import uk.gov.hmrc.play.http.logging.MdcLoggingExecutionContext._
class Forms()(
implicit
formRepo: FormRepository,
formTemplateRepo: FormTemplateRepository,
submissionRepo: SubmissionRepository,
fusUrl: ServiceUrl[FusUrl],
fusFeUrl: ServiceUrl[FusFeUrl]
) extends BaseController {
def all() = Action.async { implicit request =>
Future.successful(NotImplemented)
}
def save(tolerant: Option[Boolean]) = Action.async(parse.json[FormData]) { implicit request =>
val _id = FormId(UUID.randomUUID().toString())
val operation = tolerant match {
case Some(true) => SaveTolerantOperation
case _ => SaveOperation
}
saveOrUpdate(_id, operation)
}
def allById(formTypeId: FormTypeId) = Action.async { implicit request =>
FormService.allById(formTypeId).fold(
error => error.toResult,
response => {
val links = response.map(formLink)
Ok(Json.toJson(links))
}
)
}
def getByIdAndVersion(formTypeId: FormTypeId, version: String) = Action.async { implicit request =>
FormService.getByIdAndVersion(formTypeId, version).fold(
error => error.toResult,
response => {
val links = response.map(formLink)
Ok(Json.toJson(links))
}
)
}
def get(formTypeId: FormTypeId, version: String, formId: FormId) = Action.async { implicit request =>
FormService.get(formTypeId, version, formId).fold(
error => error.toResult,
response => Ok(Json.toJson(response.formData))
)
}
def update(formId: FormId, tolerant: Option[Boolean]) = Action.async(parse.json[FormData]) { implicit request =>
val operation = tolerant match {
case Some(true) => UpdateTolerantOperation
case _ => UpdateOperation
}
saveOrUpdate(formId, operation)
}
def delete(formTypeId: FormTypeId, version: String, formId: FormId) = Action.async { implicit request =>
Future.successful(NotImplemented)
}
def submission(formTypeId: FormTypeId, formId: FormId) = Action.async { implicit request =>
SubmissionService.submission(formTypeId, formId).fold(
error => error.toResult,
response => Ok(response)
)
}
def submissionStatus(formTypeId: FormTypeId, formId: FormId) = Action.async { implicit request =>
Future.successful(NotImplemented)
}
private def saveOrUpdate(formId: FormId, mongoOperation: MongoOperation)(implicit request: Request[FormData]) = {
val formData = request.body
val form = Form(formId, formData)
FormService.saveOrUpdate(form, mongoOperation).fold(
error => error.toResult,
response => {
val formRoute = formLink(form)
Ok(Json.obj("success" -> formRoute))
}
)
}
private def formLink(form: Form)(implicit request: RequestHeader) = {
val Form(formId, formData) = form
routes.Forms.get(formData.formTypeId, formData.version, formId).absoluteURL()
}
}
|
VlachJosef/bforms
|
app/uk/gov/hmrc/bforms/controllers/Forms.scala
|
Scala
|
apache-2.0
| 4,213 |
package net.sansa_stack.rdf.spark.model.ds
import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
import org.apache.jena.datatypes.xsd.impl.XSDDouble
import org.apache.jena.graph.{Node, NodeFactory, Triple}
import org.apache.jena.riot.Lang
import org.apache.spark.sql.{Dataset, SparkSession}
import org.scalatest.FunSuite
import net.sansa_stack.rdf.spark.io._
class DSTripleOpsTests extends FunSuite with SharedSparkContext {
System.setProperty("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
System.setProperty("spark.kryo.registrator", "net.sansa_stack.rdf.spark.io.JenaKryoRegistrator")
lazy val spark = SparkSession.builder.config(
conf
.set("spark.sql.crossJoin.enabled", "true")
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
.set("spark.kryo.registrator", "net.sansa_stack.rdf.spark.io.JenaKryoRegistrator")
).getOrCreate()
import net.sansa_stack.rdf.spark.model._
val lang: Lang = Lang.NTRIPLES
var path: String = _
var triples: Dataset[Triple] = _
@transient private var _spark: SparkSession = _
override def beforeAll(): Unit = {
conf.set("spark.sql.crossJoin.enabled", "true")
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
.set("spark.kryo.registrator", "net.sansa_stack.rdf.spark.io.JenaKryoRegistrator")
_spark = SparkSession.builder.config(conf).master("local[1]").getOrCreate()
path = getClass.getResource("/loader/data.nt").getPath
triples = spark.read.rdf(lang)(path).toDS().cache()
}
test("converting Dataset of triples into RDD of Triples should pass") {
val graph = triples.toRDD()
val size = graph.count()
assert(size == 10)
}
test("converting Dataset of triples into DataFrame should pass") {
val graph = triples.toDF()
val size = graph.count()
assert(size == 10)
}
test("union of two RDF graph should match") {
val other = triples
val graph = triples.union(other)
val size = graph.count()
assert(size == 20)
}
test("difference of two RDF graph should match") {
val other = triples
val graph = triples.difference(other)
val size = graph.count()
assert(size == 0)
}
test("intersection of two RDF graph should match") {
val other = triples
val graph = triples.intersection(other)
val size = graph.count()
// PW: Expected size was 10 before but the count here and now is 9 since a
// duplicate triple was removed. Having the duplicate trile removed is IMO
// in line with the RDF 1.1 standard which is talking about RDF datasets
// being sets of triples
assert(size == 9)
}
test("add a statement to the RDF graph should match") {
val triple = Triple.create(
NodeFactory.createURI("http://dbpedia.org/resource/Guy_de_Maupassant"),
NodeFactory.createURI("http://xmlns.com/foaf/0.1/givenName"),
NodeFactory.createLiteral("Guy De"))
val graph = triples.add(triple)
val size = graph.count()
assert(size == 11)
}
test("add a list of statements to the RDF graph should match") {
val triple1 = Triple.create(
NodeFactory.createURI("http://dbpedia.org/resource/Guy_de_Maupassant"),
NodeFactory.createURI("http://xmlns.com/foaf/0.1/givenName"),
NodeFactory.createLiteral("Guy De"))
val triple2 = Triple.create(
NodeFactory.createURI("http://dbpedia.org/resource/Guy_de_Maupassant"),
NodeFactory.createURI("http://dbpedia.org/ontology/influenced"),
NodeFactory.createURI("http://dbpedia.org/resource/Tobias_Wolff"))
val triple3 = Triple.create(
NodeFactory.createURI("http://dbpedia.org/resource/Guy_de_Maupassant"),
NodeFactory.createURI("http://xmlns.com/foaf/0.1/givenName"),
NodeFactory.createURI("http://dbpedia.org/resource/Henry_James"))
val statements = Seq(triple1, triple2, triple3)
val graph = triples.addAll(statements)
val size = graph.count()
assert(size == 13)
}
test("remove a statement from the RDF graph should match") {
val triple = Triple.create(
NodeFactory.createURI("http://en.wikipedia.org/wiki/Helium"),
NodeFactory.createURI("http://example.org/elements/specificGravity"),
NodeFactory.createLiteral("1.663E-4", new XSDDouble("double")))
val graph = triples.remove(triple)
val size = graph.count()
assert(size == 8)
}
test("triple containment") {
var triple = Triple.create(
NodeFactory.createURI("http://example.org/show/218"),
NodeFactory.createURI("http://www.w3.org/2000/01/rdf-schema#label"),
NodeFactory.createLiteral("That Seventies Show", "en"))
assert(!triples.contains(triple))
triple = Triple.create(
NodeFactory.createURI("http://example.org/show/218"),
NodeFactory.createURI("http://example.org/show/localName"),
NodeFactory.createLiteral("That Seventies Show", "en"))
assert(triples.contains(triple))
triple = Triple.create(
Node.ANY,
NodeFactory.createURI("http://example.org/show/localName"),
NodeFactory.createLiteral("That Seventies Show", "en"))
assert(triples.contains(triple))
}
}
|
SANSA-Stack/SANSA-RDF
|
sansa-rdf/sansa-rdf-spark/src/test/scala/net/sansa_stack/rdf/spark/model/ds/DSTripleOpsTests.scala
|
Scala
|
apache-2.0
| 5,200 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.{util => ju}
import java.text.SimpleDateFormat
import java.util.Date
import org.scalatest.{BeforeAndAfter, Matchers}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.streaming.OutputMode._
class EventTimeWatermarkSuite extends StreamTest with BeforeAndAfter with Matchers with Logging {
import testImplicits._
after {
sqlContext.streams.active.foreach(_.stop())
}
test("EventTimeStats") {
val epsilon = 10E-6
val stats = EventTimeStats(max = 100, min = 10, avg = 20.0, count = 5)
stats.add(80L)
stats.max should be (100)
stats.min should be (10)
stats.avg should be (30.0 +- epsilon)
stats.count should be (6)
val stats2 = EventTimeStats(80L, 5L, 15.0, 4)
stats.merge(stats2)
stats.max should be (100)
stats.min should be (5)
stats.avg should be (24.0 +- epsilon)
stats.count should be (10)
}
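// Worked numbers for the assertions above (a reading aid, not extra test logic):
// the initial stats carry sum = 20.0 * 5 = 100; add(80) makes sum = 180, count = 6, avg = 30.0;
// stats2 contributes sum = 15.0 * 4 = 60, so the merge holds sum = 240, count = 10, avg = 24.0,
// with max = max(100, 80) = 100 and min = min(10, 5) = 5.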
test("EventTimeStats: avg on large values") {
val epsilon = 10E-6
val largeValue = 10000000000L // 10B
// Make sure `largeValue` will cause overflow if we use a Long sum to calc avg.
assert(largeValue * largeValue != BigInt(largeValue) * BigInt(largeValue))
val stats =
EventTimeStats(max = largeValue, min = largeValue, avg = largeValue, count = largeValue - 1)
stats.add(largeValue)
stats.avg should be (largeValue.toDouble +- epsilon)
val stats2 = EventTimeStats(
max = largeValue + 1,
min = largeValue,
avg = largeValue + 1,
count = largeValue)
stats.merge(stats2)
stats.avg should be ((largeValue + 0.5) +- epsilon)
}
test("error on bad column") {
val inputData = MemoryStream[Int].toDF()
val e = intercept[AnalysisException] {
inputData.withWatermark("badColumn", "1 minute")
}
assert(e.getMessage contains "badColumn")
}
test("error on wrong type") {
val inputData = MemoryStream[Int].toDF()
val e = intercept[AnalysisException] {
inputData.withWatermark("value", "1 minute")
}
assert(e.getMessage contains "value")
assert(e.getMessage contains "int")
}
test("event time and watermark metrics") {
// No event time metrics when there is no watermarking
val inputData1 = MemoryStream[Int]
val aggWithoutWatermark = inputData1.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(aggWithoutWatermark, outputMode = Complete)(
AddData(inputData1, 15),
CheckAnswer((15, 1)),
assertEventStats { e => assert(e.isEmpty) },
AddData(inputData1, 10, 12, 14),
CheckAnswer((10, 3), (15, 1)),
assertEventStats { e => assert(e.isEmpty) }
)
// All event time metrics where watermarking is set
val inputData2 = MemoryStream[Int]
val aggWithWatermark = inputData2.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(aggWithWatermark)(
AddData(inputData2, 15),
CheckAnswer(),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(15))
assert(e.get("min") === formatTimestamp(15))
assert(e.get("avg") === formatTimestamp(15))
assert(e.get("watermark") === formatTimestamp(0))
},
AddData(inputData2, 10, 12, 14),
CheckAnswer(),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(14))
assert(e.get("min") === formatTimestamp(10))
assert(e.get("avg") === formatTimestamp(12))
assert(e.get("watermark") === formatTimestamp(5))
},
AddData(inputData2, 25),
CheckAnswer(),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(25))
assert(e.get("min") === formatTimestamp(25))
assert(e.get("avg") === formatTimestamp(25))
assert(e.get("watermark") === formatTimestamp(5))
},
AddData(inputData2, 25),
CheckAnswer((10, 3)),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(25))
assert(e.get("min") === formatTimestamp(25))
assert(e.get("avg") === formatTimestamp(25))
assert(e.get("watermark") === formatTimestamp(15))
}
)
}
test("append mode") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckLastBatch(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckLastBatch(),
assertNumStateRows(3),
AddData(inputData, 25), // Emit items less than watermark and drop their state
CheckLastBatch((10, 5)),
assertNumStateRows(2),
AddData(inputData, 10), // Should not emit anything as data less than watermark
CheckLastBatch(),
assertNumStateRows(2)
)
}
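// Reading aid for the "append mode" test above: with a 10 second delay, the batch containing 25
// advances the watermark to 25 - 10 = 15 seconds, so the next trigger finalizes the window
// [10, 15) as (10, 5) and drops its state; the late value 10 arriving afterwards is below the
// watermark and is ignored.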
test("update mode") {
val inputData = MemoryStream[Int]
spark.conf.set("spark.sql.shuffle.partitions", "10")
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation, OutputMode.Update)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckLastBatch((10, 5), (15, 1)),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckLastBatch((25, 1)),
assertNumStateRows(3),
AddData(inputData, 10, 25), // Ignore 10 as its less than watermark
CheckLastBatch((25, 2)),
assertNumStateRows(2),
AddData(inputData, 10), // Should not emit anything as data less than watermark
CheckLastBatch(),
assertNumStateRows(2)
)
}
test("delay in months and years handled correctly") {
val currentTimeMs = System.currentTimeMillis
val currentTime = new Date(currentTimeMs)
val input = MemoryStream[Long]
val aggWithWatermark = input.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "2 years 5 months")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
def monthsSinceEpoch(date: Date): Int = { date.getYear * 12 + date.getMonth }
testStream(aggWithWatermark)(
AddData(input, currentTimeMs / 1000),
CheckAnswer(),
AddData(input, currentTimeMs / 1000),
CheckAnswer(),
assertEventStats { e =>
assert(timestampFormat.parse(e.get("max")).getTime === (currentTimeMs / 1000) * 1000)
val watermarkTime = timestampFormat.parse(e.get("watermark"))
val monthDiff = monthsSinceEpoch(currentTime) - monthsSinceEpoch(watermarkTime)
// monthsSinceEpoch is like `math.floor(num)`, so monthDiff has two possible values.
assert(monthDiff === 29 || monthDiff === 30,
s"currentTime: $currentTime, watermarkTime: $watermarkTime")
}
)
}
test("recovery") {
val inputData = MemoryStream[Int]
val df = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(df)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckLastBatch(),
AddData(inputData, 25), // Advance watermark to 15 seconds
StopStream,
StartStream(),
CheckLastBatch(),
AddData(inputData, 25), // Evict items less than previous watermark.
CheckLastBatch((10, 5)),
StopStream,
AssertOnQuery { q => // purge commit and clear the sink
val commit = q.batchCommitLog.getLatest().map(_._1).getOrElse(-1L) + 1L
q.batchCommitLog.purge(commit)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(),
CheckLastBatch((10, 5)), // Recompute last batch and re-evict timestamp 10
AddData(inputData, 30), // Advance watermark to 20 seconds
CheckLastBatch(),
StopStream,
StartStream(), // Watermark should still be 15 seconds
AddData(inputData, 17),
CheckLastBatch(), // We still do not see next batch
AddData(inputData, 30), // Advance watermark to 20 seconds
CheckLastBatch(),
AddData(inputData, 30), // Evict items less than previous watermark.
CheckLastBatch((15, 2)) // Ensure we see next window
)
}
test("dropping old data") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10, 11, 12),
CheckAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckAnswer(),
AddData(inputData, 25), // Evict items less than previous watermark.
CheckAnswer((10, 3)),
AddData(inputData, 10), // 10 is later than 15 second watermark
CheckAnswer((10, 3)),
AddData(inputData, 25),
CheckAnswer((10, 3)) // Should not emit an incorrect partial result.
)
}
test("watermark with 2 streams") {
import org.apache.spark.sql.functions.sum
val first = MemoryStream[Int]
val firstDf = first.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.select('value)
val second = MemoryStream[Int]
val secondDf = second.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "5 seconds")
.select('value)
withTempDir { checkpointDir =>
val unionWriter = firstDf.union(secondDf).agg(sum('value))
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("memory")
.outputMode("complete")
.queryName("test")
val union = unionWriter.start()
def getWatermarkAfterData(
firstData: Seq[Int] = Seq.empty,
secondData: Seq[Int] = Seq.empty,
query: StreamingQuery = union): Long = {
if (firstData.nonEmpty) first.addData(firstData)
if (secondData.nonEmpty) second.addData(secondData)
query.processAllAvailable()
// add a dummy batch so lastExecution has the new watermark
first.addData(0)
query.processAllAvailable()
// get last watermark
val lastExecution = query.asInstanceOf[StreamingQueryWrapper].streamingQuery.lastExecution
lastExecution.offsetSeqMetadata.batchWatermarkMs
}
// Global watermark starts at 0 until we get data from both sides
assert(getWatermarkAfterData(firstData = Seq(11)) == 0)
assert(getWatermarkAfterData(secondData = Seq(6)) == 1000)
// Global watermark stays at left watermark 1 when right watermark moves to 2
assert(getWatermarkAfterData(secondData = Seq(8)) == 1000)
// Global watermark switches to right side value 2 when left watermark goes higher
assert(getWatermarkAfterData(firstData = Seq(21)) == 3000)
// Global watermark goes back to left
assert(getWatermarkAfterData(secondData = Seq(17, 28, 39)) == 11000)
// Global watermark stays on left as long as it's below right
assert(getWatermarkAfterData(firstData = Seq(31)) == 21000)
assert(getWatermarkAfterData(firstData = Seq(41)) == 31000)
// Global watermark switches back to right again
assert(getWatermarkAfterData(firstData = Seq(51)) == 34000)
// Global watermark is updated correctly with simultaneous data from both sides
assert(getWatermarkAfterData(firstData = Seq(100), secondData = Seq(100)) == 90000)
assert(getWatermarkAfterData(firstData = Seq(120), secondData = Seq(110)) == 105000)
assert(getWatermarkAfterData(firstData = Seq(130), secondData = Seq(125)) == 120000)
// Global watermark doesn't decrement with simultaneous data
assert(getWatermarkAfterData(firstData = Seq(100), secondData = Seq(100)) == 120000)
assert(getWatermarkAfterData(firstData = Seq(140), secondData = Seq(100)) == 120000)
assert(getWatermarkAfterData(firstData = Seq(100), secondData = Seq(135)) == 130000)
// Global watermark recovers after restart, but left side watermark ahead of it does not.
assert(getWatermarkAfterData(firstData = Seq(200), secondData = Seq(190)) == 185000)
union.stop()
val union2 = unionWriter.start()
assert(getWatermarkAfterData(query = union2) == 185000)
// Even though the left side was ahead of 185000 in the last execution, the watermark won't
// increment until it gets past it in this execution.
assert(getWatermarkAfterData(secondData = Seq(200), query = union2) == 185000)
assert(getWatermarkAfterData(firstData = Seq(200), query = union2) == 190000)
}
}
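// Reading aid for the two-stream test above: each input gets its own watermark,
// max(event time) - delay (10 seconds on the left, 5 seconds on the right), and the global
// watermark reported by getWatermarkAfterData is the minimum of the two once both sides have
// produced data, which is why it never decreases and only advances when the slower side does.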
test("complete mode") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
// No eviction when asked to compute complete results.
testStream(windowedAggregation, OutputMode.Complete)(
AddData(inputData, 10, 11, 12),
CheckAnswer((10, 3)),
AddData(inputData, 25),
CheckAnswer((10, 3), (25, 1)),
AddData(inputData, 25),
CheckAnswer((10, 3), (25, 2)),
AddData(inputData, 10),
CheckAnswer((10, 4), (25, 2)),
AddData(inputData, 25),
CheckAnswer((10, 4), (25, 3))
)
}
test("group by on raw timestamp") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy($"eventTime")
.agg(count("*") as 'count)
.select($"eventTime".cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10),
CheckAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckAnswer(),
AddData(inputData, 25), // Evict items less than previous watermark.
CheckAnswer((10, 1))
)
}
test("delay threshold should not be negative.") {
val inputData = MemoryStream[Int].toDF()
var e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "-1 year")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "1 year -13 months")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "1 month -40 days")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "-10 seconds")
}
assert(e.getMessage contains "should not be negative.")
}
test("the new watermark should override the old one") {
val df = MemoryStream[(Long, Long)].toDF()
.withColumn("first", $"_1".cast("timestamp"))
.withColumn("second", $"_2".cast("timestamp"))
.withWatermark("first", "1 minute")
.withWatermark("second", "2 minutes")
val eventTimeColumns = df.logicalPlan.output
.filter(_.metadata.contains(EventTimeWatermark.delayKey))
assert(eventTimeColumns.size === 1)
assert(eventTimeColumns(0).name === "second")
}
test("EventTime watermark should be ignored in batch query.") {
val df = testData
.withColumn("eventTime", $"key".cast("timestamp"))
.withWatermark("eventTime", "1 minute")
.select("eventTime")
.as[Long]
checkDataset[Long](df, 1L to 100L: _*)
}
test("SPARK-21565: watermark operator accepts attributes from replacement") {
withTempDir { dir =>
dir.delete()
val df = Seq(("a", 100.0, new java.sql.Timestamp(100L)))
.toDF("symbol", "price", "eventTime")
df.write.json(dir.getCanonicalPath)
val input = spark.readStream.schema(df.schema)
.json(dir.getCanonicalPath)
val groupEvents = input
.withWatermark("eventTime", "2 seconds")
.groupBy("symbol", "eventTime")
.agg(count("price") as 'count)
.select("symbol", "eventTime", "count")
val q = groupEvents.writeStream
.outputMode("append")
.format("console")
.start()
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
}
private def assertNumStateRows(numTotalRows: Long): AssertOnQuery = AssertOnQuery { q =>
val progressWithData = q.recentProgress.filter(_.numInputRows > 0).lastOption.get
assert(progressWithData.stateOperators(0).numRowsTotal === numTotalRows)
true
}
private def assertEventStats(body: ju.Map[String, String] => Unit): AssertOnQuery = {
AssertOnQuery { q =>
body(q.recentProgress.filter(_.numInputRows > 0).lastOption.get.eventTime)
true
}
}
private val timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") // ISO8601
timestampFormat.setTimeZone(ju.TimeZone.getTimeZone("UTC"))
private def formatTimestamp(sec: Long): String = {
timestampFormat.format(new ju.Date(sec * 1000))
}
}
|
minixalpha/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/EventTimeWatermarkSuite.scala
|
Scala
|
apache-2.0
| 19,767 |
package com.programmaticallyspeaking.ncd.e2e
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.{Actor, ActorRef, Props}
import com.programmaticallyspeaking.ncd.chrome.domains.Debugger.{CallFrame, PausedEventParams}
import com.programmaticallyspeaking.ncd.chrome.domains.Messages
import com.programmaticallyspeaking.ncd.messaging.{Observable, Observer, SerializedSubject}
import com.programmaticallyspeaking.ncd.nashorn.NashornScriptHostTestFixture
import com.programmaticallyspeaking.ncd.testing.UnitTest
import org.scalatest.exceptions.TestFailedException
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.concurrent.{Await, ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
class E2ETestFixture extends UnitTest with NashornScriptHostTestFixture {
override implicit val executionContext: ExecutionContext = ExecutionContext.global
type Tester = (Seq[CallFrame]) => Any
// Return this from a callframe tester to prevent auto resume
case object DontAutoResume
private val domainEventSubject = new SerializedSubject[Messages.DomainMessage]
private val requestor = system.actorOf(Props(new Requestor), "E2E-Requestor")
private val currentId: AtomicInteger = new AtomicInteger(0)
private val promises = new TrieMap[String, Promise[Any]]()
protected def scriptEvents: Observable[Messages.DomainMessage] = domainEventSubject
protected def sendRequestAndWait(target: ActorRef, msg: AnyRef): Any = {
val id = currentId.incrementAndGet()
val promise = Promise[Any]()
promises += id.toString -> promise
target.tell(Messages.Request(id.toString, msg), requestor)
Await.result(promise.future, resultTimeout)
}
override def sendAndReceive(actorRef: ActorRef, msg: AnyRef): Any =
throw new IllegalStateException("Use sendRequestAndWait instead")
override def sendAndReceiveMatching[T](actorRef: ActorRef, msg: AnyRef)(pf: PartialFunction[Any, T]): T =
throw new IllegalStateException("Use sendRequestAndWait instead")
protected def runScript(script: String)(testers: Tester*): Unit = {
assert(script.contains("debugger;"), "Script must contain a 'debugger' statement")
val donePromise = Promise[Unit]()
val testerQueue = mutable.Queue(testers: _*)
var callFrameIdLists = Seq[Seq[String]]()
val eventSubscription = domainEventSubject.subscribe(Observer.from[Messages.DomainMessage] {
case Messages.Event(_, PausedEventParams(callFrames, _, _, _)) =>
Future {
callFrameIdLists :+= callFrames.map(_.callFrameId)
val tester = testerQueue.dequeue()
try {
tester(callFrames) match {
case DontAutoResume =>
case _ => getHost.resume()
}
if (testerQueue.isEmpty) {
donePromise.trySuccess(())
}
} catch {
case t: TestFailedException =>
donePromise.tryFailure(t)
case NonFatal(t) =>
val ids = callFrameIdLists.map(_.mkString("[ ", ", ", " ]")).mkString("[ ", ", ", " ]")
val errMsg = s"ERROR '${t.getMessage}' (call frame IDs: $ids), progress = \\n${summarizeProgress()}"
// Gradle shortens any stack trace too much, so suppress the stack trace of the wrapper exception
donePromise.tryFailure(new NoStackRuntimeException(errMsg, t))
}
}
})
donePromise.future.onComplete(_ => eventSubscription.unsubscribe())
observeAndRunScriptAsync(script) { _ => donePromise.future }
}
class Requestor extends Actor {
override def receive: Receive = {
case msg: Messages.Accepted =>
promises.remove(msg.id).foreach(_.trySuccess(()))
case Messages.Response(id, data) =>
promises.remove(id).foreach(_.trySuccess(data))
case Messages.ErrorResponse(id, error) =>
val msg = if (error == null) "<null>" else (if (error == "") "<unknown>" else error)
val ex = new Exception(msg)
promises.remove(id).foreach(_.tryFailure(ex))
case event: Messages.DomainMessage =>
domainEventSubject.onNext(event)
}
}
}
class NoStackRuntimeException(msg: String, cause: Throwable) extends RuntimeException(msg, cause) {
override def fillInStackTrace(): Throwable = this
}
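// Hedged usage sketch (the script and assertions below are assumptions, not part of this file):
// runScript pauses at every `debugger;` statement, hands the call frames to the next tester in
// order, and auto-resumes unless that tester returns DontAutoResume.
//
//   runScript("var x = 1; debugger; x = 2; debugger;")(
//     callFrames => assert(callFrames.nonEmpty),   // first pause
//     _          => DontAutoResume                 // second pause: keep the VM paused
//   )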
|
provegard/ncdbg
|
src/test/scala/com/programmaticallyspeaking/ncd/e2e/E2ETestFixture.scala
|
Scala
|
bsd-3-clause
| 4,337 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.tensor
import breeze.linalg.{DenseMatrix => BrzDenseMatrix, DenseVector => BrzDenseVector}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import org.apache.spark.mllib.linalg.{DenseMatrix, DenseVector, Matrix, Vector}
import scala.reflect.ClassTag
abstract class QuantizedTensorUnsupported[T: ClassTag] extends Tensor[T] {
val errorString = s"QuantizeTensor doesn't support this operation now"
/**
* Fill with a given value. It will change the value of the current tensor and return itself
*
* @param v value to fill the tensor
* @return current tensor
*/
override def fill(v: T): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Fill with zero. It will change the value of the current tensor and return itself
*
* @return current tensor
*/
override def zero(): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Fill with random value(normal gaussian distribution).
* It will change the value of the current tensor and return itself
*
* @return current tensor
*/
override def randn(): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Fill with random value(normal gaussian distribution with the specified mean
* and stdv).
* It will change the value of the current tensor and return itself
*
* @return current tensor
*/
override def randn(mean: Double, stdv: Double): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Fill with random value(uniform distribution).
* It will change the value of the current tensor and return itself
*
* @return current tensor
*/
override def rand(): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Fill with random value(uniform distribution between [lowerBound, upperBound])
* It will change the value of the current tensor and return itself
*
* @return current tensor
*/
override def rand(lowerBound: Double, upperBound: Double): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Fill with random value(bernoulli distribution).
* It will change the value of the current tensor and return itself
*
* @return current tensor
*/
override def bernoulli(p: Double): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** *
* Create a new tensor which exchanges the given dimensions of the current tensor
*
* @param dim1 dimension to be exchanged, count from one
* @param dim2 dimension to be exchanged, count from one
* @return new tensor
*/
override def transpose(dim1: Int, dim2: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Shortcut of transpose(1, 2) for 2D tensor
*
* @see transpose()
*/
override def t(): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Query tensor on a given index. Tensor should not be empty
*
* @param index count from 1
* @return
*/
override def apply(index: Int): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Query the value on a given index. Tensor should not be empty
*
 * @param indexes the indexes' length should be the same as the tensor dimension length, and each
 * value counts from 1
* @return the value on the given index
*/
override def apply(indexes: Array[Int]): T = throw new UnsupportedOperationException(errorString)
/**
* Query the value on a given position. The number of parameters
* should be equal to the dimension number of the tensor.
* Tensor should not be empty.
*
* @param d1 ,( d2, d3, d4, d5) the given position
* @return the value on a given position
*/
override def valueAt(d1: Int): T = throw new UnsupportedOperationException(errorString)
override def valueAt(d1: Int, d2: Int): T = throw new UnsupportedOperationException(errorString)
override def valueAt(d1: Int, d2: Int, d3: Int): T =
throw new UnsupportedOperationException(errorString)
override def valueAt(d1: Int, d2: Int, d3: Int, d4: Int): T =
throw new UnsupportedOperationException(errorString)
override def valueAt(d1: Int, d2: Int, d3: Int, d4: Int, d5: Int): T =
throw new UnsupportedOperationException(errorString)
/**
 * Subset the tensor by applying each element of the given table to the corresponding dimension
 * of the tensor. An element of the given table can be an Int or another Table.
 * An Int means select on the current dimension; a table means narrow on the current dimension,
 * and such a table should have two elements, of which the first is the start index and
 * the second is the end index. An empty table is equal to Table(1, size_of_current_dimension).
 * If the table length is less than the tensor dimension, the missing dimensions are treated as
 * empty tables
*
* @see select
* @see narrow
* @param t The table length should be less than or equal to the tensor dimensions
* @return
*/
override def apply(t: Table): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* For tensor(i) = value. If tensor(i) is another tensor, it will fill the selected subset by
* the given value
*
* @param index index
* @param value value to write
*/
override def update(index: Int, value: T): Unit =
throw new UnsupportedOperationException(errorString)
/**
 * Copy the given tensor's value to the subset of the current tensor selected by the given index.
 * The subset should have the same size as the given tensor
*
* @param index index
* @param src tensor to write
*/
override def update(index: Int, src: Tensor[T]): Unit =
throw new UnsupportedOperationException(errorString)
/**
 * Write the value at the position indexed by the given index array
 *
 * @param indexes index array. It should have the same length as the tensor dimension
* @param value value to write
*/
override def update(indexes: Array[Int], value: T): Unit =
throw new UnsupportedOperationException(errorString)
/**
* Write the value on a given position. The number of parameters
* should be equal to the dimension number of the tensor.
*
* @param d1 ,( d2, d3, d4, d5) the given position
* @param value the written value
* @return
*/
override def setValue(d1: Int, value: T): this.type =
throw new UnsupportedOperationException(errorString)
override def setValue(d1: Int, d2: Int, value: T): this.type =
throw new UnsupportedOperationException(errorString)
override def setValue(d1: Int, d2: Int, d3: Int, value: T): this.type =
throw new UnsupportedOperationException(errorString)
override def setValue(d1: Int, d2: Int, d3: Int, d4: Int, value: T): this.type =
throw new UnsupportedOperationException(errorString)
override def setValue(d1: Int, d2: Int, d3: Int, d4: Int, d5: Int,
value: T): this.type = throw new UnsupportedOperationException(errorString)
/**
 * Fill the selected subset of the current tensor with the given value.
 * An element of the given table can be an Int or another Table. An Int means select on the
 * current dimension; a table means narrow on the current dimension, and such a table should
 * have two elements, of which the first is the start index and the second is the end index.
 * An empty table is equal to Table(1, size_of_current_dimension). If the table length is less
 * than the tensor dimension, the missing dimensions are treated as empty tables
*
* @param t subset table
* @param value value to write
*/
override def update(t: Table, value: T): Unit =
throw new UnsupportedOperationException(errorString)
/**
 * Copy the given tensor's value to the selected subset of the current tensor.
 * An element of the given table can be an Int or another Table. An Int means select on the
 * current dimension; a table means narrow on the current dimension, and such a table should
 * have two elements, of which the first is the start index and the second is the end index.
 * An empty table is equal to Table(1, size_of_current_dimension). If the table length is less
 * than the tensor dimension, the missing dimensions are treated as empty tables
*
* @param t subset table
* @param src tensor to copy
*/
override def update(t: Table, src: Tensor[T]): Unit =
throw new UnsupportedOperationException(errorString)
/**
 * Update the values meeting the filter criteria with the given value
*
* @param filter filter
* @param value value to update
*/
override def update(filter: (T) => Boolean, value: T): Unit =
throw new UnsupportedOperationException(errorString)
/**
* Check if the tensor is contiguous on the storage
*
* @return true if it's contiguous
*/
override def isContiguous(): Boolean = throw new UnsupportedOperationException(errorString)
/**
* Get a contiguous tensor from current tensor
*
* @return the current tensor if it's contiguous; or a new contiguous tensor with separated
* storage
*/
override def contiguous(): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
* Check if the size is same with the give tensor
*
* @param other tensor to be compared
* @return true if they have same size
*/
override def isSameSizeAs(other: Tensor[_]): Boolean =
throw new UnsupportedOperationException(errorString)
/**
* Resize the current tensor to the same size of the given tensor. It will still use the same
* storage if the storage
* is sufficient for the new size
*
* @param src target tensor
* @return current tensor
*/
override def resizeAs(src: Tensor[_]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Remove the dim-th dimension and return the subset part. For instance
* tensor =
* 1 2 3
* 4 5 6
* tensor.select(1, 1) is [1 2 3]
* tensor.select(1, 2) is [4 5 6]
* tensor.select(2, 3) is [3 6]
*
* @param dim
* @param index
* @return
*/
override def select(dim: Int, index: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Get the storage
*
* @return storage
*/
override def storage(): Storage[T] =
throw new UnsupportedOperationException(errorString)
/**
* tensor offset on the storage
*
* @return storage offset, count from 1
*/
override def storageOffset(): Int =
throw new UnsupportedOperationException(errorString)
/**
* The Tensor is now going to "view" the given storage, starting at position storageOffset (>=1)
* with the given dimension sizes and the optional given strides. As the result, any
* modification in the elements of the Storage will have an impact on the elements of the Tensor,
* and vice-versa. This is an efficient method, as there is no memory copy!
*
* If only storage is provided, the whole storage will be viewed as a 1D Tensor.
*
* @param storage
* @param storageOffset
* @param sizes
* @param strides
* @return current tensor
*/
override def set(storage: Storage[T], storageOffset: Int, sizes: Array[Int],
strides: Array[Int]): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
 * Get a subset of the tensor on the dim-th dimension. The offset is given by index, and the
 * length is given by size. The important difference from select is that it will not reduce the
 * dimension number. For instance
* tensor =
* 1 2 3
* 4 5 6
* tensor.narrow(1, 1, 1) is [1 2 3]
* tensor.narrow(2, 2, 2) is
* 2 3
* 5 6
*
* @param dim
* @param index
* @param size
* @return
*/
override def narrow(dim: Int, index: Int, size: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
 * Apply a function to each element of the tensor and replace the element with the returned value
*
* @param func applied function
* @return current tensor
*/
override def apply1(func: (T) => T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
 * Map each value of another tensor to the corresponding value of the current tensor, apply the
 * function to the two values, and store the result in the current tensor.
 * The other tensor should have the same size as the current tensor
*
* @param other another tensor
* @param func applied function
* @return current tensor
*/
override def map(other: Tensor[T], func: (T, T) => T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Removes all singleton dimensions of the tensor
*
* @return current tensor
*/
override def squeeze(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
 * Removes the given dimension of the tensor if it is a singleton
*
* @return current tensor
*/
override def squeeze(dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Create a new tensor that removes all singleton dimensions of the tensor
*
* @return create a new tensor
*/
override def squeezeNewTensor(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def view(sizes: Array[Int]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
*
* Returns a tensor which contains all slices of size @param size
* in the dimension @param dim. Step between two slices is given by @param step.
*
* @param dim
* @param size
* @param step Step between two slices
* @return new tensor
*/
override def unfold(dim: Int, size: Int, step: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Repeating a tensor allocates new memory, unless result is provided, in which case its memory
* is resized. sizes specify the number of times the tensor is repeated in each dimension.
*
* @param sizes
* @return
*/
override def repeatTensor(sizes: Array[Int]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
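  // Illustrative sketch of the repeatTensor contract above, assuming the usual dense Tensor
  // factory and semantics (this quantized class only throws for the call):
  //   val v = Tensor[Float](3)             // 1-D tensor, values [1, 2, 3] after filling
  //   val r = v.repeatTensor(Array(2, 2))  // newly allocated 2 x 6 result
  //   // r is [[1, 2, 3, 1, 2, 3],
  //   //       [1, 2, 3, 1, 2, 3]]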
/**
* This is equivalent to this.expand(template.size())
*
* @param template the given tensor
* @return
*/
override def expandAs(template: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * Expanding a tensor does not allocate new memory, but only creates a new view on the existing
   * tensor where singleton dimensions can be expanded to multiple ones by setting the stride
   * to 0. Any dimension that has size 1 can be expanded to an arbitrary value without new memory
   * allocation. Attempting to expand along a dimension that does not have size 1 will result in
   * an error.
*
* @param sizes the size that tensor will expend to
* @return
*/
override def expand(sizes: Array[Int]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
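  // Illustrative sketch of expand, assuming the usual dense Tensor factory and semantics
  // (this quantized class only throws for the call):
  //   val col = Tensor[Float](2, 1)        // [[1], [2]] after filling
  //   val e = col.expand(Array(2, 3))      // 2 x 3 view, stride 0 on dim 2, no copy
  //   // e reads as [[1, 1, 1], [2, 2, 2]]; the three entries of a row share one storage element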
/**
* Splits current tensor along dimension dim into a result table of Tensors of size size
* (a number) or less (in the case of the last Tensor). The sizes of the non-dim dimensions
   * remain unchanged. Internally, a series of narrows are performed along dimension dim.
* Argument dim defaults to 1.
*
* @param size
* @param dim
* @return
*/
override def split(size: Int, dim: Int): Array[Tensor[T]] =
throw new UnsupportedOperationException(errorString)
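  // Illustrative sketch of split(size, dim) on a dense tensor (assumed factory, not this class):
  //   val t = Tensor[Float](5, 2)
  //   val parts = t.split(2, 1)            // a series of narrows along dimension 1
  //   // parts.map(_.size(1)) is Array(2, 2, 1); the last piece holds the remainder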
/**
   * split one tensor into multiple tensors along the `dim` dimension
*
* @param dim the specific dimension
* @return
*/
override def split(dim: Int): Array[Tensor[T]] =
throw new UnsupportedOperationException(errorString)
/**
   * convert the tensor to BreezeVector; the dimension of the tensor needs to be 1.
*
* @return BrzDenseVector
*/
override def toBreezeVector(): BrzDenseVector[T] =
throw new UnsupportedOperationException(errorString)
/**
   * convert the tensor to MLlibVector; the dimension of the
   * tensor needs to be 1, and the tensor needs to be contiguous.
*
* @return Vector
*/
override def toMLlibVector(): Vector =
throw new UnsupportedOperationException(errorString)
/**
   * convert the tensor to BreezeMatrix; the dimension of the tensor needs to be 2.
*
* @return BrzDenseMatrix
*/
override def toBreezeMatrix(): BrzDenseMatrix[T] =
throw new UnsupportedOperationException(errorString)
/**
   * convert the tensor to MLlibMatrix; the dimension of the
   * tensor needs to be 2, and the tensor needs to be contiguous.
*
* @return Matrix
*/
override def toMLlibMatrix(): Matrix =
throw new UnsupportedOperationException(errorString)
/**
   * return the tensor datatype (DoubleType or FloatType)
*
* @return
*/
override def getType(): TensorDataType =
throw new UnsupportedOperationException(errorString)
/**
* Compare and print differences between two tensors
*
* @param other
* @param count
   * @return true if there is a difference, false otherwise
*/
override def diff(other: Tensor[T], count: Int, reverse: Boolean): Boolean =
throw new UnsupportedOperationException(errorString)
/**
* view this.tensor and add a Singleton Dimension to `dim` dimension
*
* @param t source tensor
* @param dim the specific dimension, default is 1
* @return this
*/
override def addSingletonDimension(t: Tensor[T], dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * create a new tensor with the given sizes, without changing the original tensor
*
* @param sizes the size of the new Tensor
* @return
*/
override def reshape(sizes: Array[Int]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Save the tensor to given path
*
* @param path
* @param overWrite
* @return
*/
override def save(path: String, overWrite: Boolean): this.type =
throw new UnsupportedOperationException(errorString)
// scalastyle:off methodName
/**
   * Add value to all elements of this, not in place.
* It will allocate new memory.
*
* @param s
* @return
*/
override def +(s: T): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
   * Add a Tensor to another one, returning the result in newly allocated memory.
* The number of elements in the Tensors must match, but the sizes do not matter.
* The size of the returned Tensor will be the size of the first Tensor
*
* @param t
* @return
*/
override def +(t: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
   * Subtract value from all elements of this, not in place.
* It will allocate new memory.
*
* @param s
* @return
*/
override def -(s: T): Tensor[T] = throw new UnsupportedOperationException(errorString)
/**
   * Subtract a Tensor from another one, returning the result in newly allocated memory.
* The number of elements in the Tensors must match, but the sizes do not matter.
* The size of the returned Tensor will be the size of the first Tensor
*
* @param t
* @return
*/
override def -(t: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def unary_-(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * Divide all elements of this by value, not in place.
* It will allocate new memory.
*
* @param s
* @return
*/
override def /(s: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * Divide a Tensor by another one, returning the result in newly allocated memory.
* The number of elements in the Tensors must match, but the sizes do not matter.
* The size of the returned Tensor will be the size of the first Tensor
*
* @param t
* @return
*/
override def /(t: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * Multiply all elements of this by value, not in place.
* It will allocate new memory.
*
* @param s
* @return
*/
override def *(s: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * Multiply a Tensor by another one, returning the result in newly allocated memory.
* The number of elements in the Tensors must match, but the sizes do not matter.
* The size of the returned Tensor will be the size of the first Tensor
*
* @param t
* @return
*/
override def *(t: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
// scalastyle:on methodName
/**
* returns the sum of the elements of this
*
* @return
*/
override def sum(): T =
throw new UnsupportedOperationException(errorString)
/**
* performs the sum operation over the dimension dim
*
* @param dim
* @return
*/
override def sum(dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sum(x: Tensor[T], dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
def prod(): T =
throw new UnsupportedOperationException(errorString)
def prod(x: Tensor[T], dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* returns the mean of all elements of this.
*
* @return
*/
override def mean(): T =
throw new UnsupportedOperationException(errorString)
/**
* performs the mean operation over the dimension dim.
*
* @param dim
* @return
*/
override def mean(dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* returns the single biggest element of x
*
* @return
*/
override def max(): T =
throw new UnsupportedOperationException(errorString)
/**
* performs the max operation over the dimension n
*
* @param dim
* @return
*/
override def max(dim: Int): (Tensor[T], Tensor[T]) =
throw new UnsupportedOperationException(errorString)
/**
* performs the max operation over the dimension n
*
* @param values
* @param indices
* @param dim
* @return
*/
override def max(values: Tensor[T], indices: Tensor[T], dim: Int): (Tensor[T], Tensor[T]) =
throw new UnsupportedOperationException(errorString)
/**
* returns the single minimum element of x
*
* @return
*/
override def min(): T =
throw new UnsupportedOperationException(errorString)
/**
* performs the min operation over the dimension n
*
* @param dim
* @return
*/
override def min(dim: Int): (Tensor[T], Tensor[T]) =
throw new UnsupportedOperationException(errorString)
/**
* performs the min operation over the dimension n
*
* @param values
* @param indices
* @param dim
* @return
*/
override def min(values: Tensor[T], indices: Tensor[T], dim: Int): (Tensor[T], Tensor[T]) =
throw new UnsupportedOperationException(errorString)
/**
* Writes all values from tensor src into this tensor at the specified indices
*
* @param dim
* @param index
* @param src
* @return this
*/
override def scatter(dim: Int, index: Tensor[T], src: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
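  // Illustrative sketch of scatter on a dense tensor, assuming 1-based indices as in the rest of
  // this API (this quantized class only throws for the call). For dim = 1 the contract above is
  // this(index(i)(j))(j) = src(i)(j):
  //   val dst = Tensor[Float](3, 3)        // zeros
  //   dst.scatter(1, idx, src)             // idx, src: 2 x 3 tensors, idx values in 1..3
  //   // column j of dst receives src(1)(j) and src(2)(j) at rows idx(1)(j) and idx(2)(j)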
/**
* change this tensor with values from the original tensor by gathering a number of values
* from each "row", where the rows are along the dimension dim.
*
* @param dim
* @param index
* @param src
* @return this
*/
override def gather(dim: Int, index: Tensor[T], src: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* This function computes 2 dimensional convolution of a single image
* with a single kernel (2D output). the dimensions of input and kernel
* need to be 2, and Input image needs to be bigger than kernel. The
* last argument controls if the convolution is a full ('F') or valid
* ('V') convolution. The default is valid convolution.
*
* @param kernel
* @param vf full ('F') or valid ('V') convolution.
* @return
*/
override def conv2(kernel: Tensor[T], vf: Char): Tensor[T] =
throw new UnsupportedOperationException(errorString)
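  // Illustrative sketch of the conv2 output sizes, assuming the usual dense Tensor factory:
  //   val image = Tensor[Float](5, 5)
  //   val kernel = Tensor[Float](3, 3)
  //   image.conv2(kernel, 'V')             // valid: (5 - 3 + 1) x (5 - 3 + 1) = 3 x 3
  //   image.conv2(kernel, 'F')             // full:  (5 + 3 - 1) x (5 + 3 - 1) = 7 x 7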
/**
* This function operates with same options and input/output configurations as conv2,
* but performs cross-correlation of the input with the kernel k.
*
* @param kernel
* @param vf full ('F') or valid ('V') convolution.
* @return
*/
override def xcorr2(kernel: Tensor[T], vf: Char): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* replaces all elements in-place with the square root of the elements of this.
*
* @return
*/
override def sqrt(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* replaces all elements in-place with the absolute values of the elements of this.
*
* @return
*/
override def abs(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* x.add(value,y) multiply-accumulates values of y into x.
*
* @param value scalar
* @param y other tensor
* @return current tensor
*/
override def add(value: T, y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* accumulates all elements of y into this
*
* @param y other tensor
* @return current tensor
*/
override def add(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* z.add(x, value, y) puts the result of x + value * y in z.
*
* @param x
* @param value
* @param y
* @return
*/
override def add(x: Tensor[T], value: T, y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* x.add(value) : add value to all elements of x in place.
*
* @param value
* @return
*/
override def add(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def add(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the dot product. The number of elements must match: both Tensors are seen as a 1D
* vector.
*
* @param y
* @return
*/
override def dot(y: Tensor[T]): T =
throw new UnsupportedOperationException(errorString)
/**
   * For each element of the tensor, performs the max operation against the given value.
*
* @param value
* @return
*/
override def cmax(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the p-norm distance calculation between two tensors
*
   * @param y the second Tensor
* @param norm the norm of distance
* @return
*/
override def dist(y: Tensor[T], norm: Int): T =
throw new UnsupportedOperationException(errorString)
/**
* Performs the element-wise multiplication of tensor1 by tensor2, multiply the result by the
* scalar value (1 if not present) and add it to x. The number of elements must match, but sizes
* do not matter.
*
* @param value
* @param tensor1
* @param tensor2
*/
override def addcmul(value: T, tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def addcmul(tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the element-wise division of tensor1 by tensor2, multiply the result by the scalar
* value and add it to x.
* The number of elements must match, but sizes do not matter.
*
* @param value
* @param tensor1
* @param tensor2
* @return
*/
override def addcdiv(value: T, tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sub(value: T, y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sub(x: Tensor[T], value: T, y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* subtracts all elements of y from this
*
* @param y other tensor
* @return current tensor
*/
override def sub(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sub(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sub(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise multiply
* x.cmul(y) multiplies all elements of x with corresponding elements of y.
* x = x * y
*
* @param y tensor
* @return current tensor
*/
override def cmul(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise multiply
* z.cmul(x, y) equals z = x * y
*
* @param x tensor
* @param y tensor
* @return current tensor
*/
override def cmul(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise divide
   * x.cdiv(y) divides all elements of x by the corresponding elements of y.
* x = x / y
*
* @param y tensor
* @return current tensor
*/
override def cdiv(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise divide
* z.cdiv(x, y) means z = x / y
*
* @param x tensor
* @param y tensor
* @return current tensor
*/
override def cdiv(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* multiply all elements of this with value in-place.
*
* @param value
* @return
*/
override def mul(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* divide all elements of this with value in-place.
*
* @param value
* @return
*/
override def div(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* put the result of x * value in current tensor
*
* @param value
* @return
*/
override def mul(x: Tensor[T], value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs a matrix-matrix multiplication between mat1 (2D tensor) and mat2 (2D tensor).
* Optional values v1 and v2 are scalars that multiply M and mat1 * mat2 respectively.
* Optional value beta is a scalar that scales the result tensor, before accumulating the result
* into the tensor. Defaults to 1.0.
* If mat1 is a n x m matrix, mat2 a m x p matrix, M must be a n x p matrix.
*
* res = (v1 * M) + (v2 * mat1*mat2)
*
* @param v1
* @param M
* @param v2
* @param mat1
* @param mat2
*/
override def addmm(v1: T, M: Tensor[T], v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
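  // Illustrative sketch of the addmm shapes and formula above, assuming dense tensors:
  //   val M = Tensor[Float](2, 4)
  //   val mat1 = Tensor[Float](2, 3)
  //   val mat2 = Tensor[Float](3, 4)
  //   val res = Tensor[Float](2, 4)
  //   res.addmm(0.5f, M, 2.0f, mat1, mat2) // res = 0.5 * M + 2.0 * (mat1 * mat2), a 2 x 4 result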
/** res = M + (mat1*mat2) */
override def addmm(M: Tensor[T], mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = res + mat1 * mat2 */
override def addmm(mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = res + v2 * mat1 * mat2 */
override def addmm(v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = v1 * res + v2 * mat1*mat2 */
override def addmm(v1: T, v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = mat1*mat2 */
override def mm(mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the outer-product between vec1 (1D tensor) and vec2 (1D tensor).
   * Optional values v1 and v2 are scalars that multiply mat and vec1 ⊗ vec2 respectively.
* In other words,
* res_ij = (v1 * mat_ij) + (v2 * vec1_i * vec2_j)
*
* @param t1
* @param t2
* @return
*/
override def addr(t1: Tensor[T], t2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def addr(v1: T, t1: Tensor[T], t2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def addr(v1: T, t1: Tensor[T], v2: T, t2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the outer-product between vec1 (1D Tensor) and vec2 (1D Tensor).
   * Optional values v1 and v2 are scalars that multiply mat and vec1 ⊗ vec2 respectively.
   * In other words, res_ij = (v1 * mat_ij) + (v2 * vec1_i * vec2_j)
*
* @param v1
* @param t1
* @param v2
* @param t2
* @param t3
* @return
*/
override def addr(v1: T, t1: Tensor[T], v2: T, t2: Tensor[T], t3: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
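  // Illustrative sketch of the addr shapes and formula above, assuming dense tensors:
  //   val mat = Tensor[Float](2, 3)
  //   val vec1 = Tensor[Float](2)
  //   val vec2 = Tensor[Float](3)
  //   val res = Tensor[Float](2, 3)
  //   res.addr(1.0f, mat, 2.0f, vec1, vec2) // res_ij = 1.0 * mat_ij + 2.0 * vec1_i * vec2_j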
/**
* return pseudo-random numbers, require 0<=args.length<=2
* if args.length = 0, return [0, 1)
* if args.length = 1, return [1, args(0)] or [args(0), 1]
* if args.length = 2, return [args(0), args(1)]
*
* @param args
*/
override def uniform(args: T*): T =
throw new UnsupportedOperationException(errorString)
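  // Illustrative sketch of uniform, assuming a dense Tensor instance t (this quantized class
  // only throws). Each call draws one pseudo-random value of type T:
  //   t.uniform()            // a value in [0, 1)
  //   t.uniform(5.0f)        // a value in [1, 5]
  //   t.uniform(2.0f, 10.0f) // a value in [2, 10]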
/**
* Performs a matrix-vector multiplication between mat (2D Tensor) and vec2 (1D Tensor) and add
* it to vec1. Optional values v1 and v2 are scalars that multiply vec1 and vec2 respectively.
*
* In other words,
* res = (beta * vec1) + alpha * (mat * vec2)
*
   * Sizes must respect the matrix-multiplication operation: if mat is an n x m matrix,
   * vec2 must be a vector of size m and vec1 must be a vector of size n.
*/
override def addmv(beta: T, vec1: Tensor[T], alpha: T, mat: Tensor[T],
vec2: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
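  // Illustrative sketch of the addmv shapes and formula above, assuming dense tensors:
  //   val mat = Tensor[Float](4, 3)        // n x m
  //   val vec2 = Tensor[Float](3)          // size m
  //   val vec1 = Tensor[Float](4)          // size n
  //   val res = Tensor[Float](4)
  //   res.addmv(1.0f, vec1, 0.5f, mat, vec2) // res = 1.0 * vec1 + 0.5 * (mat * vec2)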
/** res = beta * res + alpha * (mat * vec2) */
override def addmv(beta: T, alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = res + alpha * (mat * vec2) */
override def addmv(alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = res + (mat * vec2) */
override def mv(mat: Tensor[T], vec2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Perform a batch matrix matrix multiplication of matrices and stored in batch1 and batch2
* with batch add. batch1 and batch2 must be 3D Tensors each containing the same number of
   * matrices. If batch1 is a b x n x m Tensor, batch2 a b x m x p Tensor, res will be a
   * b x n x p Tensor.
*
* In other words,
* res_i = (beta * M_i) + (alpha * batch1_i * batch2_i)
*/
override def baddbmm(beta: T, M: Tensor[T], alpha: T, batch1: Tensor[T],
batch2: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
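  // Illustrative sketch of the baddbmm batch shapes above, assuming dense tensors:
  //   val M = Tensor[Float](8, 2, 4)       // b x n x p
  //   val batch1 = Tensor[Float](8, 2, 3)  // b x n x m
  //   val batch2 = Tensor[Float](8, 3, 4)  // b x m x p
  //   val res = Tensor[Float](8, 2, 4)
  //   res.baddbmm(1.0f, M, 0.5f, batch1, batch2) // res_i = 1.0 * M_i + 0.5 * (batch1_i * batch2_i)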
/** res_i = (beta * res_i) + (alpha * batch1_i * batch2_i) */
override def baddbmm(beta: T, alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res_i = res_i + (alpha * batch1_i * batch2_i) */
override def baddbmm(alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res_i = res_i + batch1_i * batch2_i */
override def bmm(batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * Replaces all elements in-place with the elements of y to the power of n
*
* @param y
* @param n
* @return current tensor reference
*/
override def pow(y: Tensor[T], n: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def pow(n: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def square(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Get the top k smallest values and their indices.
*
* @param result result buffer
* @param indices indices buffer
* @param k
* @param dim dimension, default is the last dimension
* @param increase sort order, set it to true if you want to get the smallest top k values
* @return
*/
override def topk(k: Int, dim: Int, increase: Boolean, result: Tensor[T],
indices: Tensor[T], sortedResult: Boolean = true): (Tensor[T], Tensor[T]) =
throw new UnsupportedOperationException(errorString)
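  // Illustrative sketch of topk, assuming a dense Tensor implementation and 1-based indices:
  //   val t = Tensor[Float](4)             // values [5, 1, 4, 2] after filling
  //   val (vals, idx) = t.topk(2, 1, true, Tensor[Float](), Tensor[Float]())
  //   // vals is [1, 2] and idx is [2, 4]: the two smallest values and where they came from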
/**
   * Replaces all elements in-place with the natural logarithm of the elements of y
*
* @param y
* @return current tensor reference
*/
override def log(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def exp(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sqrt(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def log1p(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def log(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def exp(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def log1p(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def abs(x: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* returns the p-norms of the Tensor x computed over the dimension dim.
*
* @param y result buffer
* @param value
* @param dim
* @return
*/
override def norm(y: Tensor[T], value: Int, dim: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Implements > operator comparing each element in x with y
*
* @param x
* @param y
* @return current tensor reference
*/
override def gt(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Implements < operator comparing each element in x with y
*
* @param x
* @param y
* @return current tensor reference
*/
override def lt(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Implements <= operator comparing each element in x with y
*
* @param x
* @param y
* @return current tensor reference
*/
override def le(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Implements == operator comparing each element in x with y
*
* @param y
* @return current tensor reference
*/
override def eq(x: Tensor[T], y: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Fills the masked elements of itself with value val
*
* @param mask
* @param e
* @return current tensor reference
*/
override def maskedFill(mask: Tensor[T], e: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Copies the elements of tensor into mask locations of itself.
*
* @param mask
* @param y
* @return current tensor reference
*/
override def maskedCopy(mask: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Returns a new Tensor which contains all elements aligned to a 1 in the corresponding mask.
*
* @param mask
* @param y
* @return current tensor reference
*/
override def maskedSelect(mask: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* returns the sum of the n-norms on the Tensor x
*
* @param value the n-norms
* @return
*/
override def norm(value: Int): T =
throw new UnsupportedOperationException(errorString)
/**
* returns a new Tensor with the sign (+/- 1 or 0) of the elements of x.
*
* @return
*/
override def sign(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Implements >= operator comparing each element in x with value
*
* @param x
* @param value
* @return
*/
override def ge(x: Tensor[T], value: Double): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Accumulate the elements of tensor into the original tensor by adding to the indices
* in the order given in index. The shape of tensor must exactly match the elements indexed
* or an error will be thrown.
*
* @param dim
* @param index
* @param y
* @return
*/
override def indexAdd(dim: Int, index: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Accumulate the elements of tensor into the original tensor by adding to the indices
* in the order given in index. The shape of tensor must exactly match the elements indexed
* or an error will be thrown.
*
* @param dim
* @param index
* @param y
* @return
*/
override def index(dim: Int, index: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* stores the element-wise maximum of x and y in x.
* x.cmax(y) = max(x, y)
*
* @param y tensor
* @return current tensor
*/
override def cmax(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * stores the element-wise minimum of x and y in x.
* x.cmin(y) = min(x, y)
*
* @param y tensor
* @return current tensor
*/
override def cmin(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* stores the element-wise maximum of x and y in z.
* z.cmax(x, y) means z = max(x, y)
*
* @param x tensor
* @param y tensor
*/
override def cmax(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * stores the element-wise minimum of x and y in z.
* z.cmin(x, y) means z = min(x, y)
*
* @param x tensor
* @param y tensor
*/
override def cmin(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
   * resize this tensor to size floor((xmax - xmin) / step) + 1 and set its values from
   * xmin to xmax with the given step (default 1).
*
* @param xmin
* @param xmax
* @param step
* @return this tensor
*/
override def range(xmin: Double, xmax: Double, step: Int): Tensor[T] =
throw new UnsupportedOperationException(errorString)
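  // Illustrative sketch of range, assuming the usual dense Tensor factory:
  //   val t = Tensor[Float]()
  //   t.range(1, 7, 2)                     // resizes to floor((7 - 1) / 2) + 1 = 4 elements
  //   // t is now [1, 3, 5, 7]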
override def toTensor[D](implicit ev: TensorNumeric[D]): Tensor[D] =
throw new UnsupportedOperationException(errorString)
override def tanh(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def tanh(y: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def resize(sizes: Array[Int], strides: Array[Int]): this.type =
throw new UnsupportedOperationException(errorString)
override def resize(size1: Int): this.type = throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int): this.type =
throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int, size3: Int): this.type =
throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int, size3: Int, size4: Int): this.type =
throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int, size3: Int, size4: Int, size5: Int): this.type =
throw new UnsupportedOperationException(errorString)
override def isEmpty: Boolean =
throw new UnsupportedOperationException(errorString)
override def isScalar: Boolean =
throw new UnsupportedOperationException(errorString)
override def value(): T =
throw new UnsupportedOperationException(errorString)
override def setValue(value: T): this.type =
throw new UnsupportedOperationException(errorString)
override def zipWith[A: ClassTag, B: ClassTag](t1: Tensor[A], t2: Tensor[B],
func: (A, B) => T): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def forceFill(v: Any): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def emptyInstance(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def applyFun[A: ClassTag](t: Tensor[A], func: (A) => T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def cast[D: ClassTag](castTensor: Tensor[D])(implicit ev: TensorNumeric[D]): Tensor[D] =
throw new UnsupportedOperationException(errorString)
override def div(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def floor(y: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def floor(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def ceil(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def negative(x: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def inv(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def reduce(dim: Int, result: Tensor[T], reducer: (T, T) => T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def toArray(): Array[T] =
throw new UnsupportedOperationException(errorString)
override def erf(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def erfc(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def logGamma(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def digamma(): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def clamp(minValue: Float, maxValue: Float): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def sumSquare(): T =
throw new UnsupportedOperationException(errorString)
}
|
qiuxin2012/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/tensor/QuantizedTensorUnsupported.scala
|
Scala
|
apache-2.0
| 46,702 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.robolectric.shadows
import libcore.icu.{Transliterator => LibcoreTransliterator}
import com.ibm.icu.text.{Transliterator => ICU4JTransliterator}
import org.robolectric.annotation.{Implementation, Implements}
@Implements(classOf[LibcoreTransliterator]) class ShadowTransliterator {
private var delegate: ICU4JTransliterator = _
def __constructor__(id: String): Unit = delegate = ICU4JTransliterator.getInstance(id)
@Implementation def transliterate(it: String): String = delegate transliterate it
}
|
wireapp/wire-android-sync-engine
|
zmessaging/src/test/scala/org/robolectric/shadows/ShadowTransliterator.scala
|
Scala
|
gpl-3.0
| 1,220 |
package net.elodina.mesos.exhibitor
import java.util
import java.util.concurrent.TimeUnit
import java.util.{Collections, Date}
import net.elodina.mesos.util.Repr
import com.google.protobuf.ByteString
import net.elodina.mesos.exhibitor.Util.Str
import net.elodina.mesos.exhibitor.exhibitorapi._
import org.apache.log4j._
import org.apache.mesos.Protos._
import org.apache.mesos.{MesosSchedulerDriver, SchedulerDriver}
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
object Scheduler extends org.apache.mesos.Scheduler {
private val logger = Logger.getLogger(this.getClass)
private val ensembleLock = new Object()
private[exhibitor] val cluster = Cluster()
private var driver: SchedulerDriver = null
def start() {
initLogging()
logger.info(s"Starting ${getClass.getSimpleName}:\\n$Config")
cluster.load()
HttpServer.start()
val frameworkBuilder = FrameworkInfo.newBuilder()
frameworkBuilder.setUser(Config.user)
cluster.frameworkId.foreach(id => frameworkBuilder.setId(FrameworkID.newBuilder().setValue(id)))
frameworkBuilder.setName(Config.frameworkName)
frameworkBuilder.setFailoverTimeout(Config.frameworkTimeout.toUnit(TimeUnit.SECONDS))
frameworkBuilder.setRole(Config.frameworkRole)
frameworkBuilder.setCheckpoint(true)
val creds = for {
principal <- Config.principal
secret <- Config.secret
} yield (principal, secret)
val driver = creds.map { case (principal, secret) =>
frameworkBuilder.setPrincipal(principal)
new MesosSchedulerDriver(this, frameworkBuilder.build, Config.master, Credential.newBuilder()
.setPrincipal(principal)
.setSecret(ByteString.copyFromUtf8(secret))
.build())
}.getOrElse(new MesosSchedulerDriver(this, frameworkBuilder.build, Config.master))
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
HttpServer.stop()
}
})
val status = if (driver.run eq Status.DRIVER_STOPPED) 0 else 1
sys.exit(status)
}
override def registered(driver: SchedulerDriver, id: FrameworkID, master: MasterInfo) {
logger.info("[registered] framework:" + Str.id(id.getValue) + " master:" + Str.master(master))
cluster.frameworkId = Some(id.getValue)
cluster.save()
this.driver = driver
reconcileTasks(force = true)
}
override def offerRescinded(driver: SchedulerDriver, id: OfferID) {
logger.info("[offerRescinded] " + Str.id(id.getValue))
}
override def disconnected(driver: SchedulerDriver) {
logger.info("[disconnected]")
this.driver = null
}
override def reregistered(driver: SchedulerDriver, master: MasterInfo) {
logger.info("[reregistered] master:" + Str.master(master))
this.driver = driver
reconcileTasks(force = true)
}
override def slaveLost(driver: SchedulerDriver, id: SlaveID) {
logger.info("[slaveLost] " + Str.id(id.getValue))
}
override def error(driver: SchedulerDriver, message: String) {
logger.info("[error] " + message)
}
override def statusUpdate(driver: SchedulerDriver, status: TaskStatus) {
logger.info("[statusUpdate] " + Str.taskStatus(status))
onServerStatus(driver, status)
}
override def frameworkMessage(driver: SchedulerDriver, executorId: ExecutorID, slaveId: SlaveID, data: Array[Byte]) {
logger.info("[frameworkMessage] executor:" + Str.id(executorId.getValue) + " slave:" + Str.id(slaveId.getValue) + " data: " + new String(data))
}
override def resourceOffers(driver: SchedulerDriver, offers: util.List[Offer]) {
logger.debug("[resourceOffers]\\n" + Str.offers(offers))
onResourceOffers(offers.toList)
}
override def executorLost(driver: SchedulerDriver, executorId: ExecutorID, slaveId: SlaveID, status: Int) {
logger.info("[executorLost] executor:" + Str.id(executorId.getValue) + " slave:" + Str.id(slaveId.getValue) + " status:" + status)
}
private def onResourceOffers(offers: List[Offer]) {
offers.foreach { offer =>
acceptOffer(offer).foreach { declineReason =>
driver.declineOffer(offer.getId)
logger.info(s"Declined offer:\\n $declineReason")
}
}
reconcileTasks()
Scheduler.cluster.save()
}
private[exhibitor] def acceptOffer(offer: Offer): Option[String] = {
val ensemble = cluster.defaultEnsemble()
ensemble.servers(Exhibitor.Stopped).filterNot(e => e.failover.isWaitingDelay(new Date())) match {
case Nil => Some("all servers are running")
case servers =>
val reason = servers.flatMap { server =>
Reservation.reserve(offer, ensemble, server) match {
case Right(reservation) =>
server.matches(offer, new Date(), otherTasksAttributes) match {
case Some(declineReason) => Some(s"server ${server.id}: $declineReason")
case None =>
launchTask(reservation, ensemble, server, offer)
return None
}
case Left(declineReason) => Some(declineReason)
}
}.mkString(", ")
if (reason.isEmpty) None else Some(reason)
}
}
private def launchTask(reservation: Reservation, ensemble: Ensemble, server: Exhibitor, offer: Offer) {
ensemble.updatePorts(reservation)
val task = server.createTask(offer, reservation)
val taskId = task.getTaskId.getValue
val attributes = offer.getAttributesList.toList.filter(_.hasText).map(attr => attr.getName -> attr.getText.getValue).toMap
server.task = Exhibitor.Task(taskId, task.getSlaveId.getValue, task.getExecutor.getExecutorId.getValue, attributes, offer.getHostname)
server.state = Exhibitor.Staging
driver.launchTasks(util.Arrays.asList(offer.getId), util.Arrays.asList(task), Filters.newBuilder().setRefuseSeconds(1).build)
logger.info(s"Starting server ${server.id}: launching task $taskId for offer ${offer.getId.getValue}")
}
private def onServerStatus(driver: SchedulerDriver, status: TaskStatus) {
val server = cluster.defaultEnsemble().getServer(Exhibitor.idFromTaskId(status.getTaskId.getValue))
status.getState match {
case TaskState.TASK_RUNNING =>
new Thread {
override def run() {
onServerStarted(server, driver, status)
}
}.start()
case TaskState.TASK_LOST | TaskState.TASK_FAILED | TaskState.TASK_ERROR =>
onServerFailed(server, status)
case TaskState.TASK_FINISHED | TaskState.TASK_KILLED =>
onServerFinished(server, status)
case _ => logger.warn("Got unexpected task state: " + status.getState)
}
Scheduler.cluster.save()
}
private def onServerStarted(serverOpt: Option[Exhibitor], driver: SchedulerDriver, status: TaskStatus) {
serverOpt match {
case Some(server) =>
this.synchronized {
if (server.state != Exhibitor.Running) {
server.state = Exhibitor.Running
server.registerStart(server.task.hostname)
server.config.exhibitorHostname = status.getData.toStringUtf8
addToEnsemble(server).onFailure { case t =>
logger.info(s"Failed to add server ${server.id} to ensemble, force fail")
if (server.task != null) {
driver.sendFrameworkMessage(
ExecutorID.newBuilder().setValue(server.task.executorId).build(),
SlaveID.newBuilder().setValue(server.task.slaveId).build(),
"fail".getBytes)
}
}
}
}
case None =>
logger.info(s"Got ${status.getState} for unknown/stopped server, killing task ${status.getTaskId}")
driver.killTask(status.getTaskId)
}
}
private def onServerFailed(serverOpt: Option[Exhibitor], status: TaskStatus) {
serverOpt match {
case Some(server) =>
server.task = null
server.config.hostname = ""
server.config.exhibitorHostname = ""
server.registerStop(new Date(), failed = true)
if (server.failover.isMaxTriesExceeded) {
server.state = Exhibitor.Added
} else {
server.state = Exhibitor.Stopped
}
logFailureStatus(server)
case None => logger.info(s"Got ${status.getState} for unknown/stopped server with task ${status.getTaskId}")
}
}
private def logFailureStatus(server: Exhibitor): Unit = {
var msg = s"Server ${server.id} failed ${server.failover.failures}"
server.failover.maxTries.foreach {
tries => msg += "/" + tries
}
if (!server.failover.isMaxTriesExceeded) {
msg += ", waiting " + server.failover.currentDelay
msg += ", next start ~ " + Repr.dateTime(server.failover.delayExpires)
} else {
msg += ", failure limit exceeded"
msg += ", deactivating server"
}
logger.info(msg)
}
private def onServerFinished(serverOpt: Option[Exhibitor], status: TaskStatus) {
serverOpt match {
case Some(server) =>
server.state = Exhibitor.Added
server.task = null
server.config.hostname = ""
server.config.exhibitorHostname = ""
server.registerStop(new Date(), failed = false)
logger.info(s"Task ${status.getTaskId.getValue} has finished")
case None => logger.info(s"Got ${status.getState} for unknown/stopped server with task ${status.getTaskId}")
}
}
private[exhibitor] def stopServer(id: String): Option[Exhibitor] = {
cluster.defaultEnsemble().getServer(id).map { server =>
if (server.state == Exhibitor.Staging || server.state == Exhibitor.Running)
driver.killTask(TaskID.newBuilder().setValue(server.task.id).build())
server.state = Exhibitor.Added
server.failover.resetFailures()
server
}
}
private[exhibitor] def removeServer(id: String): Option[Exhibitor] = {
cluster.defaultEnsemble().getServer(id).map { server =>
stopServer(id)
cluster.defaultEnsemble().removeServer(server)
removeFromEnsemble(server).onFailure { case t =>
logger.info(s"Failed to remove server ${server.id} from ensemble")
}
server
}
}
private def addToEnsemble(server: Exhibitor): Future[Unit] = {
def tryAddToEnsemble(retriesLeft: Int) {
getSharedConfig(server) match {
case (Some(sharedConfig), None) =>
trySaveSharedConfig(sharedConfig, retriesLeft)
case (Some(sharedConfig), Some(failureMessage)) =>
if (retriesLeft > 0) {
logger.debug(s"$failureMessage: retrying...")
Thread.sleep(Config.ensembleModifyBackoff)
tryAddToEnsemble(retriesLeft - 1)
} else {
logger.info(s"Failed to get non-default Exhibitor Shared Configuration after ${Config.ensembleModifyRetries} retries. Using default.")
trySaveSharedConfig(sharedConfig, retriesLeft)
}
case (None, Some(failureMessage)) =>
if (retriesLeft > 0) {
logger.debug(s"$failureMessage: retrying...")
Thread.sleep(Config.ensembleModifyBackoff)
tryAddToEnsemble(retriesLeft - 1)
} else throw new IllegalStateException(failureMessage)
case (None, None) => throw new IllegalStateException("Received unexpected getSharedConfig() state, probably a bug")
}
}
def trySaveSharedConfig(sharedConfig: SharedConfig, retriesLeft: Int) {
val updatedSharedConfig = server.config.sharedConfigOverride.foldLeft(sharedConfig) { case (conf, (key, value)) =>
key match {
case ConfigNames.ZOOKEEPER_INSTALL_DIRECTORY => conf.copy(zookeeperInstallDirectory = value)
case ConfigNames.ZOOKEEPER_DATA_DIRECTORY => conf.copy(zookeeperDataDirectory = value)
case ConfigNames.ZOOKEEPER_LOG_DIRECTORY => conf.copy(zookeeperLogDirectory = value)
case ConfigNames.LOG_INDEX_DIRECTORY => conf.copy(logIndexDirectory = value)
case ConfigNames.CLIENT_PORT => conf.copy(ports = conf.ports.copy(client = value.toInt))
case ConfigNames.CONNECT_PORT => conf.copy(ports = conf.ports.copy(connect = value.toInt))
case ConfigNames.ELECTION_PORT => conf.copy(ports = conf.ports.copy(election = value.toInt))
case invalid => throw new IllegalArgumentException(s"Unacceptable shared configuration parameter: $invalid")
}
}
val updatedServersSpec = (s"S:${server.config.id}:${server.config.exhibitorHostname}" :: updatedSharedConfig.serversSpec.split(",").foldLeft(List[String]()) { (servers, srv) =>
srv.split(":") match {
// ignore duplicate ids or unknown instances
case Array(_, id, _) if id == server.id || !cluster.defaultEnsemble().contains(id) => servers
case Array(_, _, _) => srv :: servers
case _ => servers
}
}).sorted.mkString(",")
Try(ExhibitorAPIClient.setConfig(updatedSharedConfig.copy(serversSpec = updatedServersSpec), server.url)) match {
case Success(_) => logger.info(s"Successfully added server ${server.id} to ensemble")
case Failure(e) =>
logger.debug(s"Failed to save Exhibitor Shared Configuration: ${e.getMessage}")
if (retriesLeft > 0) {
logger.debug("Retrying...")
Thread.sleep(Config.ensembleModifyBackoff)
trySaveSharedConfig(sharedConfig, retriesLeft - 1)
} else throw e
}
}
Future {
ensembleLock.synchronized {
logger.info(s"Adding server ${server.id} to ensemble")
tryAddToEnsemble(Config.ensembleModifyRetries)
}
}
}
private def removeFromEnsemble(server: Exhibitor): Future[Unit] = {
def tryRemoveFromEnsemble(aliveServer: Exhibitor, retriesLeft: Int) {
getSharedConfig(server) match {
case (Some(sharedConfig), None) =>
trySaveSharedConfig(sharedConfig, aliveServer, retriesLeft)
case (Some(sharedConfig), Some(failureMessage)) =>
if (retriesLeft > 0) {
logger.debug(s"$failureMessage: retrying...")
Thread.sleep(Config.ensembleModifyBackoff)
tryRemoveFromEnsemble(aliveServer, retriesLeft - 1)
} else {
logger.info(s"Failed to get non-default Exhibitor Shared Configuration after ${Config.ensembleModifyRetries} retries. Using default.")
trySaveSharedConfig(sharedConfig, aliveServer, retriesLeft)
}
case (None, Some(failureMessage)) =>
if (retriesLeft > 0) {
logger.debug(s"$failureMessage: retrying...")
Thread.sleep(Config.ensembleModifyBackoff)
tryRemoveFromEnsemble(aliveServer, retriesLeft - 1)
} else throw new IllegalStateException(failureMessage)
case (None, None) => throw new IllegalStateException("Received unexpected getSharedConfig() state, probably a bug")
}
}
def trySaveSharedConfig(sharedConfig: SharedConfig, aliveServer: Exhibitor, retriesLeft: Int) {
val updatedServersSpec = sharedConfig.serversSpec.split(",").foldLeft(List[String]()) { (servers, srv) =>
srv.split(":") match {
case Array(_, _, serverHost) if serverHost == server.config.exhibitorHostname => servers
case Array(_, _, serverHost) => srv :: servers
case _ => servers
}
}.sorted.mkString(",")
Try(ExhibitorAPIClient.setConfig(sharedConfig.copy(serversSpec = updatedServersSpec), aliveServer.url)) match {
case Success(_) => logger.info(s"Successfully removed server ${server.id} from ensemble")
case Failure(e) =>
logger.debug(s"Failed to save Exhibitor Shared Configuration: ${e.getMessage}")
if (retriesLeft > 0) {
logger.debug("Retrying...")
Thread.sleep(Config.ensembleModifyBackoff)
trySaveSharedConfig(sharedConfig, aliveServer, retriesLeft - 1)
} else throw e
}
}
Future {
ensembleLock.synchronized {
cluster.defaultEnsemble().findWithState(Exhibitor.Running) match {
case Some(aliveServer) => tryRemoveFromEnsemble(aliveServer, Config.ensembleModifyRetries)
case None => logger.info(s"Server ${server.id} was the last alive in the cluster, no need to deregister it from ensemble.")
}
}
}
}
private def getSharedConfig(server: Exhibitor): (Option[SharedConfig], Option[String]) = {
Try(ExhibitorAPIClient.getSystemState(server.url)) match {
case Success(cfg) =>
if (cfg.zookeeperInstallDirectory != "") Some(cfg) -> None
else Some(cfg) -> Some("Failed to get non-default Exhibitor Shared Configuration")
case Failure(e) =>
None -> Some("Exhibitor API not available.")
}
}
private[exhibitor] def otherTasksAttributes(name: String): List[String] = {
def value(server: Exhibitor, name: String): Option[String] = {
if (name == "hostname") Option(server.config.hostname)
else server.task.attributes.get(name)
}
cluster.defaultEnsemble().runningServers().flatMap(value(_, name))
}
private[exhibitor] val RECONCILE_DELAY = 10 seconds
private[exhibitor] val RECONCILE_MAX_TRIES = 3
private[exhibitor] var reconciles = 0
private[exhibitor] var reconcileTime = new Date(0)
private[exhibitor] def reconcileTasks(force: Boolean = false, now: Date = new Date()) {
if (now.getTime - reconcileTime.getTime >= RECONCILE_DELAY.toMillis) {
if (!cluster.defaultEnsemble().isReconciling) reconciles = 0
reconciles += 1
reconcileTime = now
cluster.defaultEnsemble().servers().foreach(s => if (s.task == null && s.state == Exhibitor.Running) s.state = Exhibitor.Stopped)
if (reconciles > RECONCILE_MAX_TRIES) {
cluster.defaultEnsemble().servers().filter(s => s.isReconciling && s.task != null).foreach { server =>
logger.info(s"Reconciling exceeded $RECONCILE_MAX_TRIES tries for server ${server.id}, sending killTask for task ${server.task.id}")
driver.killTask(TaskID.newBuilder().setValue(server.task.id).build())
}
} else {
val statuses = cluster.defaultEnsemble().runningServers().flatMap { server =>
if (force || server.isReconciling) {
server.state = Exhibitor.Reconciling
logger.info(s"Reconciling $reconciles/$RECONCILE_MAX_TRIES state of server ${server.id}, task ${server.task.id}")
Some(TaskStatus.newBuilder()
.setTaskId(TaskID.newBuilder().setValue(server.task.id))
.setState(TaskState.TASK_STAGING)
.build)
} else None
}
if (force || statuses.nonEmpty) driver.reconcileTasks(if (force) Collections.emptyList() else statuses)
}
}
}
/**
* Get Exhibitor cluster view for each of the exhibitor-on-mesos servers
*/
def getClusterStatus: ClusterStatus = {
val mesosServerStatuses =
cluster.defaultEnsemble().servers().map { server =>
val clusterViewOpt =
server.state match {
case Exhibitor.Running =>
Try(ExhibitorAPIClient.getClusterStatus(server.url)) match {
case Success(exhibitorClusterStateView) =>
Some(exhibitorClusterStateView)
case Failure(e) =>
logger.error(s"Failed to get exhibitor cluster view for server ${server.id}", e)
None
}
case _ =>
              logger.debug(s"Server ${server.id} is in state ${server.state}, only RUNNING servers may request " +
s"exhibitor API to get cluster state")
None
}
ExhibitorMesosStatus(server, clusterViewOpt)
}
ClusterStatus(mesosServerStatuses)
}
private def initLogging() {
System.setProperty("org.eclipse.jetty.util.log.class", classOf[JettyLog4jLogger].getName)
BasicConfigurator.resetConfiguration()
val root = Logger.getRootLogger
root.setLevel(Level.INFO)
Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN)
Logger.getLogger("org.I0Itec.zkclient").setLevel(Level.WARN)
val logger = Logger.getLogger(Scheduler.getClass)
logger.setLevel(if (Config.debug) Level.DEBUG else Level.INFO)
Logger.getLogger(ExhibitorAPIClient.getClass).setLevel(if (Config.debug) Level.DEBUG else Level.INFO)
val layout = new PatternLayout("%d [%t] %-5p %c %x - %m%n")
val appender: Appender = new ConsoleAppender(layout)
root.addAppender(appender)
}
class JettyLog4jLogger extends org.eclipse.jetty.util.log.Logger {
private var logger: Logger = Logger.getLogger("Jetty")
def this(logger: Logger) {
this()
this.logger = logger
}
def isDebugEnabled: Boolean = logger.isDebugEnabled
def setDebugEnabled(enabled: Boolean) = logger.setLevel(if (enabled) Level.DEBUG else Level.INFO)
def getName: String = logger.getName
def getLogger(name: String): org.eclipse.jetty.util.log.Logger = new JettyLog4jLogger(Logger.getLogger(name))
def info(s: String, args: AnyRef*) = logger.info(format(s, args))
def info(s: String, t: Throwable) = logger.info(s, t)
def info(t: Throwable) = logger.info("", t)
def debug(s: String, args: AnyRef*) = logger.debug(format(s, args))
def debug(s: String, t: Throwable) = logger.debug(s, t)
def debug(t: Throwable) = logger.debug("", t)
def warn(s: String, args: AnyRef*) = logger.warn(format(s, args))
def warn(s: String, t: Throwable) = logger.warn(s, t)
def warn(s: String) = logger.warn(s)
def warn(t: Throwable) = logger.warn("", t)
def ignore(t: Throwable) = logger.info("Ignored", t)
}
private def format(s: String, args: AnyRef*): String = {
var result: String = ""
var i: Int = 0
for (token <- s.split("\\\\{\\\\}")) {
result += token
if (args.length > i) result += args(i)
i += 1
}
result
}
}
|
elodina/exhibitor-mesos-framework
|
src/main/scala/net/elodina/mesos/exhibitor/Scheduler.scala
|
Scala
|
apache-2.0
| 22,079 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import kafka.utils.{CoreUtils, IteratorTemplate, Logging}
import kafka.common.{KafkaException, LongRef}
import java.nio.ByteBuffer
import java.nio.channels._
import java.io._
import java.util.ArrayDeque
import kafka.message.ByteBufferMessageSet.FilterResult
import org.apache.kafka.common.errors.InvalidTimestampException
import org.apache.kafka.common.record.{MemoryRecords, TimestampType}
import org.apache.kafka.common.utils.Utils
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
object ByteBufferMessageSet {
private def create(offsetAssigner: OffsetAssigner,
compressionCodec: CompressionCodec,
wrapperMessageTimestamp: Option[Long],
timestampType: TimestampType,
messages: Message*): ByteBuffer = {
if (messages.isEmpty)
MessageSet.Empty.buffer
else if (compressionCodec == NoCompressionCodec) {
val buffer = ByteBuffer.allocate(MessageSet.messageSetSize(messages))
for (message <- messages) writeMessage(buffer, message, offsetAssigner.nextAbsoluteOffset())
buffer.rewind()
buffer
} else {
val magicAndTimestamp = wrapperMessageTimestamp match {
case Some(ts) => MagicAndTimestamp(messages.head.magic, ts)
case None => MessageSet.magicAndLargestTimestamp(messages)
}
val (messageWriter, lastOffset) = writeCompressedMessages(compressionCodec, offsetAssigner, magicAndTimestamp,
timestampType, messages)
val buffer = ByteBuffer.allocate(messageWriter.size + MessageSet.LogOverhead)
writeMessage(buffer, messageWriter, lastOffset)
buffer.rewind()
buffer
}
}
/** Deep iterator that decompresses the message sets and adjusts timestamp and offset if needed. */
def deepIterator(wrapperMessageAndOffset: MessageAndOffset, ensureMatchingMagic: Boolean = false): Iterator[MessageAndOffset] = {
import Message._
new IteratorTemplate[MessageAndOffset] {
val MessageAndOffset(wrapperMessage, wrapperMessageOffset) = wrapperMessageAndOffset
if (wrapperMessage.payload == null)
throw new KafkaException(s"Message payload is null: $wrapperMessage")
val wrapperMessageTimestampOpt: Option[Long] =
if (wrapperMessage.magic > MagicValue_V0) Some(wrapperMessage.timestamp) else None
val wrapperMessageTimestampTypeOpt: Option[TimestampType] =
if (wrapperMessage.magic > MagicValue_V0) Some(wrapperMessage.timestampType) else None
var lastInnerOffset = -1L
val messageAndOffsets = {
val inputStream = new ByteBufferBackedInputStream(wrapperMessage.payload)
val compressed = try {
new DataInputStream(CompressionFactory(wrapperMessage.compressionCodec, wrapperMessage.magic, inputStream))
} catch {
case ioe: IOException =>
throw new InvalidMessageException(s"Failed to instantiate input stream compressed with ${wrapperMessage.compressionCodec}", ioe)
}
val innerMessageAndOffsets = new ArrayDeque[MessageAndOffset]()
try {
while (true)
innerMessageAndOffsets.add(readMessageFromStream(compressed))
} catch {
case _: EOFException =>
// we don't do anything at all here, because the finally
// will close the compressed input stream, and we simply
// want to return the innerMessageAndOffsets
case ioe: IOException =>
throw new InvalidMessageException(s"Error while reading message from stream compressed with ${wrapperMessage.compressionCodec}", ioe)
} finally {
CoreUtils.swallow(compressed.close())
}
innerMessageAndOffsets
}
private def readMessageFromStream(compressed: DataInputStream): MessageAndOffset = {
val innerOffset = compressed.readLong()
val recordSize = compressed.readInt()
if (recordSize < MinMessageOverhead)
throw new InvalidMessageException(s"Message found with corrupt size `$recordSize` in deep iterator")
// read the record into an intermediate record buffer (i.e. extra copy needed)
val bufferArray = new Array[Byte](recordSize)
compressed.readFully(bufferArray, 0, recordSize)
val buffer = ByteBuffer.wrap(bufferArray)
// Override the timestamp if necessary
val newMessage = new Message(buffer, wrapperMessageTimestampOpt, wrapperMessageTimestampTypeOpt)
// Due to KAFKA-4298, it is possible for the inner and outer magic values to differ. We ignore
// this and depend on the outer message in order to decide how to compute the respective offsets
// for the inner messages
if (ensureMatchingMagic && newMessage.magic != wrapperMessage.magic)
throw new InvalidMessageException(s"Compressed message has magic value ${wrapperMessage.magic} " +
s"but inner message has magic value ${newMessage.magic}")
lastInnerOffset = innerOffset
MessageAndOffset(newMessage, innerOffset)
}
override def makeNext(): MessageAndOffset = {
messageAndOffsets.pollFirst() match {
case null => allDone()
case nextMessage@ MessageAndOffset(message, offset) =>
if (wrapperMessage.magic > MagicValue_V0) {
val relativeOffset = offset - lastInnerOffset
val absoluteOffset = wrapperMessageOffset + relativeOffset
MessageAndOffset(message, absoluteOffset)
} else {
nextMessage
}
}
}
}
}
private def writeCompressedMessages(codec: CompressionCodec,
offsetAssigner: OffsetAssigner,
magicAndTimestamp: MagicAndTimestamp,
timestampType: TimestampType,
messages: Seq[Message]): (MessageWriter, Long) = {
require(codec != NoCompressionCodec, s"compressionCodec must not be $NoCompressionCodec")
require(messages.nonEmpty, "cannot write empty compressed message set")
var offset = -1L
val magic = magicAndTimestamp.magic
val messageWriter = new MessageWriter(math.min(math.max(MessageSet.messageSetSize(messages) / 2, 1024), 1 << 16))
messageWriter.write(
codec = codec,
timestamp = magicAndTimestamp.timestamp,
timestampType = timestampType,
magicValue = magic) { outputStream =>
val output = new DataOutputStream(CompressionFactory(codec, magic, outputStream))
try {
for (message <- messages) {
offset = offsetAssigner.nextAbsoluteOffset()
if (message.magic != magicAndTimestamp.magic)
throw new IllegalArgumentException("Messages in the message set must have same magic value")
// Use inner offset if magic value is greater than 0
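          // (illustrative, values assumed here: for a set whose first absolute offset is 100,
          //  the inner offsets written below are 0, 1, 2, ... via offsetAssigner.toInnerOffset)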
val innerOffset = if (magicAndTimestamp.magic > Message.MagicValue_V0)
offsetAssigner.toInnerOffset(offset)
else
offset
output.writeLong(innerOffset)
output.writeInt(message.size)
output.write(message.buffer.array, message.buffer.arrayOffset, message.buffer.limit)
}
} finally {
output.close()
}
}
(messageWriter, offset)
}
private[kafka] def writeCompressedMessages(buffer: ByteBuffer,
codec: CompressionCodec,
messageAndOffsets: Seq[MessageAndOffset]): Int = {
require(codec != NoCompressionCodec, s"compressionCodec must not be $NoCompressionCodec")
if (messageAndOffsets.isEmpty)
0
else {
val messages = messageAndOffsets.map(_.message)
val magicAndTimestamp = MessageSet.magicAndLargestTimestamp(messages)
// ensure that we use the magic from the first message in the set when writing the wrapper
// message in order to fix message sets corrupted by KAFKA-4298
val magic = magicAndTimestamp.magic
val firstMessageAndOffset = messageAndOffsets.head
val firstAbsoluteOffset = firstMessageAndOffset.offset
val offsetAssigner = OffsetAssigner(firstAbsoluteOffset, magic, messageAndOffsets)
val timestampType = firstMessageAndOffset.message.timestampType
val (messageWriter, lastOffset) = writeCompressedMessages(codec, offsetAssigner, magicAndTimestamp,
timestampType, messages)
writeMessage(buffer, messageWriter, lastOffset)
messageWriter.size + MessageSet.LogOverhead
}
}
private[kafka] def writeMessage(buffer: ByteBuffer, message: Message, offset: Long) {
buffer.putLong(offset)
buffer.putInt(message.size)
buffer.put(message.buffer)
message.buffer.rewind()
}
private[kafka] def writeMessage(buffer: ByteBuffer, messageWriter: MessageWriter, offset: Long) {
buffer.putLong(offset)
buffer.putInt(messageWriter.size)
messageWriter.writeTo(buffer)
}
case class FilterResult(messagesRead: Int,
bytesRead: Int,
messagesRetained: Int,
bytesRetained: Int,
maxTimestamp: Long,
offsetOfMaxTimestamp: Long)
}
private object OffsetAssigner {
def apply(offsetCounter: LongRef, size: Int): OffsetAssigner =
new OffsetAssigner(offsetCounter.value to offsetCounter.addAndGet(size))
def apply(baseOffset: Long, magic: Byte, messageAndOffsets: Seq[MessageAndOffset]): OffsetAssigner =
new OffsetAssigner(messageAndOffsets.map(_.offset))
}
private class OffsetAssigner(offsets: Seq[Long]) {
private var index = 0
def nextAbsoluteOffset(): Long = {
val result = offsets(index)
index += 1
result
}
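  // The inner (relative) offset is measured from the first absolute offset of the set,
  // e.g. (illustrative numbers) absolute offsets 100, 101, 102 map to inner offsets 0, 1, 2.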
def toInnerOffset(offset: Long): Long = offset - offsets.head
}
/**
* A sequence of messages stored in a byte buffer
*
* There are two ways to create a ByteBufferMessageSet
*
* Option 1: From a ByteBuffer which already contains the serialized message set. Consumers will use this method.
*
* Option 2: Give it a list of messages along with instructions relating to serialization format. Producers will use this method.
*
*
* Message format v1 has the following changes:
* - For non-compressed messages, timestamp and timestamp type attributes have been added. The offsets of
* the messages remain absolute offsets.
* - For compressed messages, timestamp and timestamp type attributes have been added and inner offsets (IO) are used
* for inner messages of compressed messages (see offset calculation details below). The timestamp type
* attribute is only set in wrapper messages. Inner messages always have CreateTime as the timestamp type in attributes.
*
* We set the timestamp in the following way:
* For non-compressed messages: the timestamp and timestamp type message attributes are set and used.
* For compressed messages:
* 1. Wrapper messages' timestamp type attribute is set to the proper value
* 2. Wrapper messages' timestamp is set to:
* - the max timestamp of inner messages if CreateTime is used
 *        - the current server time if the wrapper message's timestamp type is LogAppendTime.
 *          In this case the wrapper message's timestamp is used and the timestamps of all inner messages are ignored.
* 3. Inner messages' timestamp will be:
* - used when wrapper message's timestamp type is CreateTime
* - ignored when wrapper message's timestamp type is LogAppendTime
 *   4. Inner messages' timestamp type will always be ignored, with one exception: producers must set the inner
 *      message timestamp type to CreateTime, otherwise the messages will be rejected by the broker.
*
* Absolute offsets are calculated in the following way:
* Ideally the conversion from relative offset(RO) to absolute offset(AO) should be:
*
* AO = AO_Of_Last_Inner_Message + RO
*
 * However, the message sets sent by producers are compressed in a streaming way, so the relative offset of an
 * inner message with respect to the last inner message is not known until the last inner message has been written.
* Unfortunately we are not able to change the previously written messages after the last message is written to
* the message set when stream compression is used.
*
* To solve this issue, we use the following solution:
*
* 1. When the producer creates a message set, it simply writes all the messages into a compressed message set with
* offset 0, 1, ... (inner offset).
* 2. The broker will set the offset of the wrapper message to the absolute offset of the last message in the
* message set.
* 3. When a consumer sees the message set, it first decompresses the entire message set to find out the inner
* offset (IO) of the last inner message. Then it computes RO and AO of previous messages:
*
* RO = IO_of_a_message - IO_of_the_last_message
* AO = AO_Of_Last_Inner_Message + RO
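 *
 * As an illustrative example (the numbers are chosen here, not taken from the protocol docs): a wrapper
 * message stored at absolute offset 105 containing three inner messages with inner offsets 0, 1 and 2
 * yields RO = -2, -1, 0 and therefore AO = 103, 104, 105 for the inner messages.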
*
* 4. This solution works for compacted message sets as well.
*
*/
class ByteBufferMessageSet(val buffer: ByteBuffer) extends MessageSet with Logging {
private var shallowValidByteCount = -1
private[kafka] def this(compressionCodec: CompressionCodec,
offsetCounter: LongRef,
wrapperMessageTimestamp: Option[Long],
timestampType: TimestampType,
messages: Message*) {
this(ByteBufferMessageSet.create(OffsetAssigner(offsetCounter, messages.size), compressionCodec,
wrapperMessageTimestamp, timestampType, messages:_*))
}
def this(compressionCodec: CompressionCodec, offsetCounter: LongRef, messages: Message*) {
this(compressionCodec, offsetCounter, None, TimestampType.CREATE_TIME, messages:_*)
}
def this(compressionCodec: CompressionCodec, offsetSeq: Seq[Long], messages: Message*) {
this(ByteBufferMessageSet.create(new OffsetAssigner(offsetSeq), compressionCodec,
None, TimestampType.CREATE_TIME, messages:_*))
}
def this(compressionCodec: CompressionCodec, messages: Message*) {
this(compressionCodec, new LongRef(0L), messages: _*)
}
def this(messages: Message*) {
this(NoCompressionCodec, messages: _*)
}
def getBuffer = buffer
override def asRecords: MemoryRecords = MemoryRecords.readableRecords(buffer.duplicate())
private def shallowValidBytes: Int = {
if (shallowValidByteCount < 0) {
this.shallowValidByteCount = this.internalIterator(isShallow = true).map { messageAndOffset =>
MessageSet.entrySize(messageAndOffset.message)
}.sum
}
shallowValidByteCount
}
/** Write the messages in this set to the given channel */
def writeFullyTo(channel: GatheringByteChannel): Int = {
buffer.mark()
var written = 0
while (written < sizeInBytes)
written += channel.write(buffer)
buffer.reset()
written
}
override def isMagicValueInAllWrapperMessages(expectedMagicValue: Byte): Boolean = {
for (messageAndOffset <- shallowIterator) {
if (messageAndOffset.message.magic != expectedMagicValue)
return false
}
true
}
/** default iterator that iterates over decompressed messages */
override def iterator: Iterator[MessageAndOffset] = internalIterator()
/** iterator over compressed messages without decompressing */
def shallowIterator: Iterator[MessageAndOffset] = internalIterator(isShallow = true)
  /** When isShallow is true, do a shallow iteration: traverse only the first (outer) level of messages. */
private def internalIterator(isShallow: Boolean = false, ensureMatchingMagic: Boolean = false): Iterator[MessageAndOffset] = {
new IteratorTemplate[MessageAndOffset] {
var topIter = buffer.slice()
var innerIter: Iterator[MessageAndOffset] = null
def innerDone(): Boolean = (innerIter == null || !innerIter.hasNext)
def makeNextOuter: MessageAndOffset = {
// if there isn't at least an offset and size, we are done
if (topIter.remaining < 12)
return allDone()
val offset = topIter.getLong()
val size = topIter.getInt()
if(size < Message.MinMessageOverhead)
throw new InvalidMessageException("Message found with corrupt size (" + size + ") in shallow iterator")
// we have an incomplete message
if(topIter.remaining < size)
return allDone()
// read the current message and check correctness
val message = topIter.slice()
message.limit(size)
topIter.position(topIter.position + size)
val newMessage = new Message(message)
if(isShallow) {
MessageAndOffset(newMessage, offset)
} else {
newMessage.compressionCodec match {
case NoCompressionCodec =>
innerIter = null
MessageAndOffset(newMessage, offset)
case _ =>
innerIter = ByteBufferMessageSet.deepIterator(MessageAndOffset(newMessage, offset), ensureMatchingMagic)
if(!innerIter.hasNext)
innerIter = null
makeNext()
}
}
}
override def makeNext(): MessageAndOffset = {
if(isShallow){
makeNextOuter
} else {
if(innerDone())
makeNextOuter
else
innerIter.next()
}
}
}
}
def filterInto(buffer: ByteBuffer,
filter: MessageAndOffset => Boolean): FilterResult = {
var maxTimestamp = Message.NoTimestamp
var offsetOfMaxTimestamp = -1L
var messagesRead = 0
var bytesRead = 0
var messagesRetained = 0
var bytesRetained = 0
for (shallowMessageAndOffset <- shallowIterator) {
val shallowMessage = shallowMessageAndOffset.message
val shallowOffset = shallowMessageAndOffset.offset
val size = MessageSet.entrySize(shallowMessageAndOffset.message)
messagesRead += 1
bytesRead += size
if (shallowMessageAndOffset.message.compressionCodec == NoCompressionCodec) {
if (filter(shallowMessageAndOffset)) {
ByteBufferMessageSet.writeMessage(buffer, shallowMessage, shallowOffset)
messagesRetained += 1
bytesRetained += size
if (shallowMessage.timestamp > maxTimestamp) {
maxTimestamp = shallowMessage.timestamp
offsetOfMaxTimestamp = shallowOffset
}
}
messagesRead += 1
} else {
// We use the absolute offset to decide whether to retain the message or not (this is handled by the
// deep iterator). Because of KAFKA-4298, we have to allow for the possibility that a previous version
// corrupted the log by writing a compressed message set with a wrapper magic value not matching the magic
// of the inner messages. This will be fixed as we recopy the messages to the destination segment.
var writeOriginalMessageSet = true
val retainedMessages = ArrayBuffer[MessageAndOffset]()
val shallowMagic = shallowMessage.magic
for (deepMessageAndOffset <- ByteBufferMessageSet.deepIterator(shallowMessageAndOffset)) {
messagesRead += 1
if (filter(deepMessageAndOffset)) {
// Check for log corruption due to KAFKA-4298. If we find it, make sure that we overwrite
// the corrupted entry with correct data.
if (shallowMagic != deepMessageAndOffset.message.magic)
writeOriginalMessageSet = false
retainedMessages += deepMessageAndOffset
// We need the max timestamp and last offset for time index
if (deepMessageAndOffset.message.timestamp > maxTimestamp)
maxTimestamp = deepMessageAndOffset.message.timestamp
}
else writeOriginalMessageSet = false
}
offsetOfMaxTimestamp = if (retainedMessages.nonEmpty) retainedMessages.last.offset else -1L
        // If no messages were removed and no message format conversion is needed, write the original message set back
if (writeOriginalMessageSet)
ByteBufferMessageSet.writeMessage(buffer, shallowMessage, shallowOffset)
else if (retainedMessages.nonEmpty) {
val compressedSize = ByteBufferMessageSet.writeCompressedMessages(buffer, shallowMessage.compressionCodec, retainedMessages)
messagesRetained += 1
bytesRetained += compressedSize
}
}
}
FilterResult(messagesRead, bytesRead, messagesRetained, bytesRetained, maxTimestamp, offsetOfMaxTimestamp)
}
/**
* Update the offsets for this message set and do further validation on messages including:
* 1. Messages for compacted topics must have keys
* 2. When magic value = 1, inner messages of a compressed message set must have monotonically increasing offsets
* starting from 0.
* 3. When magic value = 1, validate and maybe overwrite timestamps of messages.
*
* This method will convert the messages in the following scenarios:
* A. Magic value of a message = 0 and messageFormatVersion is 1
* B. Magic value of a message = 1 and messageFormatVersion is 0
*
* If no format conversion or value overwriting is required for messages, this method will perform in-place
* operations and avoid re-compression.
*
* Returns a ValidationAndOffsetAssignResult containing the validated message set, maximum timestamp, the offset
* of the shallow message with the max timestamp and a boolean indicating whether the message sizes may have changed.
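 *
 * As an illustrative example (the configuration is assumed here, not taken from this file): a message written
 * with magic value 0 arriving at a broker whose configured message format corresponds to magic value 1 matches
 * scenario A above, so the set is converted and re-written rather than validated in place.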
*/
private[kafka] def validateMessagesAndAssignOffsets(offsetCounter: LongRef,
now: Long,
sourceCodec: CompressionCodec,
targetCodec: CompressionCodec,
compactedTopic: Boolean = false,
messageFormatVersion: Byte = Message.CurrentMagicValue,
messageTimestampType: TimestampType,
messageTimestampDiffMaxMs: Long): ValidationAndOffsetAssignResult = {
if (sourceCodec == NoCompressionCodec && targetCodec == NoCompressionCodec) {
// check the magic value
if (!isMagicValueInAllWrapperMessages(messageFormatVersion))
convertNonCompressedMessages(offsetCounter, compactedTopic, now, messageTimestampType, messageTimestampDiffMaxMs,
messageFormatVersion)
else
// Do in-place validation, offset assignment and maybe set timestamp
validateNonCompressedMessagesAndAssignOffsetInPlace(offsetCounter, now, compactedTopic, messageTimestampType,
messageTimestampDiffMaxMs)
} else {
// Deal with compressed messages
// We cannot do in place assignment in one of the following situations:
// 1. Source and target compression codec are different
// 2. When magic value to use is 0 because offsets need to be overwritten
// 3. When magic value to use is above 0, but some fields of inner messages need to be overwritten.
// 4. Message format conversion is needed.
// No in place assignment situation 1 and 2
var inPlaceAssignment = sourceCodec == targetCodec && messageFormatVersion > Message.MagicValue_V0
var maxTimestamp = Message.NoTimestamp
var offsetOfMaxTimestamp = -1L
val expectedInnerOffset = new LongRef(0)
val validatedMessages = new mutable.ArrayBuffer[Message]
this.internalIterator(isShallow = false, ensureMatchingMagic = true).foreach { messageAndOffset =>
val message = messageAndOffset.message
validateMessageKey(message, compactedTopic)
if (message.magic > Message.MagicValue_V0 && messageFormatVersion > Message.MagicValue_V0) {
// No in place assignment situation 3
// Validate the timestamp
validateTimestamp(message, now, messageTimestampType, messageTimestampDiffMaxMs)
// Check if we need to overwrite offset
if (messageAndOffset.offset != expectedInnerOffset.getAndIncrement())
inPlaceAssignment = false
if (message.timestamp > maxTimestamp) {
maxTimestamp = message.timestamp
offsetOfMaxTimestamp = offsetCounter.value + expectedInnerOffset.value - 1
}
}
if (sourceCodec != NoCompressionCodec && message.compressionCodec != NoCompressionCodec)
throw new InvalidMessageException("Compressed outer message should not have an inner message with a " +
s"compression attribute set: $message")
// No in place assignment situation 4
if (message.magic != messageFormatVersion)
inPlaceAssignment = false
validatedMessages += message.toFormatVersion(messageFormatVersion)
}
if (!inPlaceAssignment) {
// Cannot do in place assignment.
val (largestTimestampOfMessageSet, offsetOfMaxTimestampInMessageSet) = {
if (messageFormatVersion == Message.MagicValue_V0)
(Some(Message.NoTimestamp), -1L)
else if (messageTimestampType == TimestampType.CREATE_TIME)
(Some(maxTimestamp), {if (targetCodec == NoCompressionCodec) offsetOfMaxTimestamp else offsetCounter.value + validatedMessages.length - 1})
else // Log append time
(Some(now), {if (targetCodec == NoCompressionCodec) offsetCounter.value else offsetCounter.value + validatedMessages.length - 1})
}
ValidationAndOffsetAssignResult(validatedMessages = new ByteBufferMessageSet(compressionCodec = targetCodec,
offsetCounter = offsetCounter,
wrapperMessageTimestamp = largestTimestampOfMessageSet,
timestampType = messageTimestampType,
messages = validatedMessages: _*),
maxTimestamp = largestTimestampOfMessageSet.get,
offsetOfMaxTimestamp = offsetOfMaxTimestampInMessageSet,
messageSizeMaybeChanged = true)
} else {
// Do not do re-compression but simply update the offset, timestamp and attributes field of the wrapper message.
buffer.putLong(0, offsetCounter.addAndGet(validatedMessages.size) - 1)
// validate the messages
validatedMessages.foreach(_.ensureValid())
var crcUpdateNeeded = true
val timestampOffset = MessageSet.LogOverhead + Message.TimestampOffset
val attributeOffset = MessageSet.LogOverhead + Message.AttributesOffset
val timestamp = buffer.getLong(timestampOffset)
val attributes = buffer.get(attributeOffset)
buffer.putLong(timestampOffset, maxTimestamp)
if (messageTimestampType == TimestampType.CREATE_TIME && timestamp == maxTimestamp)
// We don't need to recompute crc if the timestamp is not updated.
crcUpdateNeeded = false
else if (messageTimestampType == TimestampType.LOG_APPEND_TIME) {
// Set timestamp type and timestamp
buffer.putLong(timestampOffset, now)
buffer.put(attributeOffset, messageTimestampType.updateAttributes(attributes))
}
if (crcUpdateNeeded) {
// need to recompute the crc value
buffer.position(MessageSet.LogOverhead)
val wrapperMessage = new Message(buffer.slice())
Utils.writeUnsignedInt(buffer, MessageSet.LogOverhead + Message.CrcOffset, wrapperMessage.computeChecksum)
}
buffer.rewind()
        // For compressed messages, report the wrapper message's (possibly updated) timestamp and its offset,
        // which is the absolute offset of the last inner message.
ValidationAndOffsetAssignResult(validatedMessages = this,
maxTimestamp = buffer.getLong(timestampOffset),
offsetOfMaxTimestamp = buffer.getLong(0),
messageSizeMaybeChanged = false)
}
}
}
  // We create this method to avoid a memory copy. It reads from the original message set and directly
  // writes the converted messages into the new message set buffer. Hence we don't need to allocate memory
  // for each individual message during message format conversion.
private def convertNonCompressedMessages(offsetCounter: LongRef,
compactedTopic: Boolean,
now: Long,
timestampType: TimestampType,
messageTimestampDiffMaxMs: Long,
toMagicValue: Byte): ValidationAndOffsetAssignResult = {
val sizeInBytesAfterConversion = shallowValidBytes + this.internalIterator(isShallow = true).map { messageAndOffset =>
Message.headerSizeDiff(messageAndOffset.message.magic, toMagicValue)
}.sum
val newBuffer = ByteBuffer.allocate(sizeInBytesAfterConversion)
var newMessagePosition = 0
var maxTimestamp = Message.NoTimestamp
var offsetOfMaxTimestamp = -1L
this.internalIterator(isShallow = true).foreach { case MessageAndOffset(message, _) =>
validateMessageKey(message, compactedTopic)
validateTimestamp(message, now, timestampType, messageTimestampDiffMaxMs)
newBuffer.position(newMessagePosition)
newBuffer.putLong(offsetCounter.getAndIncrement())
val newMessageSize = message.size + Message.headerSizeDiff(message.magic, toMagicValue)
newBuffer.putInt(newMessageSize)
val newMessageBuffer = newBuffer.slice()
newMessageBuffer.limit(newMessageSize)
message.convertToBuffer(toMagicValue, newMessageBuffer, now, timestampType)
if (toMagicValue > Message.MagicValue_V0) {
val timestamp = newMessageBuffer.getLong(Message.TimestampOffset)
if (maxTimestamp < timestamp) {
maxTimestamp = timestamp
offsetOfMaxTimestamp = offsetCounter.value - 1
}
}
newMessagePosition += MessageSet.LogOverhead + newMessageSize
}
newBuffer.rewind()
new ValidationAndOffsetAssignResult(validatedMessages = new ByteBufferMessageSet(newBuffer),
maxTimestamp = maxTimestamp,
offsetOfMaxTimestamp = offsetOfMaxTimestamp,
messageSizeMaybeChanged = true)
}
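  // Validates non-compressed messages in place: the offset field of each entry is rewritten from offsetCounter
  // and, when LogAppendTime is used, the timestamp and attributes are overwritten and the CRC is recomputed;
  // no new buffer is allocated.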
private def validateNonCompressedMessagesAndAssignOffsetInPlace(offsetCounter: LongRef,
now: Long,
compactedTopic: Boolean,
timestampType: TimestampType,
timestampDiffMaxMs: Long): ValidationAndOffsetAssignResult = {
// do in-place validation and offset assignment
var messagePosition = 0
var maxTimestamp = Message.NoTimestamp
var offsetOfMaxTimestamp = -1L
buffer.mark()
while (messagePosition < sizeInBytes - MessageSet.LogOverhead) {
buffer.position(messagePosition)
buffer.putLong(offsetCounter.getAndIncrement())
val messageSize = buffer.getInt()
val messageBuffer = buffer.slice()
messageBuffer.limit(messageSize)
val message = new Message(messageBuffer)
validateMessageKey(message, compactedTopic)
if (message.magic > Message.MagicValue_V0) {
validateTimestamp(message, now, timestampType, timestampDiffMaxMs)
if (timestampType == TimestampType.LOG_APPEND_TIME) {
message.buffer.putLong(Message.TimestampOffset, now)
message.buffer.put(Message.AttributesOffset, timestampType.updateAttributes(message.attributes))
Utils.writeUnsignedInt(message.buffer, Message.CrcOffset, message.computeChecksum)
}
if (message.timestamp > maxTimestamp) {
maxTimestamp = message.timestamp
offsetOfMaxTimestamp = offsetCounter.value - 1
}
}
messagePosition += MessageSet.LogOverhead + messageSize
}
buffer.reset()
ValidationAndOffsetAssignResult(validatedMessages = this,
maxTimestamp = maxTimestamp,
offsetOfMaxTimestamp = offsetOfMaxTimestamp,
messageSizeMaybeChanged = false)
}
private def validateMessageKey(message: Message, compactedTopic: Boolean) {
if (compactedTopic && !message.hasKey)
throw new InvalidMessageException("Compacted topic cannot accept message without key.")
}
/**
* This method validates the timestamps of a message.
* If the message is using create time, this method checks if it is within acceptable range.
*/
private def validateTimestamp(message: Message,
now: Long,
timestampType: TimestampType,
timestampDiffMaxMs: Long) {
if (timestampType == TimestampType.CREATE_TIME && math.abs(message.timestamp - now) > timestampDiffMaxMs)
throw new InvalidTimestampException(s"Timestamp ${message.timestamp} of message is out of range. " +
s"The timestamp should be within [${now - timestampDiffMaxMs}, ${now + timestampDiffMaxMs}")
if (message.timestampType == TimestampType.LOG_APPEND_TIME)
throw new InvalidTimestampException(s"Invalid timestamp type in message $message. Producer should not set " +
s"timestamp type to LogAppendTime.")
}
/**
* The total number of bytes in this message set, including any partial trailing messages
*/
def sizeInBytes: Int = buffer.limit
/**
* The total number of bytes in this message set not including any partial, trailing messages
*/
def validBytes: Int = shallowValidBytes
/**
* Two message sets are equal if their respective byte buffers are equal
*/
override def equals(other: Any): Boolean = {
other match {
case that: ByteBufferMessageSet =>
buffer.equals(that.buffer)
case _ => false
}
}
override def hashCode: Int = buffer.hashCode
}
case class ValidationAndOffsetAssignResult(validatedMessages: ByteBufferMessageSet,
maxTimestamp: Long,
offsetOfMaxTimestamp: Long,
messageSizeMaybeChanged: Boolean)
|
geeag/kafka
|
core/src/main/scala/kafka/message/ByteBufferMessageSet.scala
|
Scala
|
apache-2.0
| 35,734 |
package im.actor.server.api.frontend
import scala.annotation.tailrec
import scala.collection.immutable
import akka.actor._
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.{ Cancel, Request }
import com.google.protobuf.ByteString
import scodec.bits.BitVector
import im.actor.server.mtproto.codecs.protocol.MessageBoxCodec
import im.actor.server.mtproto.protocol.{ MessageBox, SessionLost }
import im.actor.server.mtproto.{ transport ⇒ T }
import im.actor.server.session.{ HandleMessageBox, SessionEnvelope, SessionRegion }
private[frontend] object SessionClient {
@SerialVersionUID(1L)
case class SendToSession(p: T.MTPackage)
def props(sessionRegion: SessionRegion) = Props(classOf[SessionClient], sessionRegion)
}
private[frontend] class SessionClient(sessionRegion: SessionRegion) extends Actor with ActorLogging with ActorPublisher[T.MTProto] {
import SessionClient.SendToSession
private[this] var packageQueue = immutable.Queue.empty[T.MTProto]
def receive: Receive = watchForSession
def watchForSession: Receive = publisher orElse {
    case SendToSession(T.MTPackage(authId, sessionId, messageBytes)) ⇒
      sessionRegion.ref ! SessionEnvelope(authId, sessionId).withHandleMessageBox(HandleMessageBox(ByteString.copyFrom(messageBytes.toByteBuffer)))
    case p: T.MTPackage ⇒
context.watch(sender())
enqueuePackage(p)
context.become(working(p.authId, p.sessionId))
}
def working(authId: Long, sessionId: Long): Receive = publisher orElse {
    case SendToSession(T.MTPackage(authId, sessionId, messageBytes)) ⇒
      sessionRegion.ref ! SessionEnvelope(authId, sessionId).withHandleMessageBox(HandleMessageBox(ByteString.copyFrom(messageBytes.toByteBuffer)))
    case p @ T.MTPackage(authId, sessionId, mbBits: BitVector) ⇒
      enqueuePackage(p)
    case Terminated(sessionRef) ⇒
val p = T.MTPackage(authId, sessionId, MessageBoxCodec.encode(MessageBox(Long.MaxValue, SessionLost)).require)
enqueuePackage(p)
context.become(watchForSession.orElse(publisher))
}
def publisher: Receive = {
    case Request(_) ⇒
      deliverBuf()
    case Cancel ⇒
context.stop(self)
}
private def enqueuePackage(p: T.MTPackage): Unit = {
if (packageQueue.isEmpty && totalDemand > 0) {
onNext(p)
} else {
packageQueue = packageQueue.enqueue(p)
deliverBuf()
}
}
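  // Drains the internal queue while the downstream subscriber still has outstanding demand
  // (ActorPublisher.totalDemand); the recursion is tail-recursive, so long queues are handled safely.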
@tailrec
private def deliverBuf(): Unit = {
if (isActive && totalDemand > 0)
packageQueue.dequeueOption match {
        case Some((el, queue)) ⇒
          packageQueue = queue
          onNext(el)
          deliverBuf()
        case None ⇒
}
}
}
|
v2tmobile/actor-platform
|
actor-server/actor-frontend/src/main/scala/im/actor/server/api/frontend/SessionClient.scala
|
Scala
|
mit
| 2,694 |
package com.kubukoz.adventofcode2016
import org.scalatest.{FlatSpec, Matchers}
import Day10._
class Day10Tests extends FlatSpec with Matchers {
"State transforming" should "work" in {
val input = """value 5 goes to bot 2
|bot 2 gives low to bot 1 and high to bot 0
|value 3 goes to bot 1
|bot 1 gives low to output 1 and high to bot 0
|bot 0 gives low to output 2 and high to output 0
|value 2 goes to bot 2""".stripMargin.split("\\n").toList
runBots(input) shouldBe ProgramState(
Map(
2 -> Bot(List(2, 5)),
1 -> Bot(List(2, 3)),
0 -> Bot(List(3, 5))
),
Map(
0 -> 5,
1 -> 2,
2 -> 3
)
)
}
}
|
kubukoz/advent-of-code-2016
|
src/test/scala/com/kubukoz/adventofcode2016/Day10Tests.scala
|
Scala
|
apache-2.0
| 770 |