code stringlengths 5-1M | repo_name stringlengths 5-109 | path stringlengths 6-208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5-1M |
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.lewuathe.dllib
import breeze.linalg.Vector
import org.scalatest._
import com.lewuathe.dllib.activations.{relu, reluPrime, softplus, softplusPrime}
class ActivationSpec extends FlatSpec with Matchers {
"Rectified linear unit" should "return given value" in {
val x = Vector(-2.0, 0.0, 2.0)
relu(x) should be (Vector(0.0, 0.0, 2.0))
}
"Rectified linear unit" should "return 1.0 as differential" in {
val x = Vector(-2.0, 0.0, 2.0)
reluPrime(x) should be (Vector(0.0, 0.0, 1.0))
}
"Softplus unit" should "return 0.0 with small value" in {
val x = Vector(-10.0, 0.0, 10.0)
softplus(x) should be (Vector(4.5398899216870535E-5, 0.6931471805599453, 10.000045398899218))
}
"Softplus" should "return differential properly" in {
val x = Vector(-10.0, 0.0, 10.0)
softplusPrime(x) should be (Vector(4.5397868702434395E-5, 0.5, 0.9999546021312976))
}
}
| Lewuathe/neurallib | src/test/scala/com/lewuathe/dllib/ActivationSpec.scala | Scala | mit | 1,715 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.metrics.sink
import java.io.File
import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit
import com.codahale.metrics.{CsvReporter, MetricRegistry}
import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem
private[spark] class CsvSink(val property: Properties, val registry: MetricRegistry,
securityMgr: SecurityManager) extends Sink {
val CSV_KEY_PERIOD = "period"
val CSV_KEY_UNIT = "unit"
val CSV_KEY_DIR = "directory"
val CSV_DEFAULT_PERIOD = 10
val CSV_DEFAULT_UNIT = "SECONDS"
val CSV_DEFAULT_DIR = "/tmp/"
val pollPeriod = Option(property.getProperty(CSV_KEY_PERIOD)) match {
case Some(s) => s.toInt
case None => CSV_DEFAULT_PERIOD
}
val pollUnit: TimeUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
case Some(s) => TimeUnit.valueOf(s.toUpperCase())
case None => TimeUnit.valueOf(CSV_DEFAULT_UNIT)
}
MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)
val pollDir = Option(property.getProperty(CSV_KEY_DIR)) match {
case Some(s) => s
case None => CSV_DEFAULT_DIR
}
val reporter: CsvReporter = CsvReporter.forRegistry(registry)
.formatFor(Locale.US)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.convertRatesTo(TimeUnit.SECONDS)
.build(new File(pollDir))
override def start() {
reporter.start(pollPeriod, pollUnit)
}
override def stop() {
reporter.stop()
}
}
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala | Scala | apache-2.0 | 2,265 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package docs.scaladsl.mb
import akka.Done
import akka.NotUsed
//#hello-service
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.ServiceCall
import play.api.libs.json.Format
import play.api.libs.json.Json
object HelloService {
val TOPIC_NAME = "greetings"
}
trait HelloService extends Service {
final override def descriptor = {
import Service._
named("brokerdocs")
.withCalls(
pathCall("/api/hello/:id", hello _),
pathCall("/api/hello/:id", useGreeting _)
)
.withTopics(
topic(HelloService.TOPIC_NAME, greetingsTopic)
)
.withAutoAcl(true)
}
// The topic handle
def greetingsTopic(): Topic[GreetingMessage]
def hello(id: String): ServiceCall[NotUsed, String]
def useGreeting(id: String): ServiceCall[GreetingMessage, Done]
}
//#hello-service
case class GreetingMessage(message: String)
object GreetingMessage {
implicit val format: Format[GreetingMessage] = Json.format[GreetingMessage]
}
| rcavalcanti/lagom | docs/manual/scala/guide/broker/code/docs/scaladsl/mb/HelloService.scala | Scala | apache-2.0 | 1,144 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller, Zhaochong Liu
* @version 1.3
* @date Sat Jul 30 22:53:47 EDT 2016
* @see LICENSE (MIT style license file).
*/
package scalation.linalgebra
import scalation.math.double_exp
import scalation.math.ExtremeD.TOL
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Fac_QR_H2` class provides methods to factor an 'm-by-n' matrix 'aa' into
* the product of two matrices:
* <p>
* 'q' - an 'm-by-n' orthogonal matrix and
* 'r' - an 'n-by-n' right upper triangular matrix
* <p>
* such that 'a = q * r'. It uses Householder orthogonalization.
* @see 5.1 and 5.2 in Matrix Computations
* @see QRDecomposition.java in Jama
* @see www.stat.wisc.edu/~larget/math496/qr.html
* @see math.stackexchange.com/questions/678843/householder-qr-factorization-for-m-by-n-matrix-both-m-n-and-mn
*------------------------------------------------------------------------------
* This implementation replaces matrix operations in `Fac_QR_H3` with low-level
* operations for greater efficiency. Also, calculates Householder vectors differently.
* Caveat: for m < n use `Fac_LQ`.
*------------------------------------------------------------------------------
* @param aa the matrix to be factored into q and r
* @param needQ flag indicating whether a full q matrix is needed
*/
class Fac_QR_H2 [MatT <: MatriD] (aa: MatT, needQ: Boolean = true)
extends Fac_QR (aa, needQ)
{
private val a = aa.copy () // copy of matrix aa
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor matrix 'a' into the product of two matrices, 'a = q * r', returning
* both the orthogonal 'q' matrix and the right upper triangular 'r' matrix.
* This algorithm uses Householder orthogonalization.
* @see 5.1 and 5.2 in Matrix Computations
* @see QRDecomposition.java in Jama
*/
def factor ()
{
if (factored) return
for (k <- 0 until p) { // for each column k in a
val a_k = a.col (k, k) // A(k:m, k) column vector
var _norm = a_k.norm // norm of this column
if (_norm !=~ 0.0) {
if (a_k(0) < 0.0) _norm = -_norm // make kth Householder vector
for (i <- k until m) a(i, k) /= _norm
a(k, k) += 1.0
for (j <- k + 1 until n) { // transform remaining columns
var sum = 0.0
for (i <- k until m) sum += a(i, k) * a(i, j)
sum /= - a(k, k)
for (i <- k until m) a(i, j) += sum * a(i, k)
} // for
} // if
r(k, k) = -_norm // set r's diagonal element r_kk
} // for
if (needQ) computeQ ()
for (j <- 0 until p; i <- 0 until j) r(i, j) = a(i, j) // fill in rest of r matrix
r.clean (TOL) // comment out to avoid cleaning r matrix
factored = true
} // factor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the full 'q' orthogonal matrix based on updated values in 'a'.
*/
def computeQ ()
{
for (k <- p-1 to 0 by -1) {
if (a(k, k) !=~ 0.0) {
for (j <- k until n) {
var sum = 0.0
for (i <- k until m) sum += q(i, j) * a(i, k)
sum /= - a(k, k)
for (i <- k until m) q(i, j) += sum * a(i, k)
} // for
} // if
} // for
} // computeQ
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the nullspace of matrix 'a: { x | a*x = 0 }' using 'QR' Factorization
* 'q*r*x = 0'. Gives a basis of dimension 'n - rank' for the nullspace
* @param rank the rank of the matrix (number of linearly independent column vectors)
*/
def nullspace (rank: Int): MatriD =
{
val aat = aa.t // transpose of aa
val ns = if (aat.dim1 < aat.dim2) {
val qq = (new Fac_LQ (aat)).factor2 () // using LQ
qq.slice (rank, qq.dim1).t // last n - rank rows, transpose
} else {
val qq = (new Fac_QR_H2 (aat)).factor1 () // using QR
qq.slice (0, aat.dim1, rank, aat.dim2) // last n - rank columns
} // if
if (ns.dim2 > 0) ns else new MatrixD (aat.dim1, 0)
} // nullspace
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the nullspace of matrix 'a: { x | a*x = 0 }' using 'QR' Factorization
* 'q*r*x = 0'. Gives only one vector in the nullspace.
* @param x a vector with the correct dimension
*/
def nullspaceV (x: VectoD): VectoD =
{
x(n-1) = 1.0 // vector to solve for
val b = x.zero (n) // new rhs as -r_i,n-1
for (i <- 0 until n) b(i) = -r(i, n-1)
val rr = r.slice (0, n, 0, n-1) // drop last column
for (k <- n-2 to 0 by -1) { // solve for x in rr*x = b
x(k) = (b(k) - (rr(k) dot x)) / rr(k, k)
} // for
x
} // nullspaceV
} // Fac_QR_H2 class
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/linalgebra/Fac_QR_H2.scala | Scala | mit | 5,668 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import java.util.{Map => JMap}
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator}
class IndexedSpatioTemporalFilter
extends GeomesaFilteringIterator
with HasFeatureType
with HasIndexValueDecoder
with HasSpatioTemporalFilter
with LazyLogging {
override def init(source: SortedKeyValueIterator[Key, Value],
options: JMap[String, String],
env: IteratorEnvironment) = {
super.init(source, options, env)
initFeatureType(options)
init(featureType, options)
this.source = source.deepCopy(env)
}
override def setTopConditionally() = {
val sourceValue = source.getTopValue
val meetsFilter = stFilter == null || {
val sf = indexEncoder.decode(sourceValue.get)
stFilter.evaluate(sf)
}
if (meetsFilter) {
topKey = source.getTopKey
topValue = sourceValue
}
}
}
| vpipkt/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/iterators/IndexedSpatioTemporalFilter.scala | Scala | apache-2.0 | 1,526 |
package rpm4s.repo.data
import rpm4s.repo.data.Data.{Primary, UpdateInfo}
case class Repomd(
revision: Option[Long],
primary: Option[Primary],
updateinfo: Option[UpdateInfo]
)
object Repomd {
case class RepoMdBuilder(
revision: Option[Long],
primary: Option[Primary],
updateinfo: Option[UpdateInfo]
)
object RepoMdBuilder {
val empty: RepoMdBuilder = apply(None, None, None)
def build(builder: RepoMdBuilder): Option[Repomd] = {
Some(Repomd(builder.revision, builder.primary, builder.updateinfo))
}
}
}
| lucidd/rpm4s | repo-utils/shared/src/main/scala/rpm4s/repo/data/Repomd.scala | Scala | mit | 560 |
/*
* Copyright 2013 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.sharry.ui
import org.eknet.publet.engine.scala.ScalaScript
import org.eknet.publet.web.shiro.Security
import org.eknet.publet.sharry.SharryService.ArchiveInfo
import org.eknet.publet.vfs.util.ByteSize
import org.eknet.publet.sharry.lib.FileName
/**
* @author Eike Kettner [email protected]
* @since 18.04.13 00:43
*/
class ArchiveManage extends ScalaScript {
def serve() = asSharryUser {
param("do") match {
case Some("list") => listArchives
case Some("removeArchive") => deleteArchive()
case _ => makeJson(Map("success" -> false, "message" -> "Unknown command."))
}
}
def listArchives = {
val currentOwner = (ai: ArchiveInfo) => ai.archive.owner == Security.username
val list = sharry.listArchives.filter(currentOwner).toList
val size = list.map(_.archive.size).foldLeft(0L)((s,t) => s+t)
makeJson(Map(
"archives" -> list.map(archiveInfo2Map),
"size" -> size,
"sizeString" -> ByteSize.bytes.normalizeString(size),
"count" -> list.size
))
}
def deleteArchive() = {
param("archive").flatMap(FileName.tryParse) match {
case Some(fn) => {
sharry.removeFiles(_.archive == fn)
makeSuccess("Archive removed.")
}
case _ => makeFailure("Cannot remove archive.")
}
}
}
| eikek/publet-sharry | src/main/scala/org/eknet/publet/sharry/ui/ArchiveManage.scala | Scala | apache-2.0 | 1,919 |
package org.workcraft.gui.propertyeditor
import org.workcraft.util.Action
import org.workcraft.scala.effects.IO
trait GenericEditorProvider[T] {
def createEditor(initialValue:T, accept: IO[Unit], cancel: IO[Unit]): GenericCellEditor[T]
}
| tuura/workcraft-2.2 | Gui/src/main/scala/org/workcraft/gui/propertyeditor/GenericEditorProvider.scala | Scala | gpl-3.0 | 243 |
package org.scalatra
import org.scalatest.FunSuite
import org.scalatest.Matchers
/**
* Test cases adapted from https://github.com/josh/rack-mount/blob/d44e02ec8a6318fdda8ea53a30aac654e228e07b/test/test_strexp.rb
*/
class RailsPathPatternParserTest extends FunSuite with Matchers {
test("static string") {
val PathPattern(re, names) = RailsPathPatternParser("foo")
re.toString should equal ("""\\Afoo\\Z""")
names should equal (Nil)
}
test("dynamic segment") {
val PathPattern(re, names) = RailsPathPatternParser(":foo.example.com")
re.toString should equal ("""\\A([^#/.?]+)\\.example\\.com\\Z""")
names should equal (List("foo"))
}
test("dynamic segment with leading underscore") {
val PathPattern(re, names) = RailsPathPatternParser(":_foo.example.com")
re.toString should equal ("""\\A([^#/.?]+)\\.example\\.com\\Z""")
names should equal (List("_foo"))
}
test("skips invalid group names: 123") {
val PathPattern(re, names) = RailsPathPatternParser(":123.example.com")
re.toString should equal ("""\\A:123\\.example\\.com\\Z""")
names should equal (Nil)
}
test("skips invalid group names: $") {
val PathPattern(re, names) = RailsPathPatternParser(":$.example.com")
re.toString should equal ("""\\A:\\$\\.example\\.com\\Z""")
names should equal (Nil)
}
test("escaped dynamic segment") {
val PathPattern(re, names) = RailsPathPatternParser("\\\\:foo.example.com")
re.toString should equal ("""\\A:foo\\.example\\.com\\Z""")
names should equal (Nil)
}
test("dynamic segment inside optional segment") {
val PathPattern(re, names) = RailsPathPatternParser("foo(.:extension)")
re.toString should equal ("""\\Afoo(?:\\.([^#/.?]+))?\\Z""")
names should equal (List("extension"))
}
test("glob segment") {
val PathPattern(re, names) = RailsPathPatternParser("src/*files")
re.toString should equal ("""\\Asrc/(.+)\\Z""")
names should equal (List("files"))
}
test("glob segment at the beginning") {
val PathPattern(re, names) = RailsPathPatternParser("*files/foo.txt")
re.toString should equal ("""\\A(.+)/foo\\.txt\\Z""")
names should equal (List("files"))
}
test("glob segment in the middle") {
val PathPattern(re, names) = RailsPathPatternParser("src/*files/foo.txt")
re.toString should equal ("""\\Asrc/(.+)/foo\\.txt\\Z""")
names should equal (List("files"))
}
test("multiple glob segments") {
val PathPattern(re, names) = RailsPathPatternParser("src/*files/dir/*morefiles/foo.txt")
re.toString should equal ("""\\Asrc/(.+)/dir/(.+)/foo\\.txt\\Z""")
names should equal (List("files", "morefiles"))
}
test("escaped glob segment") {
val PathPattern(re, names) = RailsPathPatternParser("src/\\\\*files")
re.toString should equal ("""\\Asrc/\\*files\\Z""")
names should equal (Nil)
}
test("optional segment") {
val PathPattern(re, names) = RailsPathPatternParser("/foo(/bar)")
re.toString should equal ("""\\A/foo(?:/bar)?\\Z""")
names should equal (Nil)
}
test("consecutive optional segment") {
val PathPattern(re, names) = RailsPathPatternParser("/foo(/bar)(/baz)")
re.toString should equal ("""\\A/foo(?:/bar)?(?:/baz)?\\Z""")
names should equal (Nil)
}
test("multiple optional segments") {
val PathPattern(re, names) = RailsPathPatternParser("(/foo)(/bar)(/baz)")
re.toString should equal ("""\\A(?:/foo)?(?:/bar)?(?:/baz)?\\Z""")
names should equal (Nil)
}
test("escapes optional segment parentheses") {
val PathPattern(re, names) = RailsPathPatternParser("""/foo\\(/bar\\)""")
re.toString should equal ("""\\A/foo\\(/bar\\)\\Z""")
names should equal (Nil)
}
test("escapes one optional segment parenthesis") {
val PathPattern(re, names) = RailsPathPatternParser("""/foo\\((/bar)""")
re.toString should equal ("""\\A/foo\\((?:/bar)?\\Z""")
names should equal (Nil)
}
}
| etorreborre/scalatra | core/src/test/scala/org/scalatra/RailsPathPatternParserTest.scala | Scala | bsd-2-clause | 3,908 |
package concrete.generator.cspompatterns
import concrete.CSPOMDriver
import cspom.compiler.ConstraintCompiler.{addCtr, applyDomain, removeCtr, replace}
import cspom.compiler.{ConstraintCompiler, Delta, Functions}
import cspom.util.{Finite, Infinitable, IntInterval, RangeSet}
import cspom.variable.IntExpression.implicits.ranges
import cspom.variable.{CSPOMConstant, IntExpression, SimpleExpression}
import cspom.{CSPOM, CSPOMConstraint}
import scala.collection.mutable
/**
* Removes constants from alldifferent constraints
*/
object AllDiffConstant extends ConstraintCompiler {
type A = (Seq[BigInt], Seq[SimpleExpression[Int]])
def functions = Functions("alldifferent")
override def mtch(c: CSPOMConstraint[_], p: CSPOM): Option[A] = {
c match {
case CSPOMConstraint(CSPOMConstant(true), _, arguments: Seq[SimpleExpression[Int]] @unchecked, _) =>
val (constants, variables) = pickAndRemoveConstants(arguments, ranges)
if (constants.nonEmpty) {
Some((constants, variables))
} else {
None
}
case _ => None
}
}
def pickAndRemoveConstants(args: Seq[SimpleExpression[Int]], dom: SimpleExpression[Int] => RangeSet[Infinitable]): (Seq[BigInt], Seq[SimpleExpression[Int]]) = {
val (constants, variables) = args.partition { v =>
dom(v).span.itvSize == Finite(1)
}
(constants.map(dom(_).lowerBound).map {
case Finite(lb) => lb
case e => throw new IllegalArgumentException(s"$e not supported")
}, variables)
}
def compile(constraint: CSPOMConstraint[_], problem: CSPOM, constants: A): Delta = {
val except = constraint.getSeqParam[Int]("except").map(BigInt(_)).toSet
val map = mutable.Map[SimpleExpression[Int], RangeSet[Infinitable]]() ++ constraint.arguments.map {
case v: SimpleExpression[Int] @unchecked => v -> ranges(v)
case e => throw new IllegalArgumentException(s"$e not supported")
}
def filter(constants: Seq[BigInt], variables: Seq[SimpleExpression[Int]]): Seq[SimpleExpression[Int]] = {
if (constants.isEmpty) {
variables
} else {
val toRemove = RangeSet(constants.filterNot(except).map(IntInterval.singleton))
for (v <- variables) {
val actualDomain = map(v)
map(v) = actualDomain -- toRemove
}
(filter _).tupled(pickAndRemoveConstants(variables, map))
}
}
val (constant, args) = constants
val remaining = filter(constant, args)
var delta = removeCtr(constraint, problem)
if (remaining.lengthCompare(1) > 0) {
delta ++= addCtr(CSPOMDriver.allDifferent(remaining: _*) withParams constraint.params, problem)
}
for ((v, f) <- map if IntExpression.implicits.ranges(v) != f) {
delta ++= replace(v, applyDomain(v, f), problem)
}
delta
}
}
| concrete-cp/concrete | src/main/scala/concrete/generator/cspompatterns/AllDiffConstant.scala | Scala | lgpl-2.1 | 2,836 |
package com.github.tminglei.slickpg
import java.sql.{Date, Time, Timestamp}
import java.util.UUID
import org.scalatest.funsuite.AnyFunSuite
import slick.jdbc.GetResult
import scala.collection.mutable.Buffer
import scala.concurrent.Await
import scala.concurrent.duration._
class PgArraySupportSuite extends AnyFunSuite with PostgresContainer {
import utils.SimpleArrayUtils._
//-- additional definitions
case class Institution(value: Long)
case class MarketFinancialProduct(value: String)
trait MyPostgresProfile1 extends ExPostgresProfile with PgArraySupport {
override val api: API = new API {}
///
trait API extends super.API with ArrayImplicits {
implicit val simpleOptStrListListMapper = new SimpleArrayJdbcType[String]("text")
.mapTo[Option[String]](Option(_), _.orNull).to(_.toList)
implicit val simpleLongBufferTypeMapper = new SimpleArrayJdbcType[Long]("int8").to(_.toBuffer[Long], (v: Buffer[Long]) => v.toSeq)
implicit val simpleStrVectorTypeMapper = new SimpleArrayJdbcType[String]("text").to(_.toVector)
implicit val institutionListTypeWrapper = new SimpleArrayJdbcType[Long]("int8")
.mapTo[Institution](new Institution(_), _.value).to(_.toList)
implicit val marketFinancialProductWrapper = new SimpleArrayJdbcType[String]("text")
.mapTo[MarketFinancialProduct](new MarketFinancialProduct(_), _.value).to(_.toList)
///
implicit val bigDecimalTypeWrapper = new SimpleArrayJdbcType[java.math.BigDecimal]("numeric")
.mapTo[scala.math.BigDecimal](javaBigDecimal => scala.math.BigDecimal(javaBigDecimal),
scalaBigDecimal => scalaBigDecimal.bigDecimal).to(_.toList)
implicit val advancedStringListTypeMapper = new AdvancedArrayJdbcType[String]("text",
fromString(identity)(_).orNull, mkString(identity))
///
implicit val longlongWitness = ElemWitness.AnyWitness.asInstanceOf[ElemWitness[List[Long]]]
implicit val simpleLongLongListTypeMapper = new SimpleArrayJdbcType[List[Long]]("int8[]")
.to(_.asInstanceOf[Seq[Array[Any]]].toList.map(_.toList.asInstanceOf[List[Long]]))
implicit val institutionTypeWrapper = MappedJdbcType.base[Institution, Long](_.value, Institution)
implicit val marketFinancialProductTypeWrapper = MappedJdbcType.base[MarketFinancialProduct, String](_.value, MarketFinancialProduct)
}
}
object MyPostgresProfile1 extends MyPostgresProfile1
//////////////////////////////////////////////////////////////////////////
import MyPostgresProfile1.api._
lazy val db = Database.forURL(url = container.jdbcUrl, driver = "org.postgresql.Driver")
case class ArrayBean(
id: Long,
str: String,
intArr: List[Int],
longArr: Buffer[Long],
longlongArr: List[List[Long]],
shortArr: List[Short],
strList: List[String],
optStrList: List[Option[String]],
strArr: Option[Vector[String]],
uuidArr: List[UUID],
bigDecimalArr: List[BigDecimal],
institutions: List[Institution],
mktFinancialProducts: Option[List[MarketFinancialProduct]]
)
class ArrayTestTable(tag: Tag) extends Table[ArrayBean](tag, "ArrayTest") {
def id = column[Long]("id", O.AutoInc, O.PrimaryKey)
def str = column[String]("str")
def intArr = column[List[Int]]("intArray", O.Default(Nil))
def longArr = column[Buffer[Long]]("longArray")
def longlongArr = column[List[List[Long]]]("longlongArray")
def shortArr = column[List[Short]]("shortArray")
def strList = column[List[String]]("stringList")
def optStrList = column[List[Option[String]]]("optStrList")
def strArr = column[Option[Vector[String]]]("stringArray")
def uuidArr = column[List[UUID]]("uuidArray")
def bigDecimalArr = column[List[BigDecimal]]("bigDecimalArr")
def institutions = column[List[Institution]]("institutions")
def mktFinancialProducts = column[Option[List[MarketFinancialProduct]]]("mktFinancialProducts")
def * = (id, str, intArr, longArr, longlongArr, shortArr, strList, optStrList, strArr, uuidArr,
bigDecimalArr, institutions, mktFinancialProducts) <> (ArrayBean.tupled, ArrayBean.unapply)
}
val ArrayTests = TableQuery[ArrayTestTable]
//------------------------------------------------------------------------------
val uuid1 = UUID.randomUUID()
val uuid2 = UUID.randomUUID()
val uuid3 = UUID.randomUUID()
val testRec1 = ArrayBean(33L, "tt", List(101, 102, 103), Buffer(1L, 3L, 5L, 7L), List(List(11L, 12L, 13L)), List(1,7),
List("robert}; drop table students--", null, "NULL"), List(Some("[2.3,)"), Some("[0.3.0,)"), None, Some("7.1.0"), None),
Some(Vector("str1", "str3", "", " ")), List(uuid1, uuid2), List(BigDecimal.decimal(0.5)), List(Institution(113)), None)
val testRec2 = ArrayBean(37L, "test'", List(101, 103), Buffer(11L, 31L, 5L), List(List(21L, 22L, 23L)), Nil, List(""), Nil,
Some(Vector("str11", "str3")), List(uuid1, uuid2, uuid3), Nil, List(Institution(579)), Some(List(MarketFinancialProduct("product1"))))
val testRec3 = ArrayBean(41L, "haha", List(103, 101), Buffer(11L, 5L, 31L), List(List(31L, 32L, 33L)), List(35,77), Nil, Nil,
Some(Vector("(s)", "str5", "str3")), List(uuid1, uuid3), Nil, Nil, Some(List(MarketFinancialProduct("product3"), MarketFinancialProduct("product x"))))
test("Array Lifted support") {
Await.result(db.run(
DBIO.seq(
(ArrayTests.schema) create,
ArrayTests forceInsertAll List(testRec1, testRec2, testRec3)
).andThen(
DBIO.seq(
ArrayTests.sortBy(_.id).to[List].result.map(
r => assert(List(testRec1, testRec2, testRec3) === r)
),
// all
ArrayTests.filter(101.bind === _.intArr.any).sortBy(_.id).to[List].result.map(
r => assert(List(testRec1, testRec2, testRec3) === r)
),
// all
ArrayTests.filter(5L.bind <= _.longArr.all).sortBy(_.id).to[List].result.map(
r => assert(List(testRec2, testRec3) === r)
),
// all
ArrayTests.filter(_.str === (List("test'") : Rep[List[String]]).any).sortBy(_.id).to[List].result.map(
r => assert(List(testRec2) === r)
),
// @>
ArrayTests.filter(_.strArr @> Vector("str3")).sortBy(_.id).to[List].result.map(
r => assert(List(testRec1, testRec2, testRec3) === r)
),
ArrayTests.filter(_.mktFinancialProducts @> List(MarketFinancialProduct("product1"))).sortBy(_.id).to[List].result.map(
r => assert(List(testRec2) === r)
),
ArrayTests.filter(_.institutions @> List(Institution(579))).sortBy(_.id).to[List].result.map(
r => assert(List(testRec2) === r)
),
ArrayTests.filter(_.strArr @> Vector("str3").bind).sortBy(_.id).to[List].result.map(
r => assert(List(testRec1, testRec2, testRec3) === r)
),
// <@
ArrayTests.filter(Vector("str3").bind <@: _.strArr).sortBy(_.id).to[List].result.map(
r => assert(List(testRec1, testRec2, testRec3) === r)
),
ArrayTests.filter(List(Institution(579)).bind <@: _.institutions).sortBy(_.id).to[List].result.map(
r => assert(List(testRec2) === r)
),
// &&
ArrayTests.filter(_.longArr @& Buffer(5L, 17L).bind).sortBy(_.id).to[List].result.map(
r => assert(List(testRec1, testRec2, testRec3) === r)
),
// length
ArrayTests.filter(_.longArr.length() > 3.bind).sortBy(_.id).to[List].result.map(
r => assert(List(testRec1) === r)
),
ArrayTests.filter(_.shortArr.length() === 0.bind).map(_.id).to[List].result.map(
r => assert(List(37L) === r)
),
// unnest
ArrayTests.filter(5L.bind <= _.longArr.all).map(_.strArr.unnest).to[List].result.map(
r => assert((testRec2.strArr.get ++ testRec3.strArr.get).toList === r.map(_.orNull))
),
// concatenate
ArrayTests.filter(_.id === 33L.bind).map(_.intArr ++ List(105, 107).bind).result.head.map(
r => assert(List(101, 102, 103, 105, 107) === r)
),
ArrayTests.filter(_.id === 33L.bind).map(List(105, 107).bind ++ _.intArr).result.head.map(
r => assert(List(105, 107, 101, 102, 103) === r)
),
ArrayTests.filter(_.id === 33L.bind).map(_.intArr + 105.bind).result.head.map(
r => assert(List(101, 102, 103, 105) === r)
),
ArrayTests.filter(_.id === 33L.bind).map(105.bind +: _.intArr).result.head.map(
r => assert(List(105, 101, 102, 103) === r)
)
)
).andFinally(
(ArrayTests.schema) drop
).transactionally
), Duration.Inf)
}
//------------------------------------------------------------------------
case class ArrayBean1(
id: Long,
bytea: Array[Byte],
uuidArr: List[UUID],
strArr: Option[List[String]],
longArr: Seq[Long],
intArr: List[Int],
shortArr: Vector[Short],
floatArr: List[Float],
doubleArr: List[Double],
boolArr: Seq[Boolean],
dateArr: List[Date],
timeArr: List[Time],
tsArr: Seq[Timestamp],
institutionArr: List[Institution]
)
test("Array Plain SQL support") {
import MyPostgresProfile.plainAPI._
import utils.PlainSQLUtils._
{
addNextArrayConverter((r) => r.nextArrayOption[Long]().map(_.map(Institution(_))))
}
implicit val getInstitutionArray = mkGetResult(_.nextArray[Institution]())
implicit val getInstitutionArrayOption = mkGetResult(_.nextArrayOption[Institution]())
implicit val setInstitutionArray = mkArraySetParameter[Institution]("int8", v => String.valueOf(v.value))
implicit val setInstitutionArrayOption = mkArrayOptionSetParameter[Institution]("int8", v => String.valueOf(v.value))
implicit val getArrayBean1Result = GetResult { r =>
ArrayBean1(r.nextLong(),
r.<<[Array[Byte]],
r.<<[Seq[UUID]].toList,
r.<<?[Seq[String]].map(_.toList),
r.<<[Seq[Long]],
r.<<[Seq[Int]].toList,
r.<<[Seq[Short]].toVector,
r.<<[Seq[Float]].toList,
r.<<[Seq[Double]].toList,
r.<<[Seq[Boolean]],
r.<<[Seq[Date]].toList,
r.<<[Seq[Time]].toList,
r.<<[Seq[Timestamp]],
r.<<[Seq[Institution]].toList
)
}
val b = ArrayBean1(101L, "tt".getBytes, List(UUID.randomUUID()), Some(List("tewe", "ttt", "apostrophe'")), List(111L), List(1, 2), Vector(3, 5), List(1.2f, 43.32f), List(21.35d), List(true, true),
List(new Date(System.currentTimeMillis())), List(new Time(System.currentTimeMillis())), List(new Timestamp(System.currentTimeMillis())), List(Institution(579)))
Await.result(db.run(
DBIO.seq(
sqlu"""create table ArrayTest1(
id int8 not null primary key,
byte_arr bytea not null,
uuid_arr uuid[] not null,
str_arr text[] not null,
long_arr int8[] not null,
int_arr int4[] not null,
short_arr int2[] not null,
float_arr float4[] not null,
double_arr float8[] not null,
bool_arr bool[] not null,
date_arr date[] not null,
time_arr time[] not null,
ts_arr timestamp[] not null,
ins_arr int8[] not null)
""",
///
sqlu"insert into ArrayTest1 values(${b.id}, ${b.bytea}, ${b.uuidArr}, ${b.strArr}, ${b.longArr}, ${b.intArr}, ${b.shortArr}, ${b.floatArr}, ${b.doubleArr}, ${b.boolArr}, ${b.dateArr}, ${b.timeArr}, ${b.tsArr}, ${b.institutionArr})",
sql"select * from ArrayTest1 where id = ${b.id}".as[ArrayBean1].head.map(
f => {
b.bytea.zip(f.bytea).map(r => assert(r._1 === r._2))
b.uuidArr.zip(f.uuidArr).map(r => assert(r._1 === r._2))
b.strArr.getOrElse(Nil).zip(f.strArr.getOrElse(Nil)).map(r => assert(r._1 === r._2))
b.longArr.zip(f.longArr).map(r => assert(r._1 === r._2))
b.intArr.zip(f.intArr).map(r => assert(r._1 === r._2))
b.shortArr.zip(f.shortArr).map(r => assert(r._1 === r._2))
b.floatArr.zip(f.floatArr).map(r => assert(Math.abs(r._1 - r._2) < 0.01f))
b.doubleArr.zip(f.doubleArr).map(r => assert(Math.abs(r._1 - r._2) < 0.01d))
b.boolArr.zip(f.boolArr).map(r => assert(r._1 === r._2))
b.dateArr.zip(f.dateArr).map(r => assert(r._1.toString === r._2.toString))
b.timeArr.zip(f.timeArr).map(r => assert(r._1.toString === r._2.toString))
b.tsArr.zip(f.tsArr).map(r => assert(r._1.toString === r._2.toString))
b.institutionArr.zip(f.institutionArr).map(r => assert(r._1 === r._2))
}
),
///
sqlu"drop table if exists ArrayTest1 cascade"
).transactionally
), Duration.Inf)
}
}
| tminglei/slick-pg | src/test/scala/com/github/tminglei/slickpg/PgArraySupportSuite.scala | Scala | bsd-2-clause | 13,013 |
package scorex.api.http
import akka.actor.ActorRefFactory
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCode}
import akka.http.scaladsl.server.{Directive0, Directives, Route}
import akka.util.Timeout
import play.api.libs.json.JsValue
import scorex.crypto.hash.SecureCryptographicHash
import scorex.settings.Settings
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
final case class JsonResponse(response: JsValue, code: StatusCode)
trait ApiRoute extends Directives with CommonApiFunctions {
val settings: Settings
val context: ActorRefFactory
val route: Route
implicit val timeout = Timeout(5.seconds)
lazy val corsAllowed = settings.corsAllowed
lazy val apiKeyHash = settings.apiKeyHash
//def actorRefFactory: ActorRefFactory = context
def getJsonRoute(fn: Future[JsonResponse]): Route =
jsonRoute(Await.result(fn, timeout.duration), get)
def getJsonRoute(fn: JsonResponse): Route = jsonRoute(fn, get)
def postJsonRoute(fn: JsonResponse): Route = jsonRoute(fn, post)
def postJsonRoute(fn: Future[JsonResponse]): Route = jsonRoute(Await.result(fn, timeout.duration), post)
def deleteJsonRoute(fn: JsonResponse): Route = jsonRoute(fn, delete)
def deleteJsonRoute(fn: Future[JsonResponse]): Route = jsonRoute(Await.result(fn, timeout.duration), delete)
private def jsonRoute(fn: JsonResponse, method: Directive0): Route = method {
val resp = complete(fn.code -> HttpEntity(ContentTypes.`application/json`, fn.response.toString))
withCors(resp)
}
def withAuth(route: => Route): Route = {
optionalHeaderValueByName("api_key") { case keyOpt =>
if (isValid(keyOpt)) route
else {
val resp = complete(ApiKeyNotValid.code -> HttpEntity(ContentTypes.`application/json`, ApiKeyNotValid.json.toString))
withCors(resp)
}
}
}
private def withCors(fn: => Route): Route = {
if (corsAllowed) respondWithHeaders(RawHeader("Access-Control-Allow-Origin", "*"))(fn)
else fn
}
private def isValid(keyOpt: Option[String]): Boolean = {
lazy val keyHash = keyOpt.map(SecureCryptographicHash(_))
(apiKeyHash, keyHash) match {
case (None, _) => true
case (Some(expected), Some(passed)) => expected sameElements passed
case _ => false
}
}
}
| B83YPoj/Waves | src/main/scala/scorex/api/http/ApiRoute.scala | Scala | apache-2.0 | 2,360 |
package org.apache.activemq.apollo.broker.store
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.fusesource.hawtbuf.AsciiBuffer._
import org.fusesource.hawtdispatch.TaskTracker
import java.util.concurrent.TimeUnit
import collection.mutable.ListBuffer
import org.apache.activemq.apollo.util.{LoggingTracker, FunSuiteSupport, LongCounter}
import org.scalatest.BeforeAndAfterEach
import org.apache.activemq.apollo.util.FileSupport._
import java.util.concurrent.atomic.AtomicReference
import java.io._
import org.apache.activemq.apollo.util.sync_cb
/**
* <p>Implements generic testing of Store implementations.</p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
abstract class StoreFunSuiteSupport extends FunSuiteSupport with BeforeAndAfterEach {
var store:Store = null
def create_store(flushDelay:Long):Store
protected def get_flush_delay(): Long = 5*1000
/**
* Handy helper to call an async method on the store and wait for
* the result of the callback.
*/
def data_directory = test_data_dir / "store"
override protected def beforeAll() = {
super.beforeAll()
data_directory.recursive_delete
store = create_store(get_flush_delay())
val tracker = new LoggingTracker("store startup")
tracker.start(store)
tracker.await
}
override protected def afterAll() = {
val tracker = new LoggingTracker("store stop")
tracker.stop(store)
tracker.await
super.afterAll()
}
override protected def beforeEach() = {
purge
}
def purge {
val tracker = new LoggingTracker("store startup")
val task = tracker.task("purge")
store.purge(task.run)
tracker.await
}
def expectCB[T](expected:T)(func: (T=>Unit)=>Unit ) = {
expect(expected) {
sync_cb(func)
}
}
val queue_key_counter = new LongCounter
def add_queue(name:String):Long = {
var queue_a = QueueRecord(queue_key_counter.incrementAndGet, ascii("test"), ascii(name))
val rc:Boolean = sync_cb( cb=> store.add_queue(queue_a)(cb) )
expect(true)(rc)
queue_a.key
}
def add_message(batch:StoreUOW, content:String) = {
var message = new MessageRecord
message.codec = ascii("test-protocol")
message.buffer = ascii(content).buffer
message.locator = new AtomicReference[Object]()
val key = batch.store(message)
(key, message.locator)
}
def entry(queue_key:Long, entry_seq:Long, message_key:(Long, AtomicReference[Object])) = {
var queueEntry = new QueueEntryRecord
queueEntry.queue_key = queue_key
queueEntry.entry_seq = entry_seq
queueEntry.message_key = message_key._1
queueEntry.message_locator = message_key._2
queueEntry
}
def populate(queue_key:Long, messages:List[String], first_seq:Long=1) = {
var batch = store.create_uow("")
var msg_keys = ListBuffer[(Long, AtomicReference[Object], Long)]()
var next_seq = first_seq
messages.foreach { message=>
val msgKey = add_message(batch, message)
msg_keys +=( (msgKey._1, msgKey._2, next_seq) )
batch.enqueue(entry(queue_key, next_seq, msgKey))
next_seq += 1
}
val tracker = new TaskTracker("unknown", 0)
val task = tracker.task("uow complete")
batch.on_complete(task.run)
batch.release("")
msg_keys.foreach { msgKey =>
store.flush_message(msgKey._1) {}
}
tracker.await
msg_keys
}
}
| chirino/activemq-apollo | apollo-broker/src/test/scala/org/apache/activemq/apollo/broker/store/StoreFunSuiteSupport.scala | Scala | apache-2.0 | 4,141 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "scalabitz"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
"org.reactivemongo" %% "play2-reactivemongo" % "0.8"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
resolvers += "sgodbillon" at "https://bitbucket.org/sgodbillon/repository/raw/master/snapshots/"
)
}
| sandermak/scalabitz | project/Build.scala | Scala | mit | 441 |
/*
* Copyright 2016 Groupon, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.sparklint.common
import org.apache.spark.SparkConf
/**
* @author rxue
* @since 12/7/16.
*/
class SparkConfSparklintConfig(conf: SparkConf) extends SparklintConfig {
override def port: Int = conf.get("spark.sparklint.port", defaultPort.toString).toInt
}
| groupon/sparklint | src/main/scala/com/groupon/sparklint/common/SparkConfSparklintConfig.scala | Scala | apache-2.0 | 881 |
package com.harrys.hyppo.worker.actor.queue
import java.time.LocalDateTime
import java.util.UUID
import akka.testkit.{TestActorRef, TestProbe}
import com.harrys.hyppo.util.TimeUtils
import com.harrys.hyppo.worker.actor.amqp.{RabbitQueueStatusActor, SingleQueueDetails}
import com.harrys.hyppo.worker.actor.{RabbitMQTests, RequestForAnyWork, RequestForPreferredWork}
import com.harrys.hyppo.worker.api.proto.CreateIngestionTasksRequest
import com.harrys.hyppo.worker.{TestConfig, TestObjects}
import scala.util.Try
/**
* Created by jpetty on 9/16/15.
*/
class WorkerDelegatorActorTests extends RabbitMQTests("WorkerDelegatorActorTests", TestConfig.workerWithRandomQueuePrefix()) {
import com.thenewmotion.akka.rabbitmq._
val injector = TestConfig.localWorkerInjector(system, config)
"The WorkDelegator" must {
val delegator = TestActorRef(injector.getInstance(classOf[WorkDelegation]), "delegation")
"initialize with empty queue status information" in {
delegator.underlyingActor.statusTracker.integrationQueueMetrics() shouldBe empty
}
"respond to queue status updates by updating it status info" in {
val statuses = Seq(SingleQueueDetails(queueName = naming.generalQueueName, size = 0, rate = 0.0, ready = 0, unacknowledged = 0, LocalDateTime.now()))
delegator ! RabbitQueueStatusActor.QueueStatusUpdate(statuses)
delegator.underlyingActor.statusTracker.generalQueueMetrics().details shouldEqual statuses.head
}
"incrementally update the queue statuses as new information arrives" in {
val channel = connectionActor.createChannel(ChannelActor.props(), name = Some("partial-test"))
val integrations = Seq(
TestObjects.testProcessedDataIntegration(TestObjects.testIngestionSource(name = "Test Source One")),
TestObjects.testProcessedDataIntegration(TestObjects.testIngestionSource(name = "Test Source Two"))
)
val workItems = integrations.map { integration =>
CreateIngestionTasksRequest(integration, UUID.randomUUID(), Seq(), TestObjects.testIngestionJob(integration.source))
}
val queues = workItems.map { item =>
enqueueWork(item)
SingleQueueDetails(queueName = naming.integrationWorkQueueName(item), size = 1, rate = 0.0, ready = 1, unacknowledged = 0, idleSince = TimeUtils.currentLocalDateTime())
}
delegator ! RabbitQueueStatusActor.QueueStatusUpdate(queues)
val metrics = delegator.underlyingActor.statusTracker.integrationQueueMetrics()
metrics.size shouldEqual 2
metrics.map(_.details).sortBy(_.queueName) shouldEqual queues.sortBy(_.queueName)
// Clear the contents of those queues
withChannel { c =>
queues.map(_.queueName).foreach(c.queuePurge)
}
delegator ! RabbitQueueStatusActor.QueueStatusUpdate(Seq())
delegator ! RequestForAnyWork(channel)
expectNoMsg()
delegator.underlyingActor.statusTracker.integrationQueueMetrics().size shouldEqual 0
}
"provide preferred work when possible" in {
val integration = TestObjects.testProcessedDataIntegration(TestObjects.testIngestionSource(name = "work delegator"))
val testJob = TestObjects.testIngestionJob(integration.source)
val work = CreateIngestionTasksRequest(integration, UUID.randomUUID(), Seq(), testJob)
val workerChan = connectionActor.createChannel(ChannelActor.props())
val queueName = enqueueWork(work)
try {
val probe = TestProbe()
val queues = Seq(SingleQueueDetails(queueName = queueName, size = 1, rate = 0.0, ready = 1, unacknowledged = 0, idleSince = LocalDateTime.now()))
delegator ! RabbitQueueStatusActor.QueueStatusUpdate(queues)
delegator.underlyingActor.statusTracker.integrationQueueMetrics().size shouldEqual 1
probe.send(delegator, RequestForPreferredWork(workerChan, integration))
val reply = probe.expectMsgType[WorkQueueExecution]
reply.input shouldBe a[CreateIngestionTasksRequest]
reply.input.code.isSameCode(integration.code) shouldBe true
} finally {
Try(connection.close())
}
}
}
}
| harrystech/hyppo-worker | worker/src/test/scala/com/harrys/hyppo/worker/actor/queue/WorkerDelegatorActorTests.scala | Scala | mit | 4,156 |
package net.rrm.ehour.ui.manage.lock
import java.util.Locale
import org.joda.time.DateTime
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{Matchers, WordSpec}
@RunWith(classOf[JUnitRunner])
class LockAdminBackingBeanSpec extends WordSpec with Matchers {
"Lock Admin Backing Bean" should {
"set week number as name when there are less than 7 days between start and end" in {
val start = new DateTime()
val end = start.plusDays(5)
LockAdminBackingBean.determineName(start.toDate, end.toDate, Locale.US) should startWith("Week")
}
"set start and end date as name when there are less than 7 days between start and end" in {
val start = new DateTime(2014, 2, 1, 0, 0)
val end = start.plusDays(14)
LockAdminBackingBean.determineName(start.toDate, end.toDate, Locale.US) should be("2/1/14 - 2/15/14")
}
"set month name when there's approx. a month between start and end date" in {
val start = new DateTime(2014, 1, 1, 0, 0)
val end = start.plusDays(30)
LockAdminBackingBean.determineName(start.toDate, end.toDate, Locale.US) should be("January, 2014")
}
"set quarter name when there's 12 or 13 weeks between start and end date" in {
val start = new DateTime(2014, 4, 1, 0, 0)
val end = start.plusWeeks(13)
LockAdminBackingBean.determineName(start.toDate, end.toDate, Locale.US) should be("Q2, 2014")
}
}
}
| momogentoo/ehour | eHour-wicketweb/src/test/scala/net/rrm/ehour/ui/manage/lock/LockAdminBackingBeanSpec.scala | Scala | gpl-2.0 | 1,462 |
package io.getquill.norm
import io.getquill.ast.BinaryOperation
import io.getquill.ast.BooleanOperator
import io.getquill.ast.Filter
import io.getquill.ast.FlatMap
import io.getquill.ast.Map
import io.getquill.ast.Query
import io.getquill.ast.Union
import io.getquill.ast.UnionAll
object AdHocReduction {
def unapply(q: Query) =
q match {
// ---------------------------
// *.filter
// a.filter(b => c).filter(d => e) =>
// a.filter(b => c && e[d := b])
case Filter(Filter(a, b, c), d, e) =>
val er = BetaReduction(e, d -> b)
Some(Filter(a, b, BinaryOperation(c, BooleanOperator.`&&`, er)))
// ---------------------------
// flatMap.*
// a.flatMap(b => c).map(d => e) =>
// a.flatMap(b => c.map(d => e))
case Map(FlatMap(a, b, c), d, e) =>
Some(FlatMap(a, b, Map(c, d, e)))
// a.flatMap(b => c).filter(d => e) =>
// a.flatMap(b => c.filter(d => e))
case Filter(FlatMap(a, b, c), d, e) =>
Some(FlatMap(a, b, Filter(c, d, e)))
// a.flatMap(b => c.union(d)) =>
// a.flatMap(b => c).union(a.flatMap(b => d))
case FlatMap(a, b, Union(c, d)) =>
Some(Union(FlatMap(a, b, c), FlatMap(a, b, d)))
// a.flatMap(b => c.unionAll(d)) =>
// a.flatMap(b => c).unionAll(a.flatMap(b => d))
case FlatMap(a, b, UnionAll(c, d)) =>
Some(UnionAll(FlatMap(a, b, c), FlatMap(a, b, d)))
case other => None
}
}
| mentegy/quill | quill-core/src/main/scala/io/getquill/norm/AdHocReduction.scala | Scala | apache-2.0 | 1,482 |
package com.github.dcapwell.netty.examples.block
trait BlockStore[Key] {
def apply(key: Key): Option[Array[Byte]]
def add(key: Key, value: Array[Byte]): Unit
}
| dcapwell/netty-examples | src/main/scala/com/github/dcapwell/netty/examples/block/BlockStore.scala | Scala | mit | 166 |
package a39
case class Item(name: String)
trait Number1
case class Number1Positive(tail: Number1, head: Item) extends Number1
case class Number1Zero(tail: () => Number2) extends Number1
trait Number2
case class Number2Positive(tail: Number2, head: Item) extends Number2
case class Number2Zero(tail: () => Number1) extends Number2
| djx314/ubw | a39-双头龙/src/main/scala/a39/Number1.scala | Scala | bsd-3-clause | 353 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import org.apache.parquet.filter2.predicate.Operators._
import org.apache.parquet.filter2.predicate.{FilterPredicate, Operators}
import org.apache.spark.sql.{Column, DataFrame, QueryTest, Row, SQLConf}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, LogicalRelation}
import org.apache.spark.sql.test.SharedSQLContext
/**
* A test suite that tests Parquet filter2 API based filter pushdown optimization.
*
* NOTE:
*
* 1. `!(a cmp b)` is always transformed to its negated form `a cmp' b` by the
* `BooleanSimplification` optimization rule whenever possible. As a result, predicate `!(a < 1)`
* results in a `GtEq` filter predicate rather than a `Not`.
*
* 2. `Tuple1(Option(x))` is used together with `AnyVal` types like `Int` to ensure the inferred
* data type is nullable.
*/
class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContext {
private def checkFilterPredicate(
df: DataFrame,
predicate: Predicate,
filterClass: Class[_ <: FilterPredicate],
checker: (DataFrame, Seq[Row]) => Unit,
expected: Seq[Row]): Unit = {
val output = predicate.collect { case a: Attribute => a }.distinct
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val query = df
.select(output.map(e => Column(e)): _*)
.where(Column(predicate))
val analyzedPredicate = query.queryExecution.optimizedPlan.collect {
case PhysicalOperation(_, filters, LogicalRelation(_: ParquetRelation, _)) => filters
}.flatten
assert(analyzedPredicate.nonEmpty)
val selectedFilters = DataSourceStrategy.selectFilters(analyzedPredicate)
assert(selectedFilters.nonEmpty)
selectedFilters.foreach { pred =>
val maybeFilter = ParquetFilters.createFilter(df.schema, pred)
assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
maybeFilter.foreach { f =>
// Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
assert(f.getClass === filterClass)
}
}
checker(query, expected)
}
}
private def checkFilterPredicate
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
(implicit df: DataFrame): Unit = {
checkFilterPredicate(df, predicate, filterClass, checkAnswer(_, _: Seq[Row]), expected)
}
private def checkFilterPredicate[T]
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: T)
(implicit df: DataFrame): Unit = {
checkFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
}
private def checkBinaryFilterPredicate
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
(implicit df: DataFrame): Unit = {
def checkBinaryAnswer(df: DataFrame, expected: Seq[Row]) = {
assertResult(expected.map(_.getAs[Array[Byte]](0).mkString(",")).sorted) {
df.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
}
}
checkFilterPredicate(df, predicate, filterClass, checkBinaryAnswer _, expected)
}
private def checkBinaryFilterPredicate
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Array[Byte])
(implicit df: DataFrame): Unit = {
checkBinaryFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
}
test("filter pushdown - boolean") {
withParquetDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], Seq(Row(true), Row(false)))
checkFilterPredicate('_1 === true, classOf[Eq[_]], true)
checkFilterPredicate('_1 <=> true, classOf[Eq[_]], true)
checkFilterPredicate('_1 !== true, classOf[NotEq[_]], false)
}
}
test("filter pushdown - integer") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - long") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - float") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - double") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
// See https://issues.apache.org/jira/browse/SPARK-11153
ignore("filter pushdown - string") {
withParquetDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate(
'_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.toString)))
checkFilterPredicate('_1 === "1", classOf[Eq[_]], "1")
checkFilterPredicate('_1 <=> "1", classOf[Eq[_]], "1")
checkFilterPredicate(
'_1 !== "1", classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.toString)))
checkFilterPredicate('_1 < "2", classOf[Lt[_]], "1")
checkFilterPredicate('_1 > "3", classOf[Gt[_]], "4")
checkFilterPredicate('_1 <= "1", classOf[LtEq[_]], "1")
checkFilterPredicate('_1 >= "4", classOf[GtEq[_]], "4")
checkFilterPredicate(Literal("1") === '_1, classOf[Eq[_]], "1")
checkFilterPredicate(Literal("1") <=> '_1, classOf[Eq[_]], "1")
checkFilterPredicate(Literal("2") > '_1, classOf[Lt[_]], "1")
checkFilterPredicate(Literal("3") < '_1, classOf[Gt[_]], "4")
checkFilterPredicate(Literal("1") >= '_1, classOf[LtEq[_]], "1")
checkFilterPredicate(Literal("4") <= '_1, classOf[GtEq[_]], "4")
checkFilterPredicate(!('_1 < "4"), classOf[GtEq[_]], "4")
checkFilterPredicate('_1 < "2" || '_1 > "3", classOf[Operators.Or], Seq(Row("1"), Row("4")))
}
}
// See https://issues.apache.org/jira/browse/SPARK-11153
ignore("filter pushdown - binary") {
implicit class IntToBinary(int: Int) {
def b: Array[Byte] = int.toString.getBytes("UTF-8")
}
withParquetDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
checkBinaryFilterPredicate('_1 === 1.b, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate('_1 <=> 1.b, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkBinaryFilterPredicate(
'_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.b)).toSeq)
checkBinaryFilterPredicate(
'_1 !== 1.b, classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.b)).toSeq)
checkBinaryFilterPredicate('_1 < 2.b, classOf[Lt[_]], 1.b)
checkBinaryFilterPredicate('_1 > 3.b, classOf[Gt[_]], 4.b)
checkBinaryFilterPredicate('_1 <= 1.b, classOf[LtEq[_]], 1.b)
checkBinaryFilterPredicate('_1 >= 4.b, classOf[GtEq[_]], 4.b)
checkBinaryFilterPredicate(Literal(1.b) === '_1, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate(Literal(1.b) <=> '_1, classOf[Eq[_]], 1.b)
checkBinaryFilterPredicate(Literal(2.b) > '_1, classOf[Lt[_]], 1.b)
checkBinaryFilterPredicate(Literal(3.b) < '_1, classOf[Gt[_]], 4.b)
checkBinaryFilterPredicate(Literal(1.b) >= '_1, classOf[LtEq[_]], 1.b)
checkBinaryFilterPredicate(Literal(4.b) <= '_1, classOf[GtEq[_]], 4.b)
checkBinaryFilterPredicate(!('_1 < 4.b), classOf[GtEq[_]], 4.b)
checkBinaryFilterPredicate(
'_1 < 2.b || '_1 > 3.b, classOf[Operators.Or], Seq(Row(1.b), Row(4.b)))
}
}
test("SPARK-6554: don't push down predicates which reference partition columns") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
sqlContext.read.parquet(path).filter("part = 1"),
(1 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
test("SPARK-10829: Filter combine partition key and attribute doesn't work in DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
sqlContext.read.parquet(path).filter("a > 0 and (part = 0 or a > 1)"),
(2 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
}
|
pronix/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
|
Scala
|
apache-2.0
| 14,436 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnetexamples.imclassification.util
import org.apache.mxnet.Callback.Speedometer
import org.apache.mxnet._
import org.apache.mxnet.optimizer.SGD
import org.slf4j.LoggerFactory
object Trainer {
private val logger = LoggerFactory.getLogger(classOf[Trainer])
/**
* Fits a model
* @param batchSize Number of images per training batch
* @param numExamples Total number of image examples
* @param devs List of device contexts to use
* @param network The model to train
* @param dataLoader Function to get data loaders for training and validation data
* @param kvStore KVStore to use
* @param numEpochs Number of times to train on each image
* @param modelPrefix Prefix to model identification
* @param loadEpoch Loads a saved checkpoint at this epoch when set
* @param lr The learning rate
* @param lrFactor Learning rate factor (see FactorScheduler)
* @param lrFactorEpoch Learning rate factor epoch (see FactorScheduler)
* @param clipGradient Maximum gradient during optimization
* @param monitorSize (See Monitor)
* @return Final accuracy
*/
// scalastyle:off parameterNum
def fit(batchSize: Int, numExamples: Int, devs: Array[Context],
network: Symbol, dataLoader: (Int, KVStore) => (DataIter, DataIter),
kvStore: String, numEpochs: Int, modelPrefix: String = null, loadEpoch: Int = -1,
lr: Float = 0.1f, lrFactor: Float = 1f, lrFactorEpoch: Float = 1f,
clipGradient: Float = 0f, monitorSize: Int = -1): Accuracy = {
// kvstore
ResourceScope.using() {
var kv = KVStore.create(kvStore)
// load model
val modelPrefixWithRank =
if (modelPrefix == null) null
else modelPrefix + s"-${kv.rank}"
val (argParams, auxParams, beginEpoch) =
if (loadEpoch >= 0) {
require(modelPrefixWithRank != null)
val tmp = FeedForward.load(modelPrefix, loadEpoch)
(tmp.getArgParams, tmp.getAuxParams, loadEpoch)
} else {
(null, null, 0)
}
// save model
val checkpoint: EpochEndCallback =
if (modelPrefix == null) null
else new EpochEndCallback {
override def invoke(epoch: Int, symbol: Symbol,
argParams: Map[String, NDArray],
auxStates: Map[String, NDArray]): Unit = {
            Model.saveCheckpoint(modelPrefix, epoch + 1, symbol, argParams, auxStates)
}
}
// data
val (train, validation) = dataLoader(batchSize, kv)
// train
val epochSize =
if (kvStore == "dist_sync") numExamples / batchSize / kv.numWorkers
else numExamples / batchSize
val lrScheduler =
if (lrFactor < 1f) {
new FactorScheduler(step = Math.max((epochSize * lrFactorEpoch).toInt, 1),
factor = lrFactor)
} else {
null
}
val optimizer: Optimizer = new SGD(learningRate = lr,
lrScheduler = lrScheduler, clipGradient = clipGradient,
momentum = 0.9f, wd = 0.00001f)
// disable kvstore for single device
if (kv.`type`.contains("local") && (devs.length == 1 || devs(0).deviceType != "gpu")) {
kv.dispose()
kv = null
}
val model = new FeedForward(ctx = devs,
symbol = network,
numEpoch = numEpochs,
optimizer = optimizer,
initializer = new Xavier(factorType = "in", magnitude = 2.34f),
argParams = argParams,
auxParams = auxParams,
beginEpoch = beginEpoch,
epochSize = epochSize)
if (monitorSize > 0) {
model.setMonitor(new Monitor(monitorSize))
}
val acc = new Accuracy()
model.fit(trainData = train,
evalData = validation,
evalMetric = acc,
kvStore = kv,
batchEndCallback = new Speedometer(batchSize, 50),
epochEndCallback = checkpoint)
if (kv != null) {
kv.dispose()
}
acc
}
}
// scalastyle:on parameterNum
}
class Trainer
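/*
 * Hedged usage sketch (not part of the upstream example). Only the parameter names and
 * types come from the fit() signature above; `net` and `loader` are hypothetical
 * placeholders for a Symbol and a data-loader function built elsewhere.
 *
 *   val devs = Array(Context.cpu())
 *   val acc = Trainer.fit(batchSize = 128, numExamples = 60000, devs = devs,
 *     network = net,          // assumed: some Symbol describing the model
 *     dataLoader = loader,    // assumed: (Int, KVStore) => (DataIter, DataIter)
 *     kvStore = "local", numEpochs = 10)
 */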
|
dmlc/mxnet
|
scala-package/examples/src/main/scala/org/apache/mxnetexamples/imclassification/util/Trainer.scala
|
Scala
|
apache-2.0
| 5,144 |
object main extends App{
abstract class Expr
case class Var(name: String) extends Expr
case class Number(num: Double) extends Expr
case class UnOp(operator: String,
arg: Expr) extends Expr
case class BinOp(operator: String,
left: Expr,
right: Expr) extends Expr
  def simplifyAll(expr: Expr): Expr = expr match {
    case UnOp("-", UnOp("-", e)) => simplifyAll(e) // simplify inverse of an inverse
    case BinOp("+", e, Number(0)) => simplifyAll(e) // simplify add 0
    case BinOp("*", e, Number(1)) => simplifyAll(e) // simplify multiply by 1
case UnOp(op, e) => UnOp(op, simplifyAll(e))
case BinOp(op, l, r) => BinOp(op, simplifyAll(l), simplifyAll(r))
case _ => expr
}
  val v = Var("x")
  val unOp1 = UnOp("-", v)     // -x
  val unOp2 = UnOp("-", unOp1) // -(-x)
  println(simplifyAll(v))
  println(simplifyAll(unOp1))
  println(simplifyAll(unOp2))
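  // Expected output (case-class toString):
  //   Var(x)           -- a bare variable is already simplified
  //   UnOp(-,Var(x))   -- a single negation cannot be reduced
  //   Var(x)           -- the double negation collapses via the first case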
}
|
arcyfelix/Courses
|
18-10-18-Programming-in-Scala-by-Martin-Odersky-Lex-Spoon-and-Bill-Venners/42-RecursivePatternMatching/src/main.scala
|
Scala
|
apache-2.0
| 922 |
package org.improving.scalify
import Scalify._
import org.eclipse.jdt.core._
import org.eclipse.jdt.core.dom
import org.eclipse.jdt.core.compiler._
// the java model in eclipse is all the IJavaElement business
trait JavaModel
{
def getICU(cu: dom.CompilationUnit): ICU = cu.getJavaElement.asInstanceOf[ICU]
}
object JavaModel extends JavaModel
class RichIJavaElement(jelement: IJavaElement) { }
// The IMembers are: IType, IMethod, IField, and IInitializer
class RichIMember(member: IMember) extends RichIJavaElement(member) with Modifiable {
val node = null
def flags = member.getFlags
def srcWithoutComments: Emission = {
    val commentRegexpBlock = "/\\*(?:.|[\\n\\r])*?\\*/"
    val commentRegexpLine = """(?m)[ \t]*\/\/.*?\n"""
    val str = member
      . getSource
      . replaceAll(commentRegexpLine, "\n")
      . replaceAll(commentRegexpBlock, " ")
      . replaceAll("""(?m)^\s*$""", "") // empty lines
    if (str.matches("""\s*""")) Nil
    else COMMENT(removeCommonPrefix(str) + "\n")
}
}
class RichIMethod(imethod: IMethod) extends RichIMember(imethod)
{
def id = imethod.getElementName +
"(" + sigString + ") " +
(if (imethod.isResolved) "" else " (not resolved)")
def sigString = Signature.toString(imethod.getSignature)
}
class RichIType(itype: IType) extends RichIMember(itype) {
def fields: List[IField] = itype.getFields
def methods: List[IMethod] = itype.getMethods
def id = itype.getTypeQualifiedName + (if (itype.isResolved) "" else " (not resolved)")
def hasStaticMembers: Boolean = {
val children: List[IJavaElement] = itype.getChildren
for (c <- children) c match {
case x: IType if x.isStatic || x.isInterface => return true
case x: IField if x.isStatic => return true
case x: IMethod if x.isStatic => return true
case _ =>
}
false
}
def ensureOpen: IType = {
val op = itype.getOpenable
if (op == null || op.isOpen) return itype
log.trace("ensureOpen: %s ", itype)
op.open(null)
itype
}
}
class RichIProblem(problem: IProblem) {
import IProblem._
lazy val id = problem.getID
lazy val args = problem.getArguments
lazy val cp: Option[CategorizedProblem] = problem match {
case x: CategorizedProblem => Some(x)
case _ => None
}
def isNonStaticAccess = id == NonStaticAccessToStaticField || id == NonStaticAccessToStaticMethod
}
|
mbana/scalify
|
src/main/eclipse/JavaModel.scala
|
Scala
|
isc
| 2,327 |
package org.personal.durdina.s3dr.io
/**
* Created by misko on 26/04/2016.
*/
case class S3File(val bucket: String, val key: String)
|
durdina/s3-dist-reader
|
src/main/scala/org/personal/durdina/s3dr/io/S3File.scala
|
Scala
|
apache-2.0
| 138 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.nio.file.Files
import java.util.Locale
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkConf
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.{AnalysisException, DataFrame}
import org.apache.spark.sql.execution.DataSourceScanExec
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2Relation, FileScan, FileTable}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.apache.spark.util.Utils
abstract class FileStreamSinkSuite extends StreamTest {
import testImplicits._
override def beforeAll(): Unit = {
super.beforeAll()
spark.sessionState.conf.setConf(SQLConf.ORC_IMPLEMENTATION, "native")
}
override def afterAll(): Unit = {
try {
spark.sessionState.conf.unsetConf(SQLConf.ORC_IMPLEMENTATION)
} finally {
super.afterAll()
}
}
protected def checkQueryExecution(df: DataFrame): Unit
test("unpartitioned writing and batch reading") {
val inputData = MemoryStream[Int]
val df = inputData.toDF()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
df.writeStream
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.parquet(outputDir).as[Int]
checkDatasetUnorderly(outputDf, 1, 2, 3)
} finally {
if (query != null) {
query.stop()
}
}
}
test("SPARK-21167: encode and decode path correctly") {
val inputData = MemoryStream[String]
val ds = inputData.toDS()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
val query = ds.map(s => (s, s.length))
.toDF("value", "len")
.writeStream
.partitionBy("value")
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
try {
// The output is partitioned by "value", so the value will appear in the file path.
// This is to test if we handle spaces in the path correctly.
inputData.addData("hello world")
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.parquet(outputDir)
checkDatasetUnorderly(outputDf.as[(Int, String)], ("hello world".length, "hello world"))
} finally {
query.stop()
}
}
test("partitioned writing and batch reading") {
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.partitionBy("id")
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.parquet(outputDir)
val expectedSchema = new StructType()
.add(StructField("value", IntegerType, nullable = false))
.add(StructField("id", IntegerType))
assert(outputDf.schema === expectedSchema)
// Verify the data is correctly read
checkDatasetUnorderly(
outputDf.as[(Int, Int)],
(1000, 1), (2000, 2), (3000, 3))
checkQueryExecution(outputDf)
} finally {
if (query != null) {
query.stop()
}
}
}
test("partitioned writing and batch reading with 'basePath'") {
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val outputPath = outputDir.getAbsolutePath
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
var query: StreamingQuery = null
try {
query =
ds.map(i => (i, -i, i * 1000))
.toDF("id1", "id2", "value")
.writeStream
.partitionBy("id1", "id2")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.format("parquet")
.start(outputPath)
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val readIn = spark.read.option("basePath", outputPath).parquet(s"$outputDir/*/*")
checkDatasetUnorderly(
readIn.as[(Int, Int, Int)],
(1000, 1, -1), (2000, 2, -2), (3000, 3, -3))
} finally {
if (query != null) {
query.stop()
}
}
}
}
}
// This tests whether FileStreamSink works with aggregations. Specifically, it tests
// whether the correct streaming QueryExecution (i.e. IncrementalExecution) is used to
  // execute the trigger for writing data to the file sink. See SPARK-18440 for more details.
test("writing with aggregation") {
// Since FileStreamSink currently only supports append mode, we will test FileStreamSink
// with aggregations using event time windows and watermark, which allows
// aggregation + append mode.
val inputData = MemoryStream[Long]
val inputDF = inputData.toDF.toDF("time")
val outputDf = inputDF
.selectExpr("CAST(time AS timestamp) AS timestamp")
.withWatermark("timestamp", "10 seconds")
.groupBy(window($"timestamp", "5 seconds"))
.count()
.select("window.start", "window.end", "count")
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
outputDf.writeStream
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
def addTimestamp(timestampInSecs: Int*): Unit = {
inputData.addData(timestampInSecs.map(_ * 1L): _*)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
}
def check(expectedResult: ((Long, Long), Long)*): Unit = {
val outputDf = spark.read.parquet(outputDir)
.selectExpr(
"CAST(start as BIGINT) AS start",
"CAST(end as BIGINT) AS end",
"count")
.orderBy("start") // sort the DataFrame in order to compare with the expected one.
checkDataset(
outputDf.as[(Long, Long, Long)],
expectedResult.map(x => (x._1._1, x._1._2, x._2)): _*)
}
addTimestamp(100) // watermark = None before this, watermark = 100 - 10 = 90 after this
check() // nothing emitted yet
addTimestamp(104, 123) // watermark = 90 before this, watermark = 123 - 10 = 113 after this
check((100L, 105L) -> 2L) // no-data-batch emits results on 100-105,
addTimestamp(140) // wm = 113 before this, emit results on 100-105, wm = 130 after this
check((100L, 105L) -> 2L, (120L, 125L) -> 1L) // no-data-batch emits results on 120-125
} finally {
if (query != null) {
query.stop()
}
}
}
test("Update and Complete output mode not supported") {
val df = MemoryStream[Int].toDF().groupBy().count()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
withTempDir { dir =>
def testOutputMode(mode: String): Unit = {
val e = intercept[AnalysisException] {
df.writeStream.format("parquet").outputMode(mode).start(dir.getCanonicalPath)
}
Seq(mode, "not support").foreach { w =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(w))
}
}
testOutputMode("update")
testOutputMode("complete")
}
}
test("parquet") {
    testFormat(None) // should not throw an error; the default format is parquet when none is specified
testFormat(Some("parquet"))
}
test("orc") {
testFormat(Some("orc"))
}
test("text") {
testFormat(Some("text"))
}
test("json") {
testFormat(Some("json"))
}
def testFormat(format: Option[String]): Unit = {
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
val writer = ds.map(i => (i, i * 1000)).toDF("id", "value").writeStream
if (format.nonEmpty) {
writer.format(format.get)
}
query = writer.option("checkpointLocation", checkpointDir).start(outputDir)
} finally {
if (query != null) {
query.stop()
}
}
}
test("FileStreamSink.ancestorIsMetadataDirectory()") {
val hadoopConf = spark.sessionState.newHadoopConf()
def assertAncestorIsMetadataDirectory(path: String): Unit =
assert(FileStreamSink.ancestorIsMetadataDirectory(new Path(path), hadoopConf))
def assertAncestorIsNotMetadataDirectory(path: String): Unit =
assert(!FileStreamSink.ancestorIsMetadataDirectory(new Path(path), hadoopConf))
assertAncestorIsMetadataDirectory(s"/${FileStreamSink.metadataDir}")
assertAncestorIsMetadataDirectory(s"/${FileStreamSink.metadataDir}/")
assertAncestorIsMetadataDirectory(s"/a/${FileStreamSink.metadataDir}")
assertAncestorIsMetadataDirectory(s"/a/${FileStreamSink.metadataDir}/")
assertAncestorIsMetadataDirectory(s"/a/b/${FileStreamSink.metadataDir}/c")
assertAncestorIsMetadataDirectory(s"/a/b/${FileStreamSink.metadataDir}/c/")
assertAncestorIsNotMetadataDirectory(s"/a/b/c")
assertAncestorIsNotMetadataDirectory(s"/a/b/c/${FileStreamSink.metadataDir}extra")
}
test("SPARK-20460 Check name duplication in schema") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
val inputData = MemoryStream[(Int, Int)]
val df = inputData.toDF()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
df.writeStream
.option("checkpointLocation", checkpointDir)
.format("json")
.start(outputDir)
inputData.addData((1, 1))
failAfter(streamingTimeout) {
query.processAllAvailable()
}
} finally {
if (query != null) {
query.stop()
}
}
val errorMsg = intercept[AnalysisException] {
spark.read.schema(s"$c0 INT, $c1 INT").json(outputDir).as[(Int, Int)]
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the data schema: "))
}
}
}
test("SPARK-23288 writing and checking output metrics") {
Seq("parquet", "orc", "text", "json").foreach { format =>
val inputData = MemoryStream[String]
val df = inputData.toDF()
withTempDir { outputDir =>
withTempDir { checkpointDir =>
var query: StreamingQuery = null
var numTasks = 0
var recordsWritten: Long = 0L
var bytesWritten: Long = 0L
try {
spark.sparkContext.addSparkListener(new SparkListener() {
override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
val outputMetrics = taskEnd.taskMetrics.outputMetrics
recordsWritten += outputMetrics.recordsWritten
bytesWritten += outputMetrics.bytesWritten
numTasks += 1
}
})
query =
df.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format(format)
.start(outputDir.getCanonicalPath)
inputData.addData("1", "2", "3")
inputData.addData("4", "5")
failAfter(streamingTimeout) {
query.processAllAvailable()
}
spark.sparkContext.listenerBus.waitUntilEmpty(streamingTimeout.toMillis)
assert(numTasks > 0)
assert(recordsWritten === 5)
// This is heavily file type/version specific but should be filled
assert(bytesWritten > 0)
} finally {
if (query != null) {
query.stop()
}
}
}
}
}
}
test("special characters in output path") {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk")
val outputDir = new File(tempDir, "output @#output")
val inputData = MemoryStream[Int]
inputData.addData(1, 2, 3)
val q = inputData.toDF()
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("parquet")
.start(outputDir.getCanonicalPath)
try {
q.processAllAvailable()
} finally {
q.stop()
}
// The "_spark_metadata" directory should be in "outputDir"
assert(outputDir.listFiles.map(_.getName).contains(FileStreamSink.metadataDir))
val outputDf = spark.read.parquet(outputDir.getCanonicalPath).as[Int]
checkDatasetUnorderly(outputDf, 1, 2, 3)
}
}
testQuietly("cleanup incomplete output for aborted task") {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk")
val outputDir = new File(tempDir, "output")
val inputData = MemoryStream[Int]
inputData.addData(1, 2, 3)
val q = inputData.toDS().map(_ / 0)
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("parquet")
.start(outputDir.getCanonicalPath)
intercept[StreamingQueryException] {
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
val outputFiles = Files.walk(outputDir.toPath).iterator().asScala
.filter(_.toString.endsWith(".parquet"))
assert(outputFiles.toList.isEmpty, "Incomplete files should be cleaned up.")
}
}
}
class FileStreamSinkV1Suite extends FileStreamSinkSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_READER_LIST, "csv,json,orc,text,parquet")
.set(SQLConf.USE_V1_SOURCE_WRITER_LIST, "csv,json,orc,text,parquet")
override def checkQueryExecution(df: DataFrame): Unit = {
// Verify that MetadataLogFileIndex is being used and the correct partitioning schema has
// been inferred
    val hadoopFsRelations = df.queryExecution.analyzed.collect {
      case LogicalRelation(baseRelation: HadoopFsRelation, _, _, _) => baseRelation
    }
    assert(hadoopFsRelations.size === 1)
    assert(hadoopFsRelations.head.location.isInstanceOf[MetadataLogFileIndex])
    assert(hadoopFsRelations.head.partitionSchema.exists(_.name == "id"))
    assert(hadoopFsRelations.head.dataSchema.exists(_.name == "value"))
/** Check some condition on the partitions of the FileScanRDD generated by a DF */
def checkFileScanPartitions(df: DataFrame)(func: Seq[FilePartition] => Unit): Unit = {
val getFileScanRDD = df.queryExecution.executedPlan.collect {
case scan: DataSourceScanExec if scan.inputRDDs().head.isInstanceOf[FileScanRDD] =>
scan.inputRDDs().head.asInstanceOf[FileScanRDD]
}.headOption.getOrElse {
fail(s"No FileScan in query\\n${df.queryExecution}")
}
func(getFileScanRDD.filePartitions)
}
// Read without pruning
checkFileScanPartitions(df) { partitions =>
// There should be as many distinct partition values as there are distinct ids
assert(partitions.flatMap(_.files.map(_.partitionValues)).distinct.size === 3)
}
// Read with pruning, should read only files in partition dir id=1
checkFileScanPartitions(df.filter("id = 1")) { partitions =>
val filesToBeRead = partitions.flatMap(_.files)
assert(filesToBeRead.map(_.filePath).forall(_.contains("/id=1/")))
assert(filesToBeRead.map(_.partitionValues).distinct.size === 1)
}
// Read with pruning, should read only files in partition dir id=1 and id=2
checkFileScanPartitions(df.filter("id in (1,2)")) { partitions =>
val filesToBeRead = partitions.flatMap(_.files)
assert(!filesToBeRead.map(_.filePath).exists(_.contains("/id=3/")))
assert(filesToBeRead.map(_.partitionValues).distinct.size === 2)
}
}
}
class FileStreamSinkV2Suite extends FileStreamSinkSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_READER_LIST, "")
.set(SQLConf.USE_V1_SOURCE_WRITER_LIST, "")
override def checkQueryExecution(df: DataFrame): Unit = {
// Verify that MetadataLogFileIndex is being used and the correct partitioning schema has
// been inferred
val table = df.queryExecution.analyzed.collect {
case DataSourceV2Relation(table: FileTable, _, _) => table
}
assert(table.size === 1)
assert(table.head.fileIndex.isInstanceOf[MetadataLogFileIndex])
assert(table.head.fileIndex.partitionSchema.exists(_.name == "id"))
assert(table.head.dataSchema.exists(_.name == "value"))
/** Check some condition on the partitions of the FileScanRDD generated by a DF */
def checkFileScanPartitions(df: DataFrame)(func: Seq[FilePartition] => Unit): Unit = {
val fileScan = df.queryExecution.executedPlan.collect {
case batch: BatchScanExec if batch.scan.isInstanceOf[FileScan] =>
batch.scan.asInstanceOf[FileScan]
}.headOption.getOrElse {
fail(s"No FileScan in query\\n${df.queryExecution}")
}
func(fileScan.planInputPartitions().map(_.asInstanceOf[FilePartition]))
}
// Read without pruning
checkFileScanPartitions(df) { partitions =>
// There should be as many distinct partition values as there are distinct ids
assert(partitions.flatMap(_.files.map(_.partitionValues)).distinct.size === 3)
}
// TODO: test partition pruning when file source V2 supports it.
}
}
|
techaddict/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSinkSuite.scala
|
Scala
|
apache-2.0
| 19,859 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package examples
import rx.lang.scala.Observable
import scala.concurrent.duration._
import scala.language.postfixOps
object Olympics {
case class Medal(val year: Int, val games: String, val discipline: String, val medal: String, val athlete: String, val country: String)
def mountainBikeMedals: Observable[Medal] = Observable.just(
duration(100 millis), // a short delay because medals are only awarded some time after the Games began
Observable.just(
Medal(1996, "Atlanta 1996", "cross-country men", "Gold", "Bart BRENTJENS", "Netherlands"),
Medal(1996, "Atlanta 1996", "cross-country women", "Gold", "Paola PEZZO", "Italy"),
Medal(1996, "Atlanta 1996", "cross-country men", "Silver", "Thomas FRISCHKNECHT", "Switzerland"),
Medal(1996, "Atlanta 1996", "cross-country women", "Silver", "Alison SYDOR", "Canada"),
Medal(1996, "Atlanta 1996", "cross-country men", "Bronze", "Miguel MARTINEZ", "France"),
Medal(1996, "Atlanta 1996", "cross-country women", "Bronze", "Susan DEMATTEI", "United States of America")
),
fourYearsEmpty,
Observable.just(
Medal(2000, "Sydney 2000", "cross-country women", "Gold", "Paola PEZZO", "Italy"),
Medal(2000, "Sydney 2000", "cross-country women", "Silver", "Barbara BLATTER", "Switzerland"),
Medal(2000, "Sydney 2000", "cross-country women", "Bronze", "Marga FULLANA", "Spain"),
Medal(2000, "Sydney 2000", "cross-country men", "Gold", "Miguel MARTINEZ", "France"),
Medal(2000, "Sydney 2000", "cross-country men", "Silver", "Filip MEIRHAEGHE", "Belgium"),
Medal(2000, "Sydney 2000", "cross-country men", "Bronze", "Christoph SAUSER", "Switzerland")
),
fourYearsEmpty,
Observable.just(
Medal(2004, "Athens 2004", "cross-country men", "Gold", "Julien ABSALON", "France"),
Medal(2004, "Athens 2004", "cross-country men", "Silver", "Jose Antonio HERMIDA RAMOS", "Spain"),
Medal(2004, "Athens 2004", "cross-country men", "Bronze", "Bart BRENTJENS", "Netherlands"),
Medal(2004, "Athens 2004", "cross-country women", "Gold", "Gunn-Rita DAHLE", "Norway"),
Medal(2004, "Athens 2004", "cross-country women", "Silver", "Marie-Helene PREMONT", "Canada"),
Medal(2004, "Athens 2004", "cross-country women", "Bronze", "Sabine SPITZ", "Germany")
),
fourYearsEmpty,
Observable.just(
Medal(2008, "Beijing 2008", "cross-country women", "Gold", "Sabine SPITZ", "Germany"),
Medal(2008, "Beijing 2008", "cross-country women", "Silver", "Maja WLOSZCZOWSKA", "Poland"),
Medal(2008, "Beijing 2008", "cross-country women", "Bronze", "Irina KALENTYEVA", "Russian Federation"),
Medal(2008, "Beijing 2008", "cross-country men", "Gold", "Julien ABSALON", "France"),
Medal(2008, "Beijing 2008", "cross-country men", "Silver", "Jean-Christophe PERAUD", "France"),
Medal(2008, "Beijing 2008", "cross-country men", "Bronze", "Nino SCHURTER", "Switzerland")
),
fourYearsEmpty,
Observable.just(
Medal(2012, "London 2012", "cross-country men", "Gold", "Jaroslav KULHAVY", "Czech Republic"),
Medal(2012, "London 2012", "cross-country men", "Silver", "Nino SCHURTER", "Switzerland"),
Medal(2012, "London 2012", "cross-country men", "Bronze", "Marco Aurelio FONTANA", "Italy"),
Medal(2012, "London 2012", "cross-country women", "Gold", "Julie BRESSET", "France"),
Medal(2012, "London 2012", "cross-country women", "Silver", "Sabine SPITZ", "Germany"),
Medal(2012, "London 2012", "cross-country women", "Bronze", "Georgia GOULD", "United States of America")
)
).concat
// speed it up :D
val oneYear = 1000.millis
//val neverUsedDummyMedal = Medal(3333, "?", "?", "?", "?", "?")
/** runs an infinite loop, and returns Bottom type (Nothing) */
def getNothing: Nothing = {
println("You shouldn't have called this method ;-)")
getNothing
}
/** returns an Observable which emits no elements and completes after a duration of d */
def duration(d: Duration): Observable[Nothing] = Observable.interval(d).take(1).filter(_ => false).map(_ => getNothing)
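  // How duration (above) works: interval(d) emits its first element only after d has
  // elapsed, take(1) completes the stream after that element, and filter(_ => false)
  // drops it, so the result completes after d without emitting and getNothing is never called.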
def fourYearsEmpty: Observable[Medal] = duration(4*oneYear)
def yearTicks: Observable[Int] =
(Observable.from(1996 to 2014) zip (Observable.just(-1) ++ Observable.interval(oneYear))).map(_._1)
/*
def fourYearsEmptyOld: Observable[Medal] = {
// TODO this should return an observable which emits nothing during fourYears and then completes
// Because of https://github.com/ReactiveX/RxJava/issues/388, we get non-terminating tests
// And this https://github.com/ReactiveX/RxJava/pull/289#issuecomment-24738668 also causes problems
// So we don't use this:
Observable.interval(fourYears).take(1).map(i => neverUsedDummyMedal).filter(m => false)
// But we just return empty, which completes immediately
// Observable.empty
}*/
}
|
jbripley/RxScala
|
examples/src/test/scala/examples/Olympics.scala
|
Scala
|
apache-2.0
| 5,486 |
package filters
import akka.stream.Materializer
import javax.inject._
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}
import models.LoginSession
@Singleton
class LoginSessionFilter @Inject()(
implicit override val mat: Materializer,
exec: ExecutionContext
) extends Filter {
override def apply(
nextFilter: RequestHeader => Future[Result]
)(
requestHeader: RequestHeader
): Future[Result] = {
nextFilter(requestHeader).map { result =>
      // If a blogger is logged in, postpone the login expiration time.
LoginSession.retrieveLogin(result)(requestHeader) match {
case Some(loginSession) =>
result.withSession(requestHeader.session + loginSession.renewExpirationTime().toLoginSessionString)
case None => result
}
}
}
}
|
ruimo/blog
|
app/filters/LoginSessionFilter.scala
|
Scala
|
apache-2.0
| 810 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv.generic
import kantan.codecs.shapeless.laws.{Left, Or, Right}
import kantan.csv.generic.Instances._
import kantan.csv.generic.arbitrary._
import kantan.csv.laws.LegalRow
import kantan.csv.laws.discipline.{DisciplineSuite, RowCodecTests}
import org.scalacheck.Arbitrary
object Instances {
case class Simple(i: Int)
case class Complex(i: Int, b: Boolean, c: Option[Float])
implicit val arbLegal: Arbitrary[LegalRow[Or[Complex, Simple]]] =
arbLegalValue { (o: Or[Complex, Simple]) =>
o match {
case Left(Complex(i, b, c)) => Seq(i.toString, b.toString, c.fold("")(_.toString))
case Right(Simple(i)) => Seq(i.toString)
}
}
}
// Shapeless' Lazy generates code with Null that we need to ignore.
@SuppressWarnings(Array("org.wartremover.warts.Null"))
class DerivedRowCodecTests extends DisciplineSuite {
checkAll("DerivedRowCodec[Or[Complex, Simple]]", RowCodecTests[Or[Complex, Simple]].codec[Byte, Float])
}
|
nrinaudo/tabulate
|
generic/shared/src/test/scala/kantan/csv/generic/DerivedRowCodecTests.scala
|
Scala
|
mit
| 1,576 |
package akashic.storage.service
import akka.http.scaladsl.server.Route
trait Runnable {
def run: Route
}
|
akiradeveloper/fss3
|
src/main/scala/akashic/storage/service/Runnable.scala
|
Scala
|
apache-2.0
| 109 |
/*
* Copyright 2015 Michael Gnatz.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.mg.tt.service.dao
import de.mg.tt.model.{Activity, Category, Persistent}
import de.mg.tt.service.{ExceptionHandler, FilterCriteria}
import javax.ejb._
import javax.interceptor.Interceptors
import javax.persistence._
import scala.collection.JavaConverters._
@Stateless
@LocalBean
@Interceptors(Array(classOf[ExceptionHandler]))
class TTMgmtDao {
@PersistenceContext
private var em: EntityManager = _
def this(em: EntityManager) = {
this()
this.em = em
}
def create[T <: Persistent](entity: T)(implicit mf: Manifest[T]): T = {
em.persist(entity)
entity
}
def update[T <: Persistent](entity: T)(implicit mf: Manifest[T]): T = {
em.merge(entity)
entity
}
def get[T <: Persistent](id: Long)(implicit mf: Manifest[T]): T =
em.getReference(mf.runtimeClass, id).asInstanceOf[T]
def delete(entity: Persistent): Unit = {
entity match {
case c: Category => c.activities.remove(c)
case a: Activity => ;
}
em.remove(entity)
}
def findAllCategories(): List[Category] =
resultList[Category](em.createNamedQuery("findAllCategories"))
def findActivities(criteria: FilterCriteria): List[Activity] = {
assert(criteria != null)
var q = "select a from Activity a where a.from >= :fromDate and a.to <= :toDate"
var i = 0
criteria.categories.foreach(
elem => {
q += s""" and exists (select c${i} from Category c${i} where a.categories = c${i} and c${i}.name = "${elem.name}")""";
i += 1
})
q += " order by a.from asc"
resultList[Activity](em.createQuery(q)
.setParameter("fromDate", criteria.from)
.setParameter("toDate", criteria.to)
.setMaxResults(5000))
}
def findAllActivities(): List[Activity] = resultList[Activity](em.createNamedQuery("findAllActivities"))
private def resultList[T](query: Query) = query.getResultList.asScala.toList.asInstanceOf[List[T]]
}
|
micgn/timetracker
|
TimeTrackerWeb/src/main/scala/de/mg/tt/service/dao/TTMgmtDao.scala
|
Scala
|
apache-2.0
| 2,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import org.apache.spark.sql.execution.streaming.StreamExecution
trait StateStoreMetricsTest extends StreamTest {
private var lastCheckedRecentProgressIndex = -1
private var lastQuery: StreamExecution = null
override def beforeEach(): Unit = {
super.beforeEach()
lastCheckedRecentProgressIndex = -1
}
def assertNumStateRows(
total: Seq[Long],
updated: Seq[Long],
droppedByWatermark: Seq[Long],
removed: Option[Seq[Long]]): AssertOnQuery = {
AssertOnQuery(s"Check total state rows = $total, updated state rows = $updated" +
s", rows dropped by watermark = $droppedByWatermark, removed state rows = $removed") { q =>
// This assumes that the streaming query will not make any progress while the eventually
// is being executed.
eventually(timeout(streamingTimeout)) {
val (progressesSinceLastCheck, lastCheckedProgressIndex, numStateOperators) =
retrieveProgressesSinceLastCheck(q)
val allNumUpdatedRowsSinceLastCheck =
progressesSinceLastCheck.map(_.stateOperators.map(_.numRowsUpdated))
val allNumRowsDroppedByWatermarkSinceLastCheck =
progressesSinceLastCheck.map(_.stateOperators.map(_.numRowsDroppedByWatermark))
lazy val debugString = "recent progresses:\n" +
progressesSinceLastCheck.map(_.prettyJson).mkString("\n\n")
val numTotalRows = progressesSinceLastCheck.last.stateOperators.map(_.numRowsTotal)
assert(numTotalRows === total, s"incorrect total rows, $debugString")
val numUpdatedRows = arraySum(allNumUpdatedRowsSinceLastCheck, numStateOperators)
        assert(numUpdatedRows === updated, s"incorrect updated rows, $debugString")
val numRowsDroppedByWatermark = arraySum(allNumRowsDroppedByWatermarkSinceLastCheck,
numStateOperators)
assert(numRowsDroppedByWatermark === droppedByWatermark,
s"incorrect dropped rows by watermark, $debugString")
if (removed.isDefined) {
val allNumRowsRemovedSinceLastCheck =
progressesSinceLastCheck.map(_.stateOperators.map(_.numRowsRemoved))
val numRemovedRows = arraySum(allNumRowsRemovedSinceLastCheck, numStateOperators)
assert(numRemovedRows === removed.get, s"incorrect removed rows, $debugString")
}
advanceLastCheckedRecentProgressIndex(lastCheckedProgressIndex)
}
true
}
}
/** AssertOnQuery to verify the given state operator's custom metric has expected value */
def assertStateOperatorCustomMetric(
metric: String, expected: Long, operatorIndex: Int = 0): AssertOnQuery = {
AssertOnQuery(s"Check metrics $metric has value $expected") { q =>
eventually(timeout(streamingTimeout)) {
val (progressesSinceLastCheck, lastCheckedProgressIndex, numStateOperators) =
retrieveProgressesSinceLastCheck(q)
assert(operatorIndex < numStateOperators, s"Invalid operator Index: $operatorIndex")
val allCustomMetricValuesSinceLastCheck = progressesSinceLastCheck
.map(_.stateOperators(operatorIndex).customMetrics.get(metric))
.map(Long2long)
lazy val debugString = "recent progresses:\n" +
progressesSinceLastCheck.map(_.prettyJson).mkString("\n\n")
assert(allCustomMetricValuesSinceLastCheck.sum === expected,
s"incorrect custom metric ($metric), $debugString")
advanceLastCheckedRecentProgressIndex(lastCheckedProgressIndex)
}
true
}
}
/** Assert on [[StateOperatorProgress]] metrics */
def assertStateOperatorProgressMetric(operatorName: String, numShufflePartitions: Long,
numStateStoreInstances: Long, operatorIndex: Int = 0): AssertOnQuery = {
AssertOnQuery(s"Check operator progress metrics: operatorName = $operatorName, " +
s"numShufflePartitions = $numShufflePartitions, " +
s"numStateStoreInstances = $numStateStoreInstances") { q =>
eventually(timeout(streamingTimeout)) {
val (progressesSinceLastCheck, lastCheckedProgressIndex, numStateOperators) =
retrieveProgressesSinceLastCheck(q)
assert(operatorIndex < numStateOperators, s"Invalid operator Index: $operatorIndex")
val lastOpProgress = progressesSinceLastCheck.last.stateOperators(operatorIndex)
lazy val debugString = "recent progresses:\n" +
progressesSinceLastCheck.map(_.prettyJson).mkString("\n\n")
assert(lastOpProgress.operatorName === operatorName,
s"incorrect operator name, $debugString")
assert(lastOpProgress.numShufflePartitions === numShufflePartitions,
s"incorrect number of shuffle partitions, $debugString")
assert(lastOpProgress.numStateStoreInstances === numStateStoreInstances,
s"incorrect number of state stores, $debugString")
advanceLastCheckedRecentProgressIndex(lastCheckedProgressIndex)
}
true
}
}
def assertNumStateRows(total: Seq[Long], updated: Seq[Long]): AssertOnQuery = {
assert(total.length === updated.length)
assertNumStateRows(
total, updated, droppedByWatermark = (0 until total.length).map(_ => 0L), None)
}
def assertNumStateRows(
total: Long,
updated: Long,
droppedByWatermark: Long = 0): AssertOnQuery = {
assertNumStateRows(Seq(total), Seq(updated), Seq(droppedByWatermark), None)
}
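  // arraySum (below) sums the metric arrays element-wise across progress snapshots, e.g.
  // arraySum(Seq(Array(1L, 2L), Array(3L, 4L)), 2) == Seq(4L, 6L).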
def arraySum(arraySeq: Seq[Array[Long]], arrayLength: Int): Seq[Long] = {
if (arraySeq.isEmpty) return Seq.fill(arrayLength)(0L)
assert(arraySeq.forall(_.length == arrayLength),
"Arrays are of different lengths:\n" + arraySeq.map(_.toSeq).mkString("\n"))
(0 until arrayLength).map { index => arraySeq.map(_.apply(index)).sum }
}
def retrieveProgressesSinceLastCheck(
execution: StreamExecution): (Array[StreamingQueryProgress], Int, Int) = {
val recentProgress = execution.recentProgress
require(recentProgress != null, "No progress made")
require(recentProgress.length < spark.sessionState.conf.streamingProgressRetention,
"This test assumes that all progresses are present in q.recentProgress but " +
"some may have been dropped due to retention limits")
if (execution.ne(lastQuery)) lastCheckedRecentProgressIndex = -1
lastQuery = execution
val numStateOperators = recentProgress.last.stateOperators.length
val recentProgresses = recentProgress
.slice(lastCheckedRecentProgressIndex + 1, recentProgress.length)
.filter(_.stateOperators.length == numStateOperators)
(recentProgresses, recentProgress.length - 1, recentProgresses.last.stateOperators.length)
}
def advanceLastCheckedRecentProgressIndex(newLastCheckedRecentProgressIndex: Int): Unit = {
lastCheckedRecentProgressIndex = newLastCheckedRecentProgressIndex
}
}
|
ueshin/apache-spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StateStoreMetricsTest.scala
|
Scala
|
apache-2.0
| 7,661 |
package com.pointr.util
import java.io.FileInputStream
import org.yaml.snakeyaml.Yaml
import com.pointr.tcp.util.Logger._
import com.pointr.tensorflow.{AnyMap, MapMap, StringMap}
class AppConfig(yamlPath: String, os: String) {
val yamlConf = YamlStruct(yamlPath)
val envmap = yamlConf.getMap("environments").apply(os).asInstanceOf[MapMap]("env").asInstanceOf[StringMap]
private def emap(app: String) = yamlConf.getMap("defaults").apply("apps").asInstanceOf[AnyMap](app).asInstanceOf[StringMap].map { case (k, v) =>
val vnew = envmap.foldLeft(v) { case (vv, (ke, ve)) => /* info(vv); */ vv.replace(s"$${$ke}", ve) }
(k, vnew)
}
def apply(dottedKey: String) = {
    val keys = dottedKey.split("\\.")
yamlConf.getMap(keys(0)).asInstanceOf[StringMap].get(keys(1)).get
}
def apply(app: String, key: String, default: String = "") = emap(app).getOrElse(key, default)
def getMap(app: String, key: String) = emap(app)(key).asInstanceOf[Map[String, String]]
}
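/*
 * Hedged usage sketch (not part of the original file): the yaml path, OS key and lookup
 * names below are hypothetical; only the constructor and method shapes above are real.
 *
 *   val conf = new AppConfig("app.yml", "linux")
 *   conf("section.key")                       // top-level map "section", then entry "key"
 *   conf("myApp", "host", default = "localhost") // per-app value with env substitution
 *   conf.getMap("myApp", "endpoints")             // nested map lookup
 */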
trait YamlConf {
def apply(key: String): Any
def apply(key: String, default: String): Any
def getMap(key: String): Map[String, _]
}
case class YamlStruct(ymlFile: String) extends YamlConf {
val yaml = new Yaml
import collection.JavaConverters._
def expand(o: Any): Any = {
o match {
case lst: java.util.List[_] =>
Seq(lst.asScala.map { o => expand(o) })
case m: java.util.Map[_, _] =>
m.asScala.map { case (k, v) =>
(k, expand(v))
}.toMap
case _ =>
o
}
}
val yamlConf = {
val iy = (for (y <- yaml.loadAll(new FileInputStream(ymlFile)).iterator.asScala) yield y).toList.head // toMap[String,Any]
val omap = expand(iy).asInstanceOf[Map[String, _]]
info(s"omap: ${omap}")
omap
}
def getConfiguration = yamlConf
override def toString() = {
getConfiguration.mkString(",")
}
override def getMap(key: String) = apply(key).asInstanceOf[Map[String, _]]
override def apply(key: String) = {
yamlConf(key)
}
override def apply(key: String, default: String) = {
yamlConf.getOrElse(key, default)
}
}
object YamlStruct {
def main(args: Array[String]) {
val f = java.io.File.createTempFile("yaml-test", null)
val s =
"""
abcdef: |
abc
def
g hi hi again
AnotherKey:
key1: key value 1
key2: key value 2
Map parent:
Map Child:
mapkey1: MapChildVal1
mapkey2:
MapGrandChild:
- MapGrandChildVal1
- MapGrandChildVal2
mapkey3:
"MapChild1Val2"
MapChild2 :
intval: 33
mapchild2: MapChild2 Val 1
"""
tools.nsc.io.File(f).writeAll(s)
val y = new YamlStruct(f.getAbsolutePath)
    info(y.getConfiguration.map { case (k, v) => s"$k=$v" }.mkString("\n"))
}
}
|
OpenChaiSpark/OCspark
|
tf/src/main/scala/com/pointr/util/YamlStruct.scala
|
Scala
|
apache-2.0
| 2,789 |
/**
* @author Yuuto
*/
package yuuto.enhancedinventories.config.json
import yuuto.enhancedinventories.config.EIConfiguration
import java.io.File
import java.io.FilenameFilter
import com.google.gson.JsonObject
import com.google.gson.JsonParser
import yuuto.enhancedinventories.util.LogHelperEI
import java.io.FileReader
import yuuto.enhancedinventories.materials.FrameMaterial
import com.google.gson.JsonElement
import scala.collection.mutable.MutableList
import net.minecraft.item.ItemStack
import yuuto.enhancedinventories.materials.FrameMaterials
import yuuto.enhancedinventories.materials.ETier
class JsonFrameFactory {
def loadFrames(){
LogHelperEI.Info("Loading Frames");
val dir:File=EIConfiguration.getFrameConfigDir();
val files:Array[File]=getFileList(dir);
val parser:JsonParser = new JsonParser();
if(EIConfiguration.canCraftStoneFrame){
ETier.Tier1.addChestMaterial("cobblestone");
FrameMaterials.Instance.registerMaterial("cobblestone", FrameMaterials.Stone);
}else{
FrameMaterials.Instance.registerMaterial(FrameMaterials.Stone);
}
if(EIConfiguration.canPaintObsidianFrame){
FrameMaterials.Instance.registerMaterial("obsidian", FrameMaterials.Obsidian);
}else{
FrameMaterials.Instance.registerMaterial(FrameMaterials.Obsidian);
}
for(f<-files){
loadFramesFromFile(f, parser);
}
}
def getFileList(dir:File):Array[File]={
val files:Array[File]=dir.listFiles(new FilenameFilter() {
override def accept(file:File, name:String):Boolean={
if (name == null) {
return false;
}
return name.toLowerCase().endsWith(".json"); //|| new File(file, name).isDirectory();
}
});
return files;
}
def loadFramesFromFile(file:File, parser:JsonParser){
var frameList:JsonObject=null;
try {
frameList = parser.parse(new FileReader(file)).asInstanceOf[JsonObject];
} catch{
case t:Throwable=>{
LogHelperEI.Error("Critical error reading from recipe file "+file.getName()+" Please make sure it is formated correctly");
LogHelperEI.Error(t.getMessage());
}
//log.error("Critical error reading from a world generation file: " + configs(1) + " > Please be sure the file is correct!", t);
return;
}
val itr=frameList.entrySet().iterator();
val fileName=file.getName();
while(itr.hasNext()){
val entry=itr.next()
if(!entry.getValue().isJsonObject()){
LogHelperEI.Error("Invalid frame "+entry.getKey()+" in file "+file.getName());
}else{
try{
parseFrame(entry.getValue().getAsJsonObject(), fileName, entry.getKey());
}catch{
case e:Exception=>{
LogHelperEI.Error(e.getMessage());
var t=e.getCause();
while(t != null){
LogHelperEI.Error(t.getMessage())
t=t.getCause();
}
}
}
}
}
}
def parseFrame(jsonFrame:JsonObject, fileName:String, key:String){
val name=getFrameName(jsonFrame);
val r=getInt(jsonFrame, "ColorRed", 255, fileName, key);
val g=getInt(jsonFrame, "ColorGreen", 255, fileName, key);
val b=getInt(jsonFrame, "ColorBlue", 255, fileName, key);
val frameType=getInt(jsonFrame, "FrameType", 0, fileName, key);
val frameMaterial=new FrameMaterial(name, frameType, r, g, b);
val materials:Array[Object]=getMaterials(jsonFrame.get("materials"), fileName, key);
val tier=getInt(jsonFrame, "tier", -1, fileName, key);
for(o<-materials){
if(o.isInstanceOf[ItemStack]){
FrameMaterials.Instance.registerMaterial(o.asInstanceOf[ItemStack], frameMaterial);
if(tier>=0 && tier < 8){
ETier.values()(tier).addChestMaterial(o.asInstanceOf[ItemStack]);
}
}else if(o.isInstanceOf[String]){
FrameMaterials.Instance.registerMaterial(o.asInstanceOf[String], frameMaterial);
if(tier>=0 && tier < 8){
ETier.values()(tier).addChestMaterial(o.asInstanceOf[String]);
}
}
}
}
def getFrameName(jsonFrame:JsonObject):String={
if(!jsonFrame.has("name") || !jsonFrame.get("name").isJsonPrimitive()){
      throw new Exception("Cannot find frame name for frame");
}
return jsonFrame.get("name").getAsString();
}
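  // getInt below accepts either a JSON number or a numeric string; anything else
  // (or a string that fails to parse) is logged and falls back to the supplied default.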
def getInt(jsonFrame:JsonObject, key:String, default:Int, fileName:String, framekey:String):Int={
if(!jsonFrame.has(key)){
return default;
}
if(!jsonFrame.get(key).isJsonPrimitive()){
LogHelperEI.Error("Invalid "+key+" for frame "+framekey+" in file "+fileName);
return default;
}
if(jsonFrame.getAsJsonPrimitive(key).isNumber()){
return jsonFrame.get(key).getAsInt();
}else if(jsonFrame.getAsJsonPrimitive(key).isString()){
var value=default;
try{
value=jsonFrame.get(key).getAsInt();
}catch{
case e:NumberFormatException=>{
LogHelperEI.Error("Invalid "+key+" for frame "+framekey+" in file "+fileName+" could convert string to number");
}
}
return value;
}
LogHelperEI.Error("Invalid "+key+" for frame "+framekey+" in file "+fileName+" value must be a number");
return default;
}
def getMaterials(mats:JsonElement, fileName:String, framekey:String):Array[Object]={
if(mats==null){
throw new Exception("No materials for frame "+framekey+" in file "+fileName+" frames require atleast one material!");
}
if(mats.isJsonArray()){
val jsArray=mats.getAsJsonArray();
val matList=new MutableList[Object]();
for(i<-0 until jsArray.size()){
if(jsArray.get(i).isJsonObject()){
try{
val ret=JsonHelper.getItemStack(jsArray.get(i).getAsJsonObject());
matList+=ret;
}catch{
case e:Exception=>{
LogHelperEI.Error(e.getMessage()+" for frame "+framekey+" in file "+fileName);
}
}
}else if(jsArray.get(i).isJsonPrimitive()){
matList+=jsArray.get(i).getAsString();
}else{
LogHelperEI.Error("invalid material for frame "+framekey+" in file "+fileName);
}
}
if(matList.size < 1){
throw new Exception("No materials for frame "+framekey+" in file "+fileName+" frames require atleast one material!");
}
return matList.toArray;
}else if(mats.isJsonObject()){
try{
val ret=Array(JsonHelper.getItemStack(mats.getAsJsonObject()))
return ret.asInstanceOf[Array[Object]];
}catch{
case e:Exception=>{
throw new Exception("No materials for frame "+framekey+" in file "+fileName+" frames require atleast one material!", e)
}
}
}else if(mats.isJsonPrimitive()){
return Array(mats.getAsString());
}
LogHelperEI.Error("invalid material for frame "+framekey+" in file "+fileName);
throw new Exception("No materials for frame "+framekey+" in file "+fileName+" frames require atleast one material!");
}
}
|
Joccob/EnhancedInventories
|
src/main/scala/yuuto/enhancedinventories/config/json/JsonFrameFactory.scala
|
Scala
|
gpl-2.0
| 7,020 |
package org.fayalite.util.dsl
/**
* Created by aa on 7/2/2016.
*/
trait CollectLike {
implicit class SeqHelpSimpleMethod[T](s: Seq[T]) {
    def allContainsNot(f: T => Boolean): Boolean = {
      // true when no element satisfies f (completes the hinted forall)
      s.forall(x => !f(x))
    }
}
}
|
ryleg/fayalite
|
common/src/main/scala/org/fayalite/util/dsl/CollectLike.scala
|
Scala
|
mit
| 220 |
class A {
def apply(xs: Int*) = 42
}
/* name clash between defined and inherited member:
class B extends A {
def apply(xs: Seq[Int]) = 27
}
*/
/* method apply overrides nothing.
*/
class C extends A {
override def apply(xs: Seq[Int]) = 17
}
// ok because different return type
class D extends A {
def apply(xs: Seq[Int]) = "42"
}
|
scala/scala
|
test/files/neg/t7052b.scala
|
Scala
|
apache-2.0
| 344 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.amqp.test
import org.apache.qpid.proton.amqp.messaging.{AmqpValue, Source, Target}
import org.fusesource.hawtdispatch._
import org.apache.qpid.proton.hawtdispatch.api._
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class AmqpConnectionTest extends AmqpTestSupport {
def print_result[T](action: String)(then_action: => Unit): Callback[T] = new Callback[T] {
def onSuccess(value: T) {
println(action + " completed");
then_action
}
def onFailure(value: Throwable) {
println(action + " failed: " + value);
value.printStackTrace()
}
}
def then_do[T](func: (T) => Unit): Callback[T] = new Callback[T] {
def onSuccess(value: T) {
func(value)
}
def onFailure(value: Throwable) {
value.printStackTrace()
}
}
test("Sender Open") {
val amqp = new AmqpConnectOptions();
amqp.setHost("localhost", port)
amqp.setUser("admin");
amqp.setPassword("password");
val connection = AmqpConnection.connect(amqp)
connection.queue() {
var session = connection.createSession()
val target = new Target
target.setAddress("queue://FOO")
val sender = session.createSender(target);
val md = sender.send(session.createTextMessage("Hello World"))
md.onSettle(print_result("message sent") {
println("========================================================")
println("========================================================")
val source = new Source
source.setAddress("queue://FOO")
val receiver = session.createReceiver(source);
receiver.resume()
receiver.setDeliveryListener(new AmqpDeliveryListener {
def onMessageDelivery(delivery: MessageDelivery) = {
println("Received: " + delivery.getMessage().getBody().asInstanceOf[AmqpValue].getValue);
delivery.settle()
connection.close()
}
})
})
}
connection.waitForDisconnected()
}
}
|
chirino/activemq-apollo
|
apollo-amqp/src/test/scala/org/apache/activemq/apollo/amqp/test/AmqpConnectionTest.scala
|
Scala
|
apache-2.0
| 2,847 |
package com.xuanyuansen.algo.params
/**
* Created by wangshuai on 16/7/28.
*/
class GRULayerParam extends LayerParam {
}
|
xuanyuansen/scalaLSTM
|
src/main/scala/com/xuanyuansen/algo/params/GRULayerParam.scala
|
Scala
|
apache-2.0
| 125 |
package com.ponkotuy.value
/**
*
* @author ponkotuy
* Date: 15/07/24.
*/
object ShipIds {
val Akebono = 15
val Mochizuki = 31
val Hibiki = 35
val Yudachi = 45
val Tenryu = 51
val Sendai = 54
val Naka = 56
val Chokai = 69
val Tone = 71
val Kirishima = 85
val Sazanami = 94
val Mikuma = 120
val Kumano = 125
val I8 = 128
val Yamato = 131
val Makigumo = 134
val Noshiro = 138
val Yahagi = 139
val Sakawa = 140
val IsuzuMk2 = 141
val Musashi = 143
val Vernyj = 147 // べーるぬい
val Taiho = 153
val Katori = 154
val I401 = 155
val AkitsuMaru = 161
val Tanikaze = 169
val Bismarck = 171
val Ooyodo = 183
val Taigei = 184
val Ryuho = 185
val Hatsukaze = 190
val Akiduki = 330
val Teruduki = 346
val HatsudukiMk1 = 357
val Unryu = 404
val Harusame = 405
val Hayashimo = 409
val Kiyoshimo = 410
val Asagumo = 413
val Nowaki = 415
val SatsukiMk2 = 418
val AkidukiMk1 = 421
val TerudukiMk1 = 422
val Hatsuduki = 423
val Asashimo = 425
val MayaMk2 = 428
val U511 = 431
val Ro500 = 436
val Littorio = 441
val Roma = 442
val Italia = 446
val Okinami = 452
val KasumiMk2Otsu = 470
val Amagiri = 479
val KinuMk2 = 487
val YuraMk2 = 488
val FumidukiMk2 = 548
def isSpecialShipId(id: Int): Boolean = 750 < id && id <= 1500
def isEnemy(id: Int): Boolean = 1500 < id
}
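// A small illustration (added for clarity, not in the original file) of the id-range helpers
// above: the ranges are distinguished purely by numeric bounds on the ship id.
object ShipIdsExample {
  def main(args: Array[String]): Unit = {
    println(ShipIds.isSpecialShipId(ShipIds.Yamato)) // false: 131 is a regular ship id
    println(ShipIds.isSpecialShipId(751))            // true: 750 < 751 <= 1500
    println(ShipIds.isEnemy(1501))                   // true: enemy ids start above 1500
  }
}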
|
ponkotuy/MyFleetGirls
|
library/src/main/scala/com/ponkotuy/value/ShipIds.scala
|
Scala
|
mit
| 1,381 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import scala.collection.mutable
import com.google.common.annotations.VisibleForTesting
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.{Logging, SparkException, SparkConf, TaskContext}
/**
* Allocates a pool of memory to tasks for use in shuffle operations. Each disk-spilling
* collection (ExternalAppendOnlyMap or ExternalSorter) used by these tasks can acquire memory
* from this pool and release it as it spills data out. When a task ends, all its memory will be
* released by the Executor.
*
* This class tries to ensure that each task gets a reasonable share of memory, instead of some
* task ramping up to a large amount first and then causing others to spill to disk repeatedly.
 * If there are N tasks, it ensures that each task can acquire at least 1 / 2N of the memory
* before it has to spill, and at most 1 / N. Because N varies dynamically, we keep track of the
* set of active tasks and redo the calculations of 1 / 2N and 1 / N in waiting tasks whenever
* this set changes. This is all done by synchronizing access on "this" to mutate state and using
* wait() and notifyAll() to signal changes.
*
* Use `ShuffleMemoryManager.create()` factory method to create a new instance.
*
* @param maxMemory total amount of memory available for execution, in bytes.
* @param pageSizeBytes number of bytes for each page, by default.
*/
private[spark]
class ShuffleMemoryManager protected (
val maxMemory: Long,
val pageSizeBytes: Long)
extends Logging {
private val taskMemory = new mutable.HashMap[Long, Long]() // taskAttemptId -> memory bytes
private def currentTaskAttemptId(): Long = {
// In case this is called on the driver, return an invalid task attempt id.
Option(TaskContext.get()).map(_.taskAttemptId()).getOrElse(-1L)
}
/**
* Try to acquire up to numBytes memory for the current task, and return the number of bytes
* obtained, or 0 if none can be allocated. This call may block until there is enough free memory
* in some situations, to make sure each task has a chance to ramp up to at least 1 / 2N of the
* total memory pool (where N is the # of active tasks) before it is forced to spill. This can
* happen if the number of tasks increases but an older task had a lot of memory already.
*/
def tryToAcquire(numBytes: Long): Long = synchronized {
val taskAttemptId = currentTaskAttemptId()
assert(numBytes > 0, "invalid number of bytes requested: " + numBytes)
// Add this task to the taskMemory map just so we can keep an accurate count of the number
// of active tasks, to let other tasks ramp down their memory in calls to tryToAcquire
if (!taskMemory.contains(taskAttemptId)) {
taskMemory(taskAttemptId) = 0L
      notifyAll() // Will later cause waiting tasks to wake up and check numActiveTasks again
}
// Keep looping until we're either sure that we don't want to grant this request (because this
// task would have more than 1 / numActiveTasks of the memory) or we have enough free
// memory to give it (we always let each task get at least 1 / (2 * numActiveTasks)).
while (true) {
val numActiveTasks = taskMemory.keys.size
val curMem = taskMemory(taskAttemptId)
val freeMemory = maxMemory - taskMemory.values.sum
// How much we can grant this task; don't let it grow to more than 1 / numActiveTasks;
// don't let it be negative
val maxToGrant = math.min(numBytes, math.max(0, (maxMemory / numActiveTasks) - curMem))
if (curMem < maxMemory / (2 * numActiveTasks)) {
// We want to let each task get at least 1 / (2 * numActiveTasks) before blocking;
// if we can't give it this much now, wait for other tasks to free up memory
// (this happens if older tasks allocated lots of memory before N grew)
if (freeMemory >= math.min(maxToGrant, maxMemory / (2 * numActiveTasks) - curMem)) {
val toGrant = math.min(maxToGrant, freeMemory)
taskMemory(taskAttemptId) += toGrant
return toGrant
} else {
logInfo(
s"TID $taskAttemptId waiting for at least 1/2N of shuffle memory pool to be free")
wait()
}
} else {
        // Only give it as much memory as is free, which might be none if it reached 1 / numActiveTasks
val toGrant = math.min(maxToGrant, freeMemory)
taskMemory(taskAttemptId) += toGrant
return toGrant
}
}
0L // Never reached
}
/** Release numBytes bytes for the current task. */
def release(numBytes: Long): Unit = synchronized {
val taskAttemptId = currentTaskAttemptId()
val curMem = taskMemory.getOrElse(taskAttemptId, 0L)
if (curMem < numBytes) {
throw new SparkException(
s"Internal error: release called on ${numBytes} bytes but task only has ${curMem}")
}
taskMemory(taskAttemptId) -= numBytes
notifyAll() // Notify waiters who locked "this" in tryToAcquire that memory has been freed
}
/** Release all memory for the current task and mark it as inactive (e.g. when a task ends). */
def releaseMemoryForThisTask(): Unit = synchronized {
val taskAttemptId = currentTaskAttemptId()
taskMemory.remove(taskAttemptId)
notifyAll() // Notify waiters who locked "this" in tryToAcquire that memory has been freed
}
/** Returns the memory consumption, in bytes, for the current task */
def getMemoryConsumptionForThisTask(): Long = synchronized {
val taskAttemptId = currentTaskAttemptId()
taskMemory.getOrElse(taskAttemptId, 0L)
}
}
private[spark] object ShuffleMemoryManager {
def create(conf: SparkConf, numCores: Int): ShuffleMemoryManager = {
val maxMemory = ShuffleMemoryManager.getMaxMemory(conf)
val pageSize = ShuffleMemoryManager.getPageSize(conf, maxMemory, numCores)
new ShuffleMemoryManager(maxMemory, pageSize)
}
def create(maxMemory: Long, pageSizeBytes: Long): ShuffleMemoryManager = {
new ShuffleMemoryManager(maxMemory, pageSizeBytes)
}
@VisibleForTesting
def createForTesting(maxMemory: Long): ShuffleMemoryManager = {
new ShuffleMemoryManager(maxMemory, 4 * 1024 * 1024)
}
/**
* Figure out the shuffle memory limit from a SparkConf. We currently have both a fraction
* of the memory pool and a safety factor since collections can sometimes grow bigger than
* the size we target before we estimate their sizes again.
*/
private def getMaxMemory(conf: SparkConf): Long = {
val memoryFraction = conf.getDouble("spark.shuffle.memoryFraction", 0.2)
val safetyFraction = conf.getDouble("spark.shuffle.safetyFraction", 0.8)
(Runtime.getRuntime.maxMemory * memoryFraction * safetyFraction).toLong
}
/**
* Sets the page size, in bytes.
*
   * If the user didn't explicitly set "spark.buffer.pageSize", we figure out the default value
* by looking at the number of cores available to the process, and the total amount of memory,
* and then divide it by a factor of safety.
*/
private def getPageSize(conf: SparkConf, maxMemory: Long, numCores: Int): Long = {
val minPageSize = 1L * 1024 * 1024 // 1MB
val maxPageSize = 64L * minPageSize // 64MB
val cores = if (numCores > 0) numCores else Runtime.getRuntime.availableProcessors()
// Because of rounding to next power of 2, we may have safetyFactor as 8 in worst case
val safetyFactor = 16
// TODO(davies): don't round to next power of 2
val size = ByteArrayMethods.nextPowerOf2(maxMemory / cores / safetyFactor)
val default = math.min(maxPageSize, math.max(minPageSize, size))
conf.getSizeAsBytes("spark.buffer.pageSize", default)
}
}
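// A minimal, hypothetical sketch (added for illustration; not part of Spark) of the policy
// described in the class comment, using only the API defined in this file: createForTesting
// builds a 1000-byte pool, and with N = 1 active caller the full pool is available.
private[spark] object ShuffleMemoryManagerExample {
  def main(args: Array[String]): Unit = {
    val manager = ShuffleMemoryManager.createForTesting(maxMemory = 1000)
    val granted = manager.tryToAcquire(400) // grants 400, well under the 1 / N = 1000 cap
    println(s"granted $granted, now holding ${manager.getMemoryConsumptionForThisTask()}")
    manager.release(granted)                // frees the memory and wakes any waiters
    manager.releaseMemoryForThisTask()      // drops this caller from the active-task map
  }
}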
|
ArvinDevel/onlineAggregationOnSparkV2
|
core/src/main/scala/org/apache/spark/shuffle/ShuffleMemoryManager.scala
|
Scala
|
apache-2.0
| 8,511 |
package io.arabesque
import io.arabesque.conf.{Configuration, SparkConfiguration}
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class CubeGraphSuite extends FunSuite with BeforeAndAfterAll {
private val master = "local[2]"
private val appName = "arabesque-spark"
private var sampleGraphPath: String = _
private var sc: SparkContext = _
private var arab: ArabesqueContext = _
private var arabGraph: ArabesqueGraph = _
/** set up spark context */
override def beforeAll: Unit = {
// spark conf and context
val conf = new SparkConf().
setMaster(master).
setAppName(appName)
sc = new SparkContext(conf)
arab = new ArabesqueContext(sc, "warn")
val loader = classOf[SparkArabesqueSuite].getClassLoader
sampleGraphPath = "data/cube.graph"
arabGraph = arab.textFile (sampleGraphPath)
}
/** stop spark context */
override def afterAll: Unit = {
if (sc != null) {
sc.stop()
arab.stop()
}
}
test ("[motifs] arabesque API") {
// Test output for motifs for embedding with size 0 to 3
// Expected output
val numEmbedding = List(0, 8, 12, 24)
for(k <- 0 to (numEmbedding.size - 1)) {
val motifsRes = arabGraph.motifs(k).
set ("num_partitions", 10)
val odags = motifsRes.odags
val embeddings = motifsRes.embeddings
assert(embeddings.count() == numEmbedding(k))
}
}
test ("[clique] arabesque API") {
// Test output for clique for embeddings with size 1 to 3
// Expected output
val numEmbedding = List(0, 8, 12, 0)
for(k <- 0 to (numEmbedding.size - 1)) {
val cliqueRes = arabGraph.cliques(k)
val embeddings = cliqueRes.embeddings
assert(embeddings.count == numEmbedding(k))
}
}
test ("[fsm] arabesque API") {
// Critical test
// Test output for fsm with support 2 for embeddings with size 2 to 3
val support = 2
// Expected output
val numEmbedding = List(0, 0, 9, 24)
for(k <- 0 to (numEmbedding.size -1)) {
val motifsRes = arabGraph.fsm(support, k)
val embeddings = motifsRes.embeddings
assert(embeddings.count == numEmbedding(k))
}
}
test ("[triangles] arabesque API") {
// Test output for triangles
// Expected output
val numTriangles = 0
val trianglesRes = arabGraph.triangles()
val embeddings = trianglesRes.embeddings
assert(embeddings.count == numTriangles)
}
}
|
dccspeed/Arabesque
|
src/test/scala/CubeGraphSuite.scala
|
Scala
|
apache-2.0
| 2,503 |
/* Generated File */
package controllers.admin.store
import com.kyleu.projectile.controllers.{BaseController, ServiceAuthController}
import com.kyleu.projectile.models.module.Application
import com.kyleu.projectile.models.result.RelationCount
import com.kyleu.projectile.models.result.orderBy.OrderBy
import com.kyleu.projectile.models.web.ControllerUtils
import com.kyleu.projectile.services.auth.PermissionService
import com.kyleu.projectile.services.note.NoteService
import com.kyleu.projectile.util.{Credentials, DateUtils}
import com.kyleu.projectile.util.JsonSerializers._
import com.kyleu.projectile.views.html.layout.{card, page}
import models.store.{InventoryRow, InventoryRowResult}
import play.api.http.MimeTypes
import scala.concurrent.{ExecutionContext, Future}
import services.customer.RentalRowService
import services.film.FilmRowService
import services.store.{InventoryRowService, StoreRowService}
@javax.inject.Singleton
class InventoryRowController @javax.inject.Inject() (
override val app: Application, svc: InventoryRowService, noteSvc: NoteService,
rentalRowS: RentalRowService, filmRowS: FilmRowService, storeRowS: StoreRowService
)(implicit ec: ExecutionContext) extends ServiceAuthController(svc) {
PermissionService.registerModel("store", "InventoryRow", "Inventory", Some(models.template.Icons.inventoryRow), "view", "edit")
private[this] val defaultOrderBy = Some("lastUpdate" -> false)
def list(q: Option[String], orderBy: Option[String], orderAsc: Boolean, limit: Option[Int], offset: Option[Int], t: Option[String] = None) = {
withSession("list", ("store", "InventoryRow", "view")) { implicit request => implicit td =>
val startMs = DateUtils.nowMillis
val orderBys = OrderBy.forVals(orderBy, orderAsc, defaultOrderBy).toSeq
searchWithCount(q, orderBys, limit, offset).map(r => renderChoice(t) {
case MimeTypes.HTML => r._2.toList match {
case model :: Nil if q.nonEmpty => Redirect(controllers.admin.store.routes.InventoryRowController.view(model.inventoryId))
case _ => Ok(views.html.admin.store.inventoryRowList(app.cfg(u = Some(request.identity), "store", "inventory"), Some(r._1), r._2, q, orderBys.headOption.map(_.col), orderBys.exists(_.dir.asBool), limit.getOrElse(100), offset.getOrElse(0)))
}
case MimeTypes.JSON => Ok(InventoryRowResult.fromRecords(q, Nil, orderBys, limit, offset, startMs, r._1, r._2).asJson)
case BaseController.MimeTypes.csv => csvResponse("InventoryRow", svc.csvFor(r._1, r._2))
})
}
}
def autocomplete(q: Option[String], orderBy: Option[String], orderAsc: Boolean, limit: Option[Int]) = {
withSession("autocomplete", ("store", "InventoryRow", "view")) { implicit request => implicit td =>
val orderBys = OrderBy.forVals(orderBy, orderAsc, defaultOrderBy).toSeq
search(q, orderBys, limit, None).map(r => Ok(r.map(_.toSummary).asJson))
}
}
def view(inventoryId: Long, t: Option[String] = None) = withSession("view", ("store", "InventoryRow", "view")) { implicit request => implicit td =>
val creds: Credentials = request
val modelF = svc.getByPrimaryKeyRequired(creds, inventoryId)
val notesF = noteSvc.getFor(creds, "InventoryRow", inventoryId)
val filmIdF = modelF.flatMap(m => filmRowS.getByPrimaryKey(creds, m.filmId))
val storeIdF = modelF.flatMap(m => storeRowS.getByPrimaryKey(creds, m.storeId))
filmIdF.flatMap(filmIdR => storeIdF.flatMap(storeIdR =>
notesF.flatMap(notes => modelF.map { model =>
renderChoice(t) {
case MimeTypes.HTML => Ok(views.html.admin.store.inventoryRowView(app.cfg(u = Some(request.identity), "store", "inventory", model.inventoryId.toString), model, notes, filmIdR, storeIdR, app.config.debug))
case MimeTypes.JSON => Ok(model.asJson)
}
})))
}
def editForm(inventoryId: Long) = withSession("edit.form", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
val cancel = controllers.admin.store.routes.InventoryRowController.view(inventoryId)
val call = controllers.admin.store.routes.InventoryRowController.edit(inventoryId)
svc.getByPrimaryKey(request, inventoryId).map {
case Some(model) => Ok(
views.html.admin.store.inventoryRowForm(app.cfg(Some(request.identity), "store", "inventory", "Edit"), model, s"Inventory [$inventoryId]", cancel, call, debug = app.config.debug)
)
case None => NotFound(s"No InventoryRow found with inventoryId [$inventoryId]")
}
}
def edit(inventoryId: Long) = withSession("edit", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
svc.update(request, inventoryId = inventoryId, fields = modelForm(request.body)).map(res => render {
case Accepts.Html() => Redirect(controllers.admin.store.routes.InventoryRowController.view(res._1.inventoryId))
case Accepts.Json() => Ok(res.asJson)
})
}
def remove(inventoryId: Long) = withSession("remove", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
svc.remove(request, inventoryId = inventoryId).map(_ => render {
case Accepts.Html() => Redirect(controllers.admin.store.routes.InventoryRowController.list())
case Accepts.Json() => Ok(io.circe.Json.obj("status" -> io.circe.Json.fromString("removed")))
})
}
def createForm = withSession("create.form", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
val cancel = controllers.admin.store.routes.InventoryRowController.list()
val call = controllers.admin.store.routes.InventoryRowController.create()
Future.successful(Ok(views.html.admin.store.inventoryRowForm(
app.cfg(u = Some(request.identity), "store", "inventory", "Create"), InventoryRow.empty(), "New Inventory", cancel, call, isNew = true, debug = app.config.debug
)))
}
def create = withSession("create", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
svc.create(request, modelForm(request.body)).map {
case Some(model) => Redirect(controllers.admin.store.routes.InventoryRowController.view(model.inventoryId))
case None => Redirect(controllers.admin.store.routes.InventoryRowController.list())
}
}
def bulkEditForm = withSession("bulk.edit.form", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
val act = controllers.admin.store.routes.InventoryRowController.bulkEdit()
Future.successful(Ok(views.html.admin.store.inventoryRowBulkForm(app.cfg(Some(request.identity), "store", "inventory", "Bulk Edit"), Nil, act, debug = app.config.debug)))
}
def bulkEdit = withSession("bulk.edit", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
val form = ControllerUtils.getForm(request.body)
val pks = form("primaryKeys").split("//").map(_.trim).filter(_.nonEmpty).map(_.split("---").map(_.trim).filter(_.nonEmpty).toList).toList
val typed = pks.map(pk => pk.head.toLong)
val changes = modelForm(request.body)
svc.updateBulk(request, typed, changes).map(msg => Ok("OK: " + msg))
}
def byFilmId(filmId: Int, orderBy: Option[String], orderAsc: Boolean, limit: Option[Int], offset: Option[Int], t: Option[String] = None, embedded: Boolean = false) = {
withSession("get.by.filmId", ("store", "InventoryRow", "view")) { implicit request => implicit td =>
val orderBys = OrderBy.forVals(orderBy, orderAsc, defaultOrderBy).toSeq
svc.getByFilmId(request, filmId, orderBys, limit, offset).map(models => renderChoice(t) {
case MimeTypes.HTML =>
val cfg = app.cfg(Some(request.identity), "store", "inventory", "Film Id")
val list = views.html.admin.store.inventoryRowByFilmId(cfg, filmId, models, orderBy, orderAsc, limit.getOrElse(5), offset.getOrElse(0))
if (embedded) { Ok(list) } else { Ok(page(s"Inventories by Film Id [$filmId]", cfg)(card(None)(list))) }
case MimeTypes.JSON => Ok(models.asJson)
case BaseController.MimeTypes.csv => csvResponse("InventoryRow by filmId", svc.csvFor(0, models))
})
}
}
def byFilmIdBulkForm(filmId: Int) = {
withSession("get.by.filmId", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
svc.getByFilmId(request, filmId).map { modelSeq =>
val act = controllers.admin.store.routes.InventoryRowController.bulkEdit()
Ok(views.html.admin.store.inventoryRowBulkForm(app.cfg(Some(request.identity), "store", "inventory", "Bulk Edit"), modelSeq, act, debug = app.config.debug))
}
}
}
def byStoreId(storeId: Int, orderBy: Option[String], orderAsc: Boolean, limit: Option[Int], offset: Option[Int], t: Option[String] = None, embedded: Boolean = false) = {
withSession("get.by.storeId", ("store", "InventoryRow", "view")) { implicit request => implicit td =>
val orderBys = OrderBy.forVals(orderBy, orderAsc, defaultOrderBy).toSeq
svc.getByStoreId(request, storeId, orderBys, limit, offset).map(models => renderChoice(t) {
case MimeTypes.HTML =>
val cfg = app.cfg(Some(request.identity), "store", "inventory", "Store Id")
val list = views.html.admin.store.inventoryRowByStoreId(cfg, storeId, models, orderBy, orderAsc, limit.getOrElse(5), offset.getOrElse(0))
if (embedded) { Ok(list) } else { Ok(page(s"Inventories by Store Id [$storeId]", cfg)(card(None)(list))) }
case MimeTypes.JSON => Ok(models.asJson)
case BaseController.MimeTypes.csv => csvResponse("InventoryRow by storeId", svc.csvFor(0, models))
})
}
}
def byStoreIdBulkForm(storeId: Int) = {
withSession("get.by.storeId", ("store", "InventoryRow", "edit")) { implicit request => implicit td =>
svc.getByStoreId(request, storeId).map { modelSeq =>
val act = controllers.admin.store.routes.InventoryRowController.bulkEdit()
Ok(views.html.admin.store.inventoryRowBulkForm(app.cfg(Some(request.identity), "store", "inventory", "Bulk Edit"), modelSeq, act, debug = app.config.debug))
}
}
}
def relationCounts(inventoryId: Long) = withSession("relation.counts", ("store", "InventoryRow", "view")) { implicit request => implicit td =>
val rentalRowByInventoryIdF = rentalRowS.countByInventoryId(request, inventoryId)
for (rentalRowByInventoryIdC <- rentalRowByInventoryIdF) yield {
Ok(Seq(
RelationCount(model = "rentalRow", field = "inventoryId", count = rentalRowByInventoryIdC)
).asJson)
}
}
}
|
KyleU/boilerplay
|
app/controllers/admin/store/InventoryRowController.scala
|
Scala
|
cc0-1.0
| 10,492 |
package monocle.function
import monocle.MonocleSuite
import monocle.std._
import monocle.syntax._
class InitExample extends MonocleSuite {
test("init creates a Lens from a 2-6 tuple to its tail") {
((2, false) applyLens init get) shouldEqual 2
(('r', false, "lala", 5.6, 7, 4) applyLens init get) shouldEqual (('r', false, "lala", 5.6, 7))
((2, false, "hello") applyLens init set((4, true))) shouldEqual ((4, true, "hello"))
}
}
|
malcolmgreaves/Monocle
|
example/src/test/scala/monocle/function/InitExample.scala
|
Scala
|
mit
| 471 |
package api
import play.api.libs.json._
import model.ComputerSet
import model.FreeStyleBuild
import model.FreeStyleProject
import model.Hudson
import model.ListView
import model.Queue
@javax.annotation.Generated(value = Array("org.openapitools.codegen.languages.ScalaPlayFrameworkServerCodegen"), date = "2022-02-13T02:38:35.589632Z[Etc/UTC]")
trait RemoteAccessApi {
/**
* Retrieve computer details
* @param depth Recursion depth in response model
*/
def getComputer(depth: Int): ComputerSet
/**
* Retrieve Jenkins details
*/
def getJenkins(): Hudson
/**
* Retrieve job details
* @param name Name of the job
*/
def getJob(name: String): FreeStyleProject
/**
* Retrieve job configuration
* @param name Name of the job
*/
def getJobConfig(name: String): String
/**
* Retrieve job's last build details
* @param name Name of the job
*/
def getJobLastBuild(name: String): FreeStyleBuild
/**
* Retrieve job's build progressive text output
* @param name Name of the job
* @param number Build number
* @param start Starting point of progressive text output
*/
def getJobProgressiveText(name: String, number: String, start: String): Unit
/**
* Retrieve queue details
*/
def getQueue(): Queue
/**
* Retrieve queued item details
* @param number Queue number
*/
def getQueueItem(number: String): Queue
/**
* Retrieve view details
* @param name Name of the view
*/
def getView(name: String): ListView
/**
* Retrieve view configuration
* @param name Name of the view
*/
def getViewConfig(name: String): String
/**
* Retrieve Jenkins headers
*/
def headJenkins(): Unit
/**
* Create a new job using job configuration, or copied from an existing job
* @param name Name of the new job
* @param from Existing job to copy from
* @param mode Set to 'copy' for copying an existing job
* @param jenkinsCrumb CSRF protection token
* @param contentType Content type header application/xml
* @param body Job configuration in config.xml format
*/
def postCreateItem(name: String, from: Option[String], mode: Option[String], jenkinsCrumb: Option[String], contentType: Option[String], body: Option[String]): Unit
/**
* Create a new view using view configuration
* @param name Name of the new view
* @param jenkinsCrumb CSRF protection token
* @param contentType Content type header application/xml
* @param body View configuration in config.xml format
*/
def postCreateView(name: String, jenkinsCrumb: Option[String], contentType: Option[String], body: Option[String]): Unit
/**
* Build a job
* @param name Name of the job
* @param jenkinsCrumb CSRF protection token
*/
def postJobBuild(name: String, json: String, token: Option[String], jenkinsCrumb: Option[String]): Unit
/**
* Update job configuration
* @param name Name of the job
* @param body Job configuration in config.xml format
* @param jenkinsCrumb CSRF protection token
*/
def postJobConfig(name: String, body: String, jenkinsCrumb: Option[String]): Unit
/**
* Delete a job
* @param name Name of the job
* @param jenkinsCrumb CSRF protection token
*/
def postJobDelete(name: String, jenkinsCrumb: Option[String]): Unit
/**
* Disable a job
* @param name Name of the job
* @param jenkinsCrumb CSRF protection token
*/
def postJobDisable(name: String, jenkinsCrumb: Option[String]): Unit
/**
* Enable a job
* @param name Name of the job
* @param jenkinsCrumb CSRF protection token
*/
def postJobEnable(name: String, jenkinsCrumb: Option[String]): Unit
/**
* Stop a job
* @param name Name of the job
* @param jenkinsCrumb CSRF protection token
*/
def postJobLastBuildStop(name: String, jenkinsCrumb: Option[String]): Unit
/**
* Update view configuration
* @param name Name of the view
* @param body View configuration in config.xml format
* @param jenkinsCrumb CSRF protection token
*/
def postViewConfig(name: String, body: String, jenkinsCrumb: Option[String]): Unit
}
|
cliffano/swaggy-jenkins
|
clients/scala-play-server/generated/app/api/RemoteAccessApi.scala
|
Scala
|
mit
| 4,222 |
package com.wlangiewicz.workouttracker.services
import com.github.nscala_time.time.Imports._
import com.wlangiewicz.workouttracker.WorkoutTrackerSpec
import com.wlangiewicz.workouttracker.domain.{WorkoutId, SuccessfulRecordWorkoutResponse, RecordWorkoutRequest}
import org.scalatest.BeforeAndAfter
class WorkoutServiceSpec extends WorkoutTrackerSpec with BeforeAndAfter {
"WorkoutService" should "record new workouts" in {
workoutDao.findAllByUser(testingUser.userId).size shouldBe 4
val newWorkoutRequest = RecordWorkoutRequest("testing my new gps watch", 1000, 600, DateTime.now)
workoutService.recordNewWorkout(testingUser.userId, newWorkoutRequest).right.get shouldBe an[SuccessfulRecordWorkoutResponse]
workoutDao.findAllByUser(testingUser.userId).size shouldBe 5
val newWorkout = workoutDao.findAllByUser(testingUser.userId).find(_.workoutId == WorkoutId(5)).get
newWorkout.name shouldBe "testing my new gps watch"
newWorkout.workoutId shouldBe WorkoutId(5)
}
it should "delete workouts" in {
workoutService.deleteWorkout(testingUser, WorkoutId(1))
val workouts = workoutDao.findAllByUser(testingUser.userId)
workouts.map(_.workoutId) should not contain WorkoutId(1)
}
}
|
wlk/workout-tracker-akka-http
|
src/test/scala/com/wlangiewicz/workouttracker/services/WorkoutServiceSpec.scala
|
Scala
|
mit
| 1,233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedAlias, UnresolvedAttribute, UnresolvedRelation, UnresolvedStar}
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType}
import org.apache.spark.sql.catalyst.expressions.{Ascending, Concat, SortOrder}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, RepartitionByExpression, Sort}
import org.apache.spark.sql.catalyst.plans.logical.sql.{DescribeColumnStatement, DescribeTableStatement}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.{CreateTable, RefreshResource}
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType}
/**
* Parser test cases for rules defined in [[SparkSqlParser]].
*
* See [[org.apache.spark.sql.catalyst.parser.PlanParserSuite]] for rules
* defined in the Catalyst module.
*/
class SparkSqlParserSuite extends AnalysisTest {
val newConf = new SQLConf
private lazy val parser = new SparkSqlParser(newConf)
/**
* Normalizes plans:
* - CreateTable the createTime in tableDesc will replaced by -1L.
*/
override def normalizePlan(plan: LogicalPlan): LogicalPlan = {
plan match {
case CreateTable(tableDesc, mode, query) =>
val newTableDesc = tableDesc.copy(createTime = -1L)
CreateTable(newTableDesc, mode, query)
case _ => plan // Don't transform
}
}
private def assertEqual(sqlCommand: String, plan: LogicalPlan): Unit = {
val normalized1 = normalizePlan(parser.parsePlan(sqlCommand))
val normalized2 = normalizePlan(plan)
comparePlans(normalized1, normalized2)
}
private def intercept(sqlCommand: String, messages: String*): Unit =
interceptParseException(parser.parsePlan)(sqlCommand, messages: _*)
test("refresh resource") {
assertEqual("REFRESH prefix_path", RefreshResource("prefix_path"))
assertEqual("REFRESH /", RefreshResource("/"))
assertEqual("REFRESH /path///a", RefreshResource("/path///a"))
assertEqual("REFRESH pat1h/112/_1a", RefreshResource("pat1h/112/_1a"))
assertEqual("REFRESH pat1h/112/_1a/a-1", RefreshResource("pat1h/112/_1a/a-1"))
assertEqual("REFRESH path-with-dash", RefreshResource("path-with-dash"))
    assertEqual("REFRESH \'path with space\'", RefreshResource("path with space"))
    assertEqual("REFRESH \"path with space 2\"", RefreshResource("path with space 2"))
    intercept("REFRESH a b", "REFRESH statements cannot contain")
    intercept("REFRESH a\tb", "REFRESH statements cannot contain")
    intercept("REFRESH a\nb", "REFRESH statements cannot contain")
    intercept("REFRESH a\rb", "REFRESH statements cannot contain")
    intercept("REFRESH a\r\nb", "REFRESH statements cannot contain")
intercept("REFRESH @ $a$", "REFRESH statements cannot contain")
intercept("REFRESH ", "Resource paths cannot be empty in REFRESH statements")
intercept("REFRESH", "Resource paths cannot be empty in REFRESH statements")
}
test("show functions") {
assertEqual("show functions", ShowFunctionsCommand(None, None, true, true))
assertEqual("show all functions", ShowFunctionsCommand(None, None, true, true))
assertEqual("show user functions", ShowFunctionsCommand(None, None, true, false))
assertEqual("show system functions", ShowFunctionsCommand(None, None, false, true))
intercept("show special functions", "SHOW special FUNCTIONS")
assertEqual("show functions foo",
ShowFunctionsCommand(None, Some("foo"), true, true))
assertEqual("show functions foo.bar",
ShowFunctionsCommand(Some("foo"), Some("bar"), true, true))
    assertEqual("show functions 'foo\\\\.*'",
      ShowFunctionsCommand(None, Some("foo\\.*"), true, true))
intercept("show functions foo.bar.baz", "Unsupported function name")
}
test("describe function") {
assertEqual("describe function bar",
DescribeFunctionCommand(FunctionIdentifier("bar", database = None), isExtended = false))
assertEqual("describe function extended bar",
DescribeFunctionCommand(FunctionIdentifier("bar", database = None), isExtended = true))
assertEqual("describe function foo.bar",
DescribeFunctionCommand(
FunctionIdentifier("bar", database = Some("foo")), isExtended = false))
assertEqual("describe function extended f.bar",
DescribeFunctionCommand(FunctionIdentifier("bar", database = Some("f")), isExtended = true))
}
private def createTableUsing(
table: String,
database: Option[String] = None,
tableType: CatalogTableType = CatalogTableType.MANAGED,
storage: CatalogStorageFormat = CatalogStorageFormat.empty,
schema: StructType = new StructType,
provider: Option[String] = Some("parquet"),
partitionColumnNames: Seq[String] = Seq.empty,
bucketSpec: Option[BucketSpec] = None,
mode: SaveMode = SaveMode.ErrorIfExists,
query: Option[LogicalPlan] = None): CreateTable = {
CreateTable(
CatalogTable(
identifier = TableIdentifier(table, database),
tableType = tableType,
storage = storage,
schema = schema,
provider = provider,
partitionColumnNames = partitionColumnNames,
bucketSpec = bucketSpec
), mode, query
)
}
private def createTable(
table: String,
database: Option[String] = None,
tableType: CatalogTableType = CatalogTableType.MANAGED,
storage: CatalogStorageFormat = CatalogStorageFormat.empty.copy(
inputFormat = HiveSerDe.sourceToSerDe("textfile").get.inputFormat,
outputFormat = HiveSerDe.sourceToSerDe("textfile").get.outputFormat,
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")),
schema: StructType = new StructType,
provider: Option[String] = Some("hive"),
partitionColumnNames: Seq[String] = Seq.empty,
comment: Option[String] = None,
mode: SaveMode = SaveMode.ErrorIfExists,
query: Option[LogicalPlan] = None): CreateTable = {
CreateTable(
CatalogTable(
identifier = TableIdentifier(table, database),
tableType = tableType,
storage = storage,
schema = schema,
provider = provider,
partitionColumnNames = partitionColumnNames,
comment = comment
), mode, query
)
}
test("create table - schema") {
assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING)",
createTable(
table = "my_tab",
schema = (new StructType)
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)
)
)
assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) " +
"PARTITIONED BY (c INT, d STRING COMMENT 'test2')",
createTable(
table = "my_tab",
schema = (new StructType)
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)
.add("c", IntegerType)
.add("d", StringType, nullable = true, "test2"),
partitionColumnNames = Seq("c", "d")
)
)
assertEqual("CREATE TABLE my_tab(id BIGINT, nested STRUCT<col1: STRING,col2: INT>)",
createTable(
table = "my_tab",
schema = (new StructType)
.add("id", LongType)
.add("nested", (new StructType)
.add("col1", StringType)
.add("col2", IntegerType)
)
)
)
// Partitioned by a StructType should be accepted by `SparkSqlParser` but will fail an analyze
// rule in `AnalyzeCreateTable`.
assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) " +
"PARTITIONED BY (nested STRUCT<col1: STRING,col2: INT>)",
createTable(
table = "my_tab",
schema = (new StructType)
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)
.add("nested", (new StructType)
.add("col1", StringType)
.add("col2", IntegerType)
),
partitionColumnNames = Seq("nested")
)
)
intercept("CREATE TABLE my_tab(a: INT COMMENT 'test', b: STRING)",
"no viable alternative at input")
}
test("describe query") {
val query = "SELECT * FROM t"
assertEqual("DESCRIBE QUERY " + query, DescribeQueryCommand(query, parser.parsePlan(query)))
assertEqual("DESCRIBE " + query, DescribeQueryCommand(query, parser.parsePlan(query)))
}
test("analyze table statistics") {
assertEqual("analyze table t compute statistics",
AnalyzeTableCommand(TableIdentifier("t"), noscan = false))
assertEqual("analyze table t compute statistics noscan",
AnalyzeTableCommand(TableIdentifier("t"), noscan = true))
assertEqual("analyze table t partition (a) compute statistics nOscAn",
AnalyzePartitionCommand(TableIdentifier("t"), Map("a" -> None), noscan = true))
// Partitions specified
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr=11) COMPUTE STATISTICS",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = false,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> Some("11"))))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr=11) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> Some("11"))))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09') COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> Some("2008-04-09"))))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr) COMPUTE STATISTICS",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = false,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> None)))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> None)))
assertEqual("ANALYZE TABLE t PARTITION(ds, hr=11) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> None, "hr" -> Some("11"))))
assertEqual("ANALYZE TABLE t PARTITION(ds, hr) COMPUTE STATISTICS",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = false,
partitionSpec = Map("ds" -> None, "hr" -> None)))
assertEqual("ANALYZE TABLE t PARTITION(ds, hr) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> None, "hr" -> None)))
intercept("analyze table t compute statistics xxxx",
"Expected `NOSCAN` instead of `xxxx`")
intercept("analyze table t partition (a) compute statistics xxxx",
"Expected `NOSCAN` instead of `xxxx`")
}
test("analyze table column statistics") {
intercept("ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS", "")
assertEqual("ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS key, value",
AnalyzeColumnCommand(TableIdentifier("t"), Option(Seq("key", "value")), allColumns = false))
// Partition specified - should be ignored
assertEqual("ANALYZE TABLE t PARTITION(ds='2017-06-10') " +
"COMPUTE STATISTICS FOR COLUMNS key, value",
AnalyzeColumnCommand(TableIdentifier("t"), Option(Seq("key", "value")), allColumns = false))
// Partition specified should be ignored in case of COMPUTE STATISTICS FOR ALL COLUMNS
assertEqual("ANALYZE TABLE t PARTITION(ds='2017-06-10') " +
"COMPUTE STATISTICS FOR ALL COLUMNS",
AnalyzeColumnCommand(TableIdentifier("t"), None, allColumns = true))
intercept("ANALYZE TABLE t COMPUTE STATISTICS FOR ALL COLUMNS key, value",
"mismatched input 'key' expecting <EOF>")
intercept("ANALYZE TABLE t COMPUTE STATISTICS FOR ALL",
"missing 'COLUMNS' at '<EOF>'")
}
test("query organization") {
// Test all valid combinations of order by/sort by/distribute by/cluster by/limit/windows
val baseSql = "select * from t"
val basePlan =
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(TableIdentifier("t")))
assertEqual(s"$baseSql distribute by a, b",
RepartitionByExpression(UnresolvedAttribute("a") :: UnresolvedAttribute("b") :: Nil,
basePlan,
numPartitions = newConf.numShufflePartitions))
assertEqual(s"$baseSql distribute by a sort by b",
Sort(SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
global = false,
RepartitionByExpression(UnresolvedAttribute("a") :: Nil,
basePlan,
numPartitions = newConf.numShufflePartitions)))
assertEqual(s"$baseSql cluster by a, b",
Sort(SortOrder(UnresolvedAttribute("a"), Ascending) ::
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
global = false,
RepartitionByExpression(UnresolvedAttribute("a") :: UnresolvedAttribute("b") :: Nil,
basePlan,
numPartitions = newConf.numShufflePartitions)))
}
test("pipeline concatenation") {
val concat = Concat(
Concat(UnresolvedAttribute("a") :: UnresolvedAttribute("b") :: Nil) ::
UnresolvedAttribute("c") ::
Nil
)
assertEqual(
"SELECT a || b || c FROM t",
Project(UnresolvedAlias(concat) :: Nil, UnresolvedRelation(TableIdentifier("t"))))
}
test("database and schema tokens are interchangeable") {
assertEqual("CREATE DATABASE foo", parser.parsePlan("CREATE SCHEMA foo"))
assertEqual("DROP DATABASE foo", parser.parsePlan("DROP SCHEMA foo"))
assertEqual("ALTER DATABASE foo SET DBPROPERTIES ('x' = 'y')",
parser.parsePlan("ALTER SCHEMA foo SET DBPROPERTIES ('x' = 'y')"))
assertEqual("DESC DATABASE foo", parser.parsePlan("DESC SCHEMA foo"))
}
}
|
pgandhi999/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala
|
Scala
|
apache-2.0
| 14,866 |
package com.jayway.saaloop.dsl
import org.apache.hadoop.mapreduce.Reducer
/**
* Copyright 2012 Amir Moulavi ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Amir Moulavi
*/
trait SaaloopReducer extends Types {
implicit def reducePairs[K1 <: key, V1 <: value, K2 <: key, V2 <: value] = new Fun[(K1, V1) => (K2, V2), Reducer[K1, V1, K2, V2]] {
def apply(f:(K1, V1) => (K2, V2)) = {
new Reducer[K1, V1, K2, V2] {
def reduce(k1:K1, v1:V1, context:Context) {
val (key, value) = f(k1, v1)
context.write(key, value)
}
}
}
}
implicit def reducePairs2[K1 <: key, V1 <: value, K2 <: key, V2 <: value] = new Fun[(K1, List[V1]) => (K2, V2), Reducer[K1, V1, K2, V2]] {
def apply(f:(K1, List[V1]) => (K2, V2)) = {
new Reducer[K1, V1, K2, V2] {
def reduce(k1:K1, v1:List[V1], context:Context) {
val (key, value) = f(k1, v1)
context.write(key, value)
}
}
}
}
implicit def reduceListPairs[K1 <: key, V1 <: value, K2 <: key, V2 <: value] = new Fun[(K1, V1) => List[(K2, V2)], Reducer[K1, V1, K2, V2]] {
def apply(f:(K1, V1) => List[(K2, V2)]) = {
new Reducer[K1, V1, K2, V2] {
def reduce(k1:K1, v1:V1, context:Context) {
val result = f(k1, v1)
result foreach { p:(K2, V2) => context.write(p._1, p._2) }
}
}
}
}
implicit def reduceListPairs2[K1 <: key, V1 <: value, K2 <: key, V2 <: value] = new Fun[(K1, List[V1]) => List[(K2, V2)], Reducer[K1, V1, K2, V2]] {
def apply(f:(K1, List[V1]) => List[(K2, V2)]) = {
new Reducer[K1, V1, K2, V2] {
def reduce(k1:K1, v1:List[V1], context:Context) {
val result = f(k1, v1)
result foreach { p:(K2, V2) => context.write(p._1, p._2) }
}
}
}
}
object reducer {
def apply[F, K1, V1, K2, V2](f:F)(implicit fun:Fun[F, Reducer[K1, V1, K2, V2]]) = {
fun(f)
}
}
}
|
amir343/saaloop
|
saaloop-core/src/main/scala/com/jayway/saaloop/dsl/SaaloopReducer.scala
|
Scala
|
apache-2.0
| 2,519 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package examples.graphs
import api._
import model._
import scala.Ordering.Implicits._
@emma.lib
object EnumerateTriangles {
def apply[V: Ordering: Meta.Tag](edges: DataBag[Edge[V]]): DataBag[Triangle[V]] = {
    // generate all triangles (x - u - v) such that x < u < v
val triangles = for {
Edge(x, u) <- edges
Edge(y, v) <- edges
Edge(z, w) <- edges
if x < u
if y < v
if z < w
if u == y
if x == z
if v == w
} yield Triangle(x, u, v)
// return
triangles
}
}
|
aalexandrov/emma
|
emma-examples/emma-examples-library/src/main/scala/org/emmalanguage/examples/graphs/EnumerateTriangles.scala
|
Scala
|
apache-2.0
| 1,183 |
package uk.ac.cdrc.mintsearch.index
import uk.ac.cdrc.mintsearch.WeightedLabelSet
import scala.pickling._
import Defaults._
import json._
/**
* Describe the label structure
*/
trait LabelTypeContext {
type L
def labelEncode(label: L): String
def labelEncodeQuery(label: L): String
def JSONfy(wls: WeightedLabelSet[L]): String
def deJSONfy(json: String): WeightedLabelSet[L]
}
/**
* Labels are pairs of key-value
*/
trait KeyValueLabelType extends LabelTypeContext {
override type L = (String, String)
override def labelEncode(label: L): String = s"${label._1}:${label._2}"
  override def labelEncodeQuery(label: L): String = s"${label._1}\\:${label._2}"
override def JSONfy(wls: WeightedLabelSet[(String, String)]): String =
wls.pickle.value
override def deJSONfy(json: String): WeightedLabelSet[(String, String)] =
JSONPickle(json).unpickle[WeightedLabelSet[(String, String)]]
}
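// An illustrative object (hypothetical, not in the original file) showing the two encodings
// defined above: labelEncode joins key and value with a colon, while labelEncodeQuery escapes
// the colon so the pair can be embedded in a query string.
object KeyValueLabelTypeExample extends KeyValueLabelType {
  def main(args: Array[String]): Unit = {
    println(labelEncode(("colour", "red")))      // colour:red
    println(labelEncodeQuery(("colour", "red"))) // colour\:red
  }
}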
|
spacelis/mint-search
|
neo4j-plugin/src/main/scala/uk/ac/cdrc/mintsearch/index/LabelTypeContext.scala
|
Scala
|
apache-2.0
| 918 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.metrics
import java.util.Properties
import java.util.concurrent.TimeUnit
import scala.collection.mutable
import com.codahale.metrics.{Metric, MetricFilter, MetricRegistry}
import org.eclipse.jetty.servlet.ServletContextHandler
import org.apache.spark.{Logging, SecurityManager, SparkConf}
import org.apache.spark.metrics.sink.{MetricsServlet, Sink}
import org.apache.spark.metrics.source.Source
/**
 * Spark Metrics System, created for a specific "instance". It combines sources and sinks and
 * periodically polls metrics data from the sources to the sink destinations.
 *
 * "instance" specifies "who" (which role) uses the metrics system. In Spark there are several
 * roles, such as master, worker, executor, and client driver; each of these roles creates a
 * metrics system for monitoring, so an instance represents one of these roles. Currently,
 * several instances are implemented: master, worker, executor, driver, and applications.
 *
 * "source" specifies "where" to collect metrics data from. There are two kinds of source:
 *   1. Spark internal sources, like MasterSource, WorkerSource, etc., which collect a Spark
 *   component's internal state. These sources are tied to an instance and are added after
 *   the specific metrics system is created.
 *   2. Common sources, like JvmSource, which collect low-level state; these are configured
 *   through configuration properties and loaded via reflection.
 *
 * "sink" specifies "where" (the destination) to output metrics data to. Several sinks can
 * coexist, and metrics are flushed to all of them.
 *
 * The metrics configuration format is as follows:
 * [instance].[sink|source].[name].[options] = xxxx
 *
 * [instance] can be "master", "worker", "executor", "driver", or "applications", which means
 * only the specified instance has this property.
 * The wildcard "*" can be used in place of the instance name, in which case all instances
 * share the property.
 *
 * [sink|source] indicates whether this property belongs to a source or a sink; this field can
 * only be "source" or "sink".
 *
 * [name] specifies the custom-defined name of the sink or source.
 *
 * [options] is the specific property of this source or sink.
*/
private[spark] class MetricsSystem private (
val instance: String,
conf: SparkConf,
securityMgr: SecurityManager)
extends Logging {
private[this] val confFile = conf.get("spark.metrics.conf", null)
private[this] val metricsConfig = new MetricsConfig(Option(confFile))
private val sinks = new mutable.ArrayBuffer[Sink]
private val sources = new mutable.ArrayBuffer[Source]
private val registry = new MetricRegistry()
private var running: Boolean = false
// Treat MetricsServlet as a special sink as it should be exposed to add handlers to web ui
private var metricsServlet: Option[MetricsServlet] = None
/**
* Get any UI handlers used by this metrics system; can only be called after start().
*/
def getServletHandlers: Array[ServletContextHandler] = {
require(running, "Can only call getServletHandlers on a running MetricsSystem")
metricsServlet.map(_.getHandlers).getOrElse(Array())
}
metricsConfig.initialize()
def start() {
require(!running, "Attempting to start a MetricsSystem that is already running")
running = true
registerSources()
registerSinks()
sinks.foreach(_.start)
}
def stop() {
if (running) {
sinks.foreach(_.stop)
} else {
logWarning("Stopping a MetricsSystem that is not running")
}
running = false
}
def report() {
sinks.foreach(_.report())
}
/**
* Build a name that uniquely identifies each metric source.
* The name is structured as follows: <app ID>.<executor ID (or "driver")>.<source name>.
* If either ID is not available, this defaults to just using <source name>.
*
* @param source Metric source to be named by this method.
* @return An unique metric name for each combination of
* application, executor/driver and metric source.
*/
private[spark] def buildRegistryName(source: Source): String = {
val appId = conf.getOption("spark.app.id")
val executorId = conf.getOption("spark.executor.id")
val defaultName = MetricRegistry.name(source.sourceName)
if (instance == "driver" || instance == "executor") {
if (appId.isDefined && executorId.isDefined) {
MetricRegistry.name(appId.get, executorId.get, source.sourceName)
} else {
// Only Driver and Executor set spark.app.id and spark.executor.id.
// Other instance types, e.g. Master and Worker, are not related to a specific application.
val warningMsg = s"Using default name $defaultName for source because %s is not set."
if (appId.isEmpty) { logWarning(warningMsg.format("spark.app.id")) }
if (executorId.isEmpty) { logWarning(warningMsg.format("spark.executor.id")) }
defaultName
}
} else { defaultName }
}
def registerSource(source: Source) {
sources += source
try {
val regName = buildRegistryName(source)
registry.register(regName, source.metricRegistry)
} catch {
case e: IllegalArgumentException => logInfo("Metrics already registered", e)
}
}
def removeSource(source: Source) {
sources -= source
val regName = buildRegistryName(source)
registry.removeMatching(new MetricFilter {
def matches(name: String, metric: Metric): Boolean = name.startsWith(regName)
})
}
private def registerSources() {
val instConfig = metricsConfig.getInstance(instance)
val sourceConfigs = metricsConfig.subProperties(instConfig, MetricsSystem.SOURCE_REGEX)
// Register all the sources related to instance
sourceConfigs.foreach { kv =>
val classPath = kv._2.getProperty("class")
try {
val source = Class.forName(classPath).newInstance()
registerSource(source.asInstanceOf[Source])
} catch {
case e: Exception => logError("Source class " + classPath + " cannot be instantiated", e)
}
}
}
private def registerSinks() {
val instConfig = metricsConfig.getInstance(instance)
val sinkConfigs = metricsConfig.subProperties(instConfig, MetricsSystem.SINK_REGEX)
sinkConfigs.foreach { kv =>
val classPath = kv._2.getProperty("class")
if (null != classPath) {
try {
val sink = Class.forName(classPath)
.getConstructor(classOf[Properties], classOf[MetricRegistry], classOf[SecurityManager])
.newInstance(kv._2, registry, securityMgr)
if (kv._1 == "servlet") {
metricsServlet = Some(sink.asInstanceOf[MetricsServlet])
} else {
sinks += sink.asInstanceOf[Sink]
}
} catch {
case e: Exception => {
            logError("Sink class " + classPath + " cannot be instantiated")
throw e
}
}
}
}
}
}
private[spark] object MetricsSystem {
val SINK_REGEX = "^sink\\.(.+)\\.(.+)".r
val SOURCE_REGEX = "^source\\.(.+)\\.(.+)".r
private[this] val MINIMAL_POLL_UNIT = TimeUnit.SECONDS
private[this] val MINIMAL_POLL_PERIOD = 1
def checkMinimalPollingPeriod(pollUnit: TimeUnit, pollPeriod: Int) {
val period = MINIMAL_POLL_UNIT.convert(pollPeriod, pollUnit)
if (period < MINIMAL_POLL_PERIOD) {
      throw new IllegalArgumentException("Polling period " + pollPeriod + " " + pollUnit +
        " is below the minimal polling period")
}
}
def createMetricsSystem(
instance: String, conf: SparkConf, securityMgr: SecurityManager): MetricsSystem = {
new MetricsSystem(instance, conf, securityMgr)
}
}
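// A hedged illustration of the "[instance].[sink|source].[name].[options]" format documented
// above. The property lines below show the shape of a metrics configuration; the sink/source
// class names and option keys are examples of that shape, not a verified default configuration.
private[spark] object MetricsConfigExample {
  val exampleProperties: String =
    """*.sink.servlet.class=org.apache.spark.metrics.sink.MetricsServlet
      |master.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink
      |master.sink.console.period=10
      |master.sink.console.unit=seconds
      |executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource
      |""".stripMargin
}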
|
andrewor14/iolap
|
core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala
|
Scala
|
apache-2.0
| 8,432 |
package net.kemuridama.kafcon.model
import org.joda.time.DateTime
case class BrokerMetricsLog(
clusterId: Int,
brokerId: Int,
messageInPerSec: MeterMetric = new MeterMetric,
bytesInPerSec: MeterMetric = new MeterMetric,
bytesOutPerSec: MeterMetric = new MeterMetric,
system: SystemMetrics = new SystemMetrics,
created: DateTime = new DateTime
) {
def +(metricsLog: BrokerMetricsLog): CombinedBrokerMetricsLog = {
CombinedBrokerMetricsLog(
messageInPerSec + metricsLog.messageInPerSec,
bytesInPerSec + metricsLog.bytesInPerSec,
bytesOutPerSec + metricsLog.bytesOutPerSec,
if (created.isBefore(metricsLog.created)) created else metricsLog.created
)
}
}
|
kemuridama/kafcon
|
src/main/scala/net/kemuridama/kafcon/model/BrokerMetricsLog.scala
|
Scala
|
mit
| 705 |
// package ohnosequences.mg7.tests
//
// import ohnosequences.mg7._
// import ohnosequences.ncbitaxonomy._, api.{ Taxa => TaxaOps, Taxon => _, _ }
// import ohnosequences.mg7.loquats.countDataProcessing._
// import ohnosequences.mg7.tests.taxonomy._
//
//
// case object countsCtx {
//
// val realCounts = Map[AnyNode, Int](
// c2 -> 4,
// l2 -> 2,
// r1 -> 3,
// r3 -> 5
// )
//
// val ids: Taxa =
// realCounts.flatMap { case (node, count) =>
// List.fill(count)(node.id)
// }.toList
//
// def getLineage(id: Taxon): Taxa = id2node(id).lineage.map(_.id)
//
// val direct: Map[Taxon, (Int, Taxa)] = directCounts(ids, getLineage)
//
// val accumulated: Map[Taxon, (Int, Taxa)] = accumulatedCounts(direct, getLineage)
// }
//
//
// class CountsTest extends org.scalatest.FunSuite {
// import countsCtx._
//
// def assertMapsDiff[A, B](m1: Map[A, B], m2: Map[A, B]): Unit = {
// assertResult( List() ) {
// m1.toList diff m2.toList
// }
// }
//
//
// test("direct counts") {
//
// assertMapsDiff(
// realCounts.map { case (n, c) => n.id -> c },
// direct.map { case (id, (c, _)) => id -> c }
// )
// }
//
// test("accumulated counts") {
//
// // accumulated.foreach{ case (id, i) => info(s"${id}\\t-> ${i}") }
//
// assertMapsDiff(
// accumulated.map { case (id, (c, _)) => id -> c },
// Map(
// root.id -> 14,
// c1.id -> 14,
// c2.id -> 14,
// l1.id -> 2, r1.id -> 8,
// l2.id -> 2, r2.id -> 5,
// r3.id -> 5
// )
// )
// }
//
// }
| ohnosequences/mg7 | src/test/scala/mg7/counts.scala | Scala | agpl-3.0 | 1,621 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events._
import SharedHelpers._
class DeprecatedTimesOnIntSpec extends FunSpec with TimesOnInt {
describe("The TimesOnInt trait") {
it("should allow people to repeat side effects a specified number of times") {
// Need to support this one, because someone may invoke times on an integer variable.
// Probably need to support 0 times as well, but should throw IAE if negative is passed.
var i = 0
0 times { i += 1 }
assert(i === 0)
1 times { i += 1 }
assert(i === 1)
2 times { i += 1 }
assert(i === 3)
3 times { i += 1 }
assert(i === 6)
4 times { i += 1 }
assert(i === 10)
90 times { i += 1 }
assert(i === 100)
}
it("should throw IllegalArgumentException if times is invoked on a negative integer") {
var i = 0
intercept[IllegalArgumentException] {
-1 times { i += 1 }
}
assert(i === 0)
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/DeprecatedTimesOnIntSpec.scala | Scala | apache-2.0 | 1,584 |
package wakfutcp.protocol.messages.server
import wakfutcp.protocol.{Codec, ServerMessage}
final case class CharacterEnterPartitionMessage(worldX: Int, worldY: Int)
extends ServerMessage {
override val id = 4125
}
object CharacterEnterPartitionMessage {
import Codec._
import cats.syntax.apply._
implicit val codec: Codec[CharacterEnterPartitionMessage] =
(int, int).imapN(apply)(Function.unlift(unapply))
}
| OpenWakfu/wakfutcp | protocol/src/main/scala/wakfutcp/protocol/messages/server/CharacterEnterPartitionMessage.scala | Scala | mit | 427 |
/**
* Angles
* Copyright (C) 2014 Sebastian Schelter
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package io.ssc.angles.pipeline.http
import java.security.cert.X509Certificate
import org.apache.http.conn.ssl.TrustStrategy
class TrustEverybodyStrategy extends TrustStrategy {
override def isTrusted(chain: Array[X509Certificate], authType: String): Boolean = true
}
| sscdotopen/angles | src/main/scala/io/ssc/angles/pipeline/http/TrustEverybodyStrategy.scala | Scala | gpl-3.0 | 983 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / / / _ |    (c) 2002-2010, LAMP/EPFL              **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
// generated by genprod on Thu Apr 29 17:52:16 CEST 2010 (with fancy comment)
package scala
/** <p>
* Function with 0 parameters.
* </p>
* <p>
* In the following example the definition of
* <code>currentSeconds</code> is a shorthand for the anonymous class
* definition <code>anonfun0</code>:
* </p>
* <pre>
* <b>object</b> Main <b>extends</b> Application {
*
* <b>val</b> currentSeconds = () => System.currentTimeMillis() / 1000L
*
* <b>val</b> anonfun0 = <b>new</b> Function0[Long] {
* <b>def</b> apply(): Long = System.currentTimeMillis() / 1000L
* }
*
* println(currentSeconds())
* println(anonfun0())
* }</pre>
*/
trait Function0[@specialized +R] extends AnyRef { self =>
def apply(): R
override def toString() = "<function0>"
}
| cran/rkafkajars | java/scala/Function0.scala | Scala | apache-2.0 | 1,329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io._
import scala.util.Properties
import scala.collection.JavaConversions._
import sbt._
import sbt.Classpaths.publishTask
import sbt.Keys._
import sbtunidoc.Plugin.UnidocKeys.unidocGenjavadocVersion
import com.typesafe.sbt.pom.{loadEffectivePom, PomBuild, SbtPomKeys}
import net.virtualvoid.sbt.graph.Plugin.graphSettings
import spray.revolver.RevolverPlugin._
object BuildCommons {
private val buildLocation = file(".").getAbsoluteFile.getParentFile
val allProjects@Seq(bagel, catalyst, core, graphx, hive, hiveThriftServer, mllib, repl,
sql, networkCommon, networkShuffle, streaming, streamingFlumeSink, streamingFlume, streamingKafka,
streamingMqtt, streamingTwitter, streamingZeromq, launcher, unsafe) =
Seq("bagel", "catalyst", "core", "graphx", "hive", "hive-thriftserver", "mllib", "repl",
"sql", "network-common", "network-shuffle", "streaming", "streaming-flume-sink",
"streaming-flume", "streaming-kafka", "streaming-mqtt", "streaming-twitter",
"streaming-zeromq", "launcher", "unsafe").map(ProjectRef(buildLocation, _))
val optionallyEnabledProjects@Seq(yarn, yarnStable, java8Tests, sparkGangliaLgpl,
streamingKinesisAsl) = Seq("yarn", "yarn-stable", "java8-tests", "ganglia-lgpl",
"streaming-kinesis-asl").map(ProjectRef(buildLocation, _))
val assemblyProjects@Seq(assembly, examples, networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingMqttAssembly, streamingKinesisAslAssembly) =
Seq("assembly", "examples", "network-yarn", "streaming-flume-assembly", "streaming-kafka-assembly", "streaming-mqtt-assembly", "streaming-kinesis-asl-assembly")
.map(ProjectRef(buildLocation, _))
val tools = ProjectRef(buildLocation, "tools")
// Root project.
val spark = ProjectRef(buildLocation, "spark")
val sparkHome = buildLocation
val testTempDir = s"$sparkHome/target/tmp"
}
object SparkBuild extends PomBuild {
import BuildCommons._
import scala.collection.mutable.Map
val projectsMap: Map[String, Seq[Setting[_]]] = Map.empty
// Provides compatibility for older versions of the Spark build
def backwardCompatibility = {
import scala.collection.mutable
var isAlphaYarn = false
var profiles: mutable.Seq[String] = mutable.Seq("sbt")
// scalastyle:off println
if (Properties.envOrNone("SPARK_GANGLIA_LGPL").isDefined) {
println("NOTE: SPARK_GANGLIA_LGPL is deprecated, please use -Pspark-ganglia-lgpl flag.")
profiles ++= Seq("spark-ganglia-lgpl")
}
if (Properties.envOrNone("SPARK_HIVE").isDefined) {
println("NOTE: SPARK_HIVE is deprecated, please use -Phive and -Phive-thriftserver flags.")
profiles ++= Seq("hive", "hive-thriftserver")
}
Properties.envOrNone("SPARK_HADOOP_VERSION") match {
case Some(v) =>
if (v.matches("0.23.*")) isAlphaYarn = true
println("NOTE: SPARK_HADOOP_VERSION is deprecated, please use -Dhadoop.version=" + v)
System.setProperty("hadoop.version", v)
case None =>
}
if (Properties.envOrNone("SPARK_YARN").isDefined) {
println("NOTE: SPARK_YARN is deprecated, please use -Pyarn flag.")
profiles ++= Seq("yarn")
}
// scalastyle:on println
profiles
}
override val profiles = {
val profiles = Properties.envOrNone("SBT_MAVEN_PROFILES") match {
case None => backwardCompatibility
case Some(v) =>
if (backwardCompatibility.nonEmpty)
// scalastyle:off println
println("Note: We ignore environment variables, when use of profile is detected in " +
"conjunction with environment variable.")
// scalastyle:on println
v.split("(\\\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq
}
if (System.getProperty("scala-2.11") == "") {
// To activate scala-2.11 profile, replace empty property value to non-empty value
// in the same way as Maven which handles -Dname as -Dname=true before executes build process.
// see: https://github.com/apache/maven/blob/maven-3.0.4/maven-embedder/src/main/java/org/apache/maven/cli/MavenCli.java#L1082
System.setProperty("scala-2.11", "true")
}
profiles
}
Properties.envOrNone("SBT_MAVEN_PROPERTIES") match {
case Some(v) =>
v.split("(\\\\s+|,)").filterNot(_.isEmpty).map(_.split("=")).foreach(x => System.setProperty(x(0), x(1)))
case _ =>
}
override val userPropertiesMap = System.getProperties.toMap
lazy val MavenCompile = config("m2r") extend(Compile)
lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy")
lazy val sparkGenjavadocSettings: Seq[sbt.Def.Setting[_]] = Seq(
libraryDependencies += compilerPlugin(
"org.spark-project" %% "genjavadoc-plugin" % unidocGenjavadocVersion.value cross CrossVersion.full),
scalacOptions <+= target.map(t => "-P:genjavadoc:out=" + (t / "java")))
lazy val sharedSettings = graphSettings ++ sparkGenjavadocSettings ++ Seq (
    // Difference between System.getenv() and System.getProperties():
    // System.getenv() returns OS environment variables (which can be set, e.g., in the current user's ".bashrc").
    // System.getProperties() returns JVM system properties, typically passed via the "-D" command-line option.
javaHome := sys.env.get("JAVA_HOME")
.orElse(sys.props.get("java.home").map { p => new File(p).getParentFile().getAbsolutePath() })
.map(file),
incOptions := incOptions.value.withNameHashing(true),
retrieveManaged := true,
retrievePattern := "[type]s/[artifact](-[revision])(-[classifier]).[ext]",
publishMavenStyle := true,
unidocGenjavadocVersion := "0.9-spark0",
resolvers += Resolver.mavenLocal,
otherResolvers <<= SbtPomKeys.mvnLocalRepository(dotM2 => Seq(Resolver.file("dotM2", dotM2))),
publishLocalConfiguration in MavenCompile <<= (packagedArtifacts, deliverLocal, ivyLoggingLevel) map {
(arts, _, level) => new PublishConfiguration(None, "dotM2", arts, Seq(), level)
},
publishMavenStyle in MavenCompile := true,
publishLocal in MavenCompile <<= publishTask(publishLocalConfiguration in MavenCompile, deliverLocal),
publishLocalBoth <<= Seq(publishLocal in MavenCompile, publishLocal).dependOn,
javacOptions in (Compile, doc) ++= {
      val Array(major, minor, _) = System.getProperty("java.version").split("\\.", 3)
if (major.toInt >= 1 && minor.toInt >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty
},
javacOptions in Compile ++= Seq("-encoding", "UTF-8"),
// Implements -Xfatal-warnings, ignoring deprecation warnings.
// Code snippet taken from https://issues.scala-lang.org/browse/SI-8410.
compile in Compile := {
val analysis = (compile in Compile).value
val s = streams.value
def logProblem(l: (=> String) => Unit, f: File, p: xsbti.Problem) = {
l(f.toString + ":" + p.position.line.fold("")(_ + ":") + " " + p.message)
l(p.position.lineContent)
l("")
}
var failed = 0
analysis.infos.allInfos.foreach { case (k, i) =>
i.reportedProblems foreach { p =>
val deprecation = p.message.contains("is deprecated")
if (!deprecation) {
failed = failed + 1
}
logProblem(if (deprecation) s.log.warn else s.log.error, k, p)
}
}
if (failed > 0) {
sys.error(s"$failed fatal warnings")
}
analysis
}
)
def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = {
val existingSettings = projectsMap.getOrElse(projectRef.project, Seq[Setting[_]]())
projectsMap += (projectRef.project -> (existingSettings ++ settings))
}
// Note ordering of these settings matter.
/* Enable shared settings on all projects */
(allProjects ++ optionallyEnabledProjects ++ assemblyProjects ++ Seq(spark, tools))
.foreach(enable(sharedSettings ++ ExcludedDependencies.settings ++ Revolver.settings))
/* Enable tests settings for all projects except examples, assembly and tools */
(allProjects ++ optionallyEnabledProjects).foreach(enable(TestSettings.settings))
allProjects.filterNot(x => Seq(spark, hive, hiveThriftServer, catalyst, repl,
networkCommon, networkShuffle, networkYarn, unsafe).contains(x)).foreach {
x => enable(MimaBuild.mimaSettings(sparkHome, x))(x)
}
/* Unsafe settings */
enable(Unsafe.settings)(unsafe)
/* Enable Assembly for all assembly projects */
assemblyProjects.foreach(enable(Assembly.settings))
/* Enable Assembly for streamingMqtt test */
enable(inConfig(Test)(Assembly.settings))(streamingMqtt)
/* Package pyspark artifacts in a separate zip file for YARN. */
enable(PySparkAssembly.settings)(assembly)
/* Enable unidoc only for the root spark project */
enable(Unidoc.settings)(spark)
/* Spark SQL Core console settings */
enable(SQL.settings)(sql)
/* Hive console settings */
enable(Hive.settings)(hive)
enable(Flume.settings)(streamingFlumeSink)
/**
* Adds the ability to run the spark shell directly from SBT without building an assembly
* jar.
*
* Usage: `build/sbt sparkShell`
*/
val sparkShell = taskKey[Unit]("start a spark-shell.")
val sparkSql = taskKey[Unit]("starts the spark sql CLI.")
enable(Seq(
connectInput in run := true,
fork := true,
outputStrategy in run := Some (StdoutOutput),
javaOptions ++= Seq("-Xmx2G", "-XX:MaxPermSize=256m"),
sparkShell := {
(runMain in Compile).toTask(" org.apache.spark.repl.Main -usejavacp").value
},
javaOptions in Compile += "-Dspark.master=local",
sparkSql := {
(runMain in Compile).toTask(" org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver").value
}
))(assembly)
enable(Seq(sparkShell := sparkShell in "assembly"))(spark)
// TODO: move this to its upstream project.
override def projectDefinitions(baseDirectory: File): Seq[Project] = {
super.projectDefinitions(baseDirectory).map { x =>
if (projectsMap.exists(_._1 == x.id)) x.settings(projectsMap(x.id): _*)
else x.settings(Seq[Setting[_]](): _*)
} ++ Seq[Project](OldDeps.project)
}
}
object Unsafe {
lazy val settings = Seq(
// This option is needed to suppress warnings from sun.misc.Unsafe usage
javacOptions in Compile += "-XDignore.symbol.file"
)
}
object Flume {
lazy val settings = sbtavro.SbtAvro.avroSettings
}
/**
This excludes library dependencies in sbt, which are specified in maven but are
  not needed by the sbt build.
*/
object ExcludedDependencies {
lazy val settings = Seq(
libraryDependencies ~= { libs => libs.filterNot(_.name == "groovy-all") }
)
}
/**
 * The following project exists only to pull previous artifacts of Spark for generating
 * MiMa ignores. For more information see SPARK-2071.
*/
object OldDeps {
lazy val project = Project("oldDeps", file("dev"), settings = oldDepsSettings)
def versionArtifact(id: String): Option[sbt.ModuleID] = {
val fullId = id + "_2.10"
Some("org.apache.spark" % fullId % "1.2.0")
}
def oldDepsSettings() = Defaults.coreDefaultSettings ++ Seq(
name := "old-deps",
scalaVersion := "2.10.4",
retrieveManaged := true,
retrievePattern := "[type]s/[artifact](-[revision])(-[classifier]).[ext]",
libraryDependencies := Seq("spark-streaming-mqtt", "spark-streaming-zeromq",
"spark-streaming-flume", "spark-streaming-kafka", "spark-streaming-twitter",
"spark-streaming", "spark-mllib", "spark-bagel", "spark-graphx",
"spark-core").map(versionArtifact(_).get intransitive())
)
}
object SQL {
lazy val settings = Seq(
initialCommands in console :=
"""
|import org.apache.spark.SparkContext
|import org.apache.spark.sql.SQLContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
|import org.apache.spark.sql.catalyst.expressions._
|import org.apache.spark.sql.catalyst.plans.logical._
|import org.apache.spark.sql.catalyst.rules._
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.types._
|
|val sc = new SparkContext("local[*]", "dev-shell")
|val sqlContext = new SQLContext(sc)
|import sqlContext.implicits._
|import sqlContext._
""".stripMargin,
cleanupCommands in console := "sc.stop()"
)
}
object Hive {
lazy val settings = Seq(
javaOptions += "-XX:MaxPermSize=256m",
// Specially disable assertions since some Hive tests fail them
javaOptions in Test := (javaOptions in Test).value.filterNot(_ == "-ea"),
// Supporting all SerDes requires us to depend on deprecated APIs, so we turn off the warnings
// only for this subproject.
scalacOptions <<= scalacOptions map { currentOpts: Seq[String] =>
currentOpts.filterNot(_ == "-deprecation")
},
initialCommands in console :=
"""
|import org.apache.spark.SparkContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
|import org.apache.spark.sql.catalyst.expressions._
|import org.apache.spark.sql.catalyst.plans.logical._
|import org.apache.spark.sql.catalyst.rules._
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.hive._
|import org.apache.spark.sql.hive.test.TestHive._
|import org.apache.spark.sql.hive.test.TestHive.implicits._
|import org.apache.spark.sql.types._""".stripMargin,
cleanupCommands in console := "sparkContext.stop()",
// Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce
    // in order to generate golden files. This is only required for developers who are adding
    // new query tests.
fullClasspath in Test := (fullClasspath in Test).value.filterNot { f => f.toString.contains("jcl-over") }
)
}
object Assembly {
import sbtassembly.AssemblyUtils._
import sbtassembly.Plugin._
import AssemblyKeys._
val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.")
lazy val settings = assemblySettings ++ Seq(
test in assembly := {},
hadoopVersion := {
sys.props.get("hadoop.version")
.getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String])
},
jarName in assembly <<= (version, moduleName, hadoopVersion) map { (v, mName, hv) =>
if (mName.contains("streaming-flume-assembly") || mName.contains("streaming-kafka-assembly") || mName.contains("streaming-mqtt-assembly") || mName.contains("streaming-kinesis-asl-assembly")) {
// This must match the same name used in maven (see external/kafka-assembly/pom.xml)
s"${mName}-${v}.jar"
} else {
s"${mName}-${v}-hadoop${hv}.jar"
}
},
jarName in (Test, assembly) <<= (version, moduleName, hadoopVersion) map { (v, mName, hv) =>
s"${mName}-test-${v}.jar"
},
mergeStrategy in assembly := {
case PathList("org", "datanucleus", xs @ _*) => MergeStrategy.discard
case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
case m if m.toLowerCase.matches("meta-inf.*\\\\.sf$") => MergeStrategy.discard
case "log4j.properties" => MergeStrategy.discard
case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines
case "reference.conf" => MergeStrategy.concat
case _ => MergeStrategy.first
}
)
}
object PySparkAssembly {
import sbtassembly.Plugin._
import AssemblyKeys._
import java.util.zip.{ZipOutputStream, ZipEntry}
lazy val settings = Seq(
// Use a resource generator to copy all .py files from python/pyspark into a managed directory
// to be included in the assembly. We can't just add "python/" to the assembly's resource dir
// list since that will copy unneeded / unwanted files.
resourceGenerators in Compile <+= resourceManaged in Compile map { outDir: File =>
val src = new File(BuildCommons.sparkHome, "python/pyspark")
val zipFile = new File(BuildCommons.sparkHome , "python/lib/pyspark.zip")
zipFile.delete()
zipRecursive(src, zipFile)
Seq[File]()
}
)
private def zipRecursive(source: File, destZipFile: File) = {
val destOutput = new ZipOutputStream(new FileOutputStream(destZipFile))
addFilesToZipStream("", source, destOutput)
destOutput.flush()
destOutput.close()
}
private def addFilesToZipStream(parent: String, source: File, output: ZipOutputStream): Unit = {
if (source.isDirectory()) {
output.putNextEntry(new ZipEntry(parent + source.getName()))
for (file <- source.listFiles()) {
addFilesToZipStream(parent + source.getName() + File.separator, file, output)
}
} else {
val in = new FileInputStream(source)
output.putNextEntry(new ZipEntry(parent + source.getName()))
val buf = new Array[Byte](8192)
var n = 0
while (n != -1) {
n = in.read(buf)
if (n != -1) {
output.write(buf, 0, n)
}
}
output.closeEntry()
in.close()
}
}
}
object Unidoc {
import BuildCommons._
import sbtunidoc.Plugin._
import UnidocKeys._
// for easier specification of JavaDoc package groups
private def packageList(names: String*): String = {
names.map(s => "org.apache.spark." + s).mkString(":")
}
private def ignoreUndocumentedPackages(packages: Seq[Seq[File]]): Seq[Seq[File]] = {
packages
.map(_.filterNot(_.getName.contains("$")))
.map(_.filterNot(_.getCanonicalPath.contains("akka")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/deploy")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/network")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/shuffle")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/executor")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/unsafe")))
.map(_.filterNot(_.getCanonicalPath.contains("python")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/collection")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive/test")))
}
lazy val settings = scalaJavaUnidocSettings ++ Seq (
publish := {},
unidocProjectFilter in(ScalaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn),
unidocProjectFilter in(JavaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, bagel, examples, tools, streamingFlumeSink, yarn),
// Skip actual catalyst, but include the subproject.
// Catalyst is not public API and contains quasiquotes which break scaladoc.
unidocAllSources in (ScalaUnidoc, unidoc) := {
ignoreUndocumentedPackages((unidocAllSources in (ScalaUnidoc, unidoc)).value)
},
// Skip class names containing $ and some internal packages in Javadocs
unidocAllSources in (JavaUnidoc, unidoc) := {
ignoreUndocumentedPackages((unidocAllSources in (JavaUnidoc, unidoc)).value)
},
// Javadoc options: create a window title, and group key packages on index page
javacOptions in doc := Seq(
"-windowtitle", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " JavaDoc",
"-public",
"-group", "Core Java API", packageList("api.java", "api.java.function"),
"-group", "Spark Streaming", packageList(
"streaming.api.java", "streaming.flume", "streaming.kafka",
"streaming.mqtt", "streaming.twitter", "streaming.zeromq", "streaming.kinesis"
),
"-group", "MLlib", packageList(
"mllib.classification", "mllib.clustering", "mllib.evaluation.binary", "mllib.linalg",
"mllib.linalg.distributed", "mllib.optimization", "mllib.rdd", "mllib.recommendation",
"mllib.regression", "mllib.stat", "mllib.tree", "mllib.tree.configuration",
"mllib.tree.impurity", "mllib.tree.model", "mllib.util",
"mllib.evaluation", "mllib.feature", "mllib.random", "mllib.stat.correlation",
"mllib.stat.test", "mllib.tree.impl", "mllib.tree.loss",
"ml", "ml.attribute", "ml.classification", "ml.clustering", "ml.evaluation", "ml.feature",
"ml.param", "ml.recommendation", "ml.regression", "ml.tuning"
),
"-group", "Spark SQL", packageList("sql.api.java", "sql.api.java.types", "sql.hive.api.java"),
"-noqualifier", "java.lang"
),
// Group similar methods together based on the @group annotation.
scalacOptions in (ScalaUnidoc, unidoc) ++= Seq("-groups")
)
}
object TestSettings {
import BuildCommons._
lazy val settings = Seq (
// Fork new JVMs for tests and set Java options for those
fork := true,
// Setting SPARK_DIST_CLASSPATH is a simple way to make sure any child processes
// launched by the tests have access to the correct test-time classpath.
envVars in Test ++= Map(
"SPARK_DIST_CLASSPATH" ->
        // stripSuffix drops the given suffix (here a trailing ":") from the end of the string
(fullClasspath in Test).value.files.map(_.getAbsolutePath).mkString(":").stripSuffix(":"),
"JAVA_HOME" -> sys.env.get("JAVA_HOME").getOrElse(sys.props("java.home"))),
javaOptions in Test += s"-Djava.io.tmpdir=$testTempDir",
javaOptions in Test += "-Dspark.test.home=" + sparkHome,
javaOptions in Test += "-Dspark.testing=1",
javaOptions in Test += "-Dspark.port.maxRetries=100",
javaOptions in Test += "-Dspark.master.rest.enabled=false",
javaOptions in Test += "-Dspark.ui.enabled=false",
javaOptions in Test += "-Dspark.ui.showConsoleProgress=false",
javaOptions in Test += "-Dspark.driver.allowMultipleContexts=true",
javaOptions in Test += "-Dspark.unsafe.exceptionOnMemoryLeak=true",
javaOptions in Test += "-Dsun.io.serialization.extendedDebugInfo=true",
javaOptions in Test += "-Dderby.system.durability=test",
javaOptions in Test ++= System.getProperties.filter(_._1 startsWith "spark")
.map { case (k,v) => s"-D$k=$v" }.toSeq,
javaOptions in Test += "-ea",
javaOptions in Test ++= "-Xmx3g -Xss4096k -XX:PermSize=128M -XX:MaxNewSize=256m -XX:MaxPermSize=1g"
.split(" ").toSeq,
javaOptions += "-Xmx3g",
// Show full stack trace and duration in test cases.
testOptions in Test += Tests.Argument("-oDF"),
testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"),
// Enable Junit testing.
libraryDependencies += "com.novocode" % "junit-interface" % "0.9" % "test",
// Only allow one test at a time, even across projects, since they run in the same JVM
parallelExecution in Test := false,
// Make sure the test temp directory exists.
resourceGenerators in Test <+= resourceManaged in Test map { outDir: File =>
if (!new File(testTempDir).isDirectory()) {
require(new File(testTempDir).mkdirs())
}
Seq[File]()
},
concurrentRestrictions in Global += Tags.limit(Tags.Test, 1),
// Remove certain packages from Scaladoc
scalacOptions in (Compile, doc) := Seq(
"-groups",
"-skip-packages", Seq(
"akka",
"org.apache.spark.api.python",
"org.apache.spark.network",
"org.apache.spark.deploy",
"org.apache.spark.util.collection"
).mkString(":"),
"-doc-title", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " ScalaDoc"
)
)
}
| tophua/spark1.52 | project/SparkBuild.scala | Scala | apache-2.0 | 24,970 |
package service
import org.junit.Test;
import org.junit.Assert.assertEquals
class HelloServiceIT
{
@Test
def test1() = assertEquals("Hello2", HelloService.hello2)
}
| scoverage/scoverage-maven-samples | integration-tests/src/test/scala/service/HelloServiceIT.scala | Scala | apache-2.0 | 176 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.chart.graphics
import java.awt.Color
import java.awt.Graphics2D
/**
* Draws a set of elements vertically.
*
* @param elements
* Set of elements to draw within the block.
* @param background
* Fill color to use for the background of the block. If not specified then the elements will
* be drawn directly over the existing content.
*/
case class Block(elements: List[Element], background: Option[Color] = None)
extends Element
with VariableHeight {
override def minHeight: Int = 0
override def computeHeight(g: Graphics2D, width: Int): Int = {
elements.map(_.getHeight(g, width)).sum
}
override def draw(g: Graphics2D, x1: Int, y1: Int, x2: Int, y2: Int): Unit = {
val width = x2 - x1
val height = y2 - y1
background.foreach { c =>
g.setColor(c)
g.fillRect(x1, y1, width, height)
}
var y = y1
elements.foreach { element =>
val h = element.getHeight(g, width)
element.draw(g, x1, y, x1 + width, y + h)
y += h
}
}
}
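// Editor's sketch (not part of the upstream file): a minimal, hypothetical example of how a
// Block might be composed and rendered, assuming some concrete Element implementations are in
// scope (the `rows` parameter stands in for them) and using an off-screen BufferedImage to
// obtain a Graphics2D. The object name `BlockUsageSketch` and the `render` method are invented
// for illustration; only Block, computeHeight and draw come from the code above.
object BlockUsageSketch {
  import java.awt.image.BufferedImage

  def render(rows: List[Element], width: Int): BufferedImage = {
    val block = Block(rows, background = Some(Color.WHITE))
    // Measure first: the total height is the sum of the element heights for this width.
    val probe = new BufferedImage(1, 1, BufferedImage.TYPE_INT_ARGB).createGraphics()
    val height = math.max(1, block.computeHeight(probe, width))
    probe.dispose()
    val img = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB)
    val g = img.createGraphics()
    // draw fills the optional background, then stacks the elements top to bottom.
    block.draw(g, 0, 0, width, height)
    g.dispose()
    img
  }
}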
| Netflix/atlas | atlas-chart/src/main/scala/com/netflix/atlas/chart/graphics/Block.scala | Scala | apache-2.0 | 1,654 |
package net.sansa_stack.rdf.flink.qualityassessment.metrics
import net.sansa_stack.rdf.flink.io._
import org.apache.flink.api.scala.ExecutionEnvironment
import org.scalatest.FunSuite
class FlinkLicensingTests extends FunSuite {
import net.sansa_stack.rdf.flink.qualityassessment._
val env = ExecutionEnvironment.getExecutionEnvironment
test("assessing the human readable license should match") {
val path = getClass.getResource("/data.nt").getPath
val lang = Lang.NTRIPLES
val triples = env.rdf(lang)(path)
val value = triples.assessHumanReadableLicense()
assert(value == 0.0)
}
test("assessing the machine readable license should match") {
val path = getClass.getResource("/data.nt").getPath
val lang = Lang.NTRIPLES
val triples = env.rdf(lang)(path)
val value = triples.assessMachineReadableLicense()
assert(value == 0.0)
}
}
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-flink/src/test/scala/net/sansa_stack/rdf/flink/qualityassessment/metrics/FlinkLicensingTests.scala | Scala | apache-2.0 | 893 |
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package no.uio.musit.healthcheck
import no.uio.musit.test.MusitSpec
class StopWatchSpec extends MusitSpec {
class ListTicker(var values: List[Long]) extends Ticker {
override def tick() = values match {
case head :: Nil =>
head
case head :: tail =>
values = tail
head
case Nil =>
throw new IllegalStateException()
}
}
"StopWatch" when {
"elapsed is called" should {
"calculate from the first tick" in {
val ticker = new ListTicker(List(2, 44))
val sw = StopWatch(ticker)
sw.elapsed() mustBe 42
}
}
}
}
| kpmeen/musit | musit-service/src/test/scala/no/uio/musit/healthcheck/StopWatchSpec.scala | Scala | gpl-2.0 | 1,500 |
package org.joda.time
import org.joda.time.format.DateTimeFormatter
import web.moment._
class DateTime(datetime:Moment) {
def this(l: Long) = this(moment.moment.apply(l))
def this(year: Int, month: Int, day: Int, hour: Int = 0, minutes: Int = 0, seconds: Int = 0, milliseconds: Int = 0) = {
this(moment.moment.apply(f"$year%04d-$month%02d-$day%02d'T'$hour%02d:$minutes%02d:$seconds%02d.$milliseconds%03d",
format = DateTime.millis))
// println(f"$year%04d-$month%02d-$day%02d'T'$hour%02d:$minutes%02d:$seconds%02d.$milliseconds%03d")
}
def getMillis: Long = datetime.milliseconds().toLong
def toString(pattern: String): String = {
datetime.format(pattern)
}
def toString(pattern: DateTimeFormatter): String = {
datetime.format(pattern.pattern)
}
def toStringISO = datetime.format(DateTime.millis)
def year(): Option[Int] = Some(datetime.year().toInt)
def monthOfYear(): Option[Int] = Some(datetime.month().toInt + 1)
def dayOfMonth(): Option[Int] = Some(datetime.date().toInt)
def hourOfDay(): Option[Int] = Some(datetime.hours().toInt)
def minuteOfHour(): Option[Int] = Some(datetime.minutes().toInt)
def secondOfMinute(): Option[Int] = Some(datetime.seconds().toInt)
def millisOfSecond(): Option[Int] = Some(datetime.millisecond().toInt)
}
object DateTime {
val mini = "yyyy-MM-ddTHH:mm:ss"
val millis = "yyyy-MM-ddTHH:mm:ss.SSS"
def parse(input:String, formatter:DateTimeFormatter):DateTime={
new DateTime(moment.moment.apply(input, format = formatter.pattern))
}
def parse(input:String):DateTime={
new DateTime(moment.moment.apply(input, format = millis))
}
def now():DateTime={
new DateTime(moment.moment.utc())
}
}
| easel/play-json-extra | scalajs-joda-time/src/main/scala/org/joda/time/DateTime.scala | Scala | apache-2.0 | 1,724 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.auth
import com.google.inject.{Inject, Singleton}
import play.api.Configuration
@Singleton
class ExternalUrls @Inject()(val runModeConfiguration: Configuration) {
val basGatewayHost = runModeConfiguration.getOptional[String]("gg-urls.bas-gateway.host").getOrElse("")
val loginCallback = runModeConfiguration.getOptional[String]("gg-urls.login-callback.url").getOrElse("")
val signOutCallback = runModeConfiguration.getOptional[String]("gg-urls.signout-callback.url").getOrElse("")
val loginPath = runModeConfiguration.getOptional[String]("gg-urls.login_path").getOrElse("")
val signOutPath = runModeConfiguration.getOptional[String]("gg-urls.signout_path").getOrElse("")
val signIn = s"$basGatewayHost/bas-gateway/$loginPath?continue_url=$loginCallback"
val signOut = s"$basGatewayHost/bas-gateway/$signOutPath?continue=$signOutCallback"
}
| hmrc/gmp-frontend | app/controllers/auth/ExternalUrls.scala | Scala | apache-2.0 | 1,483 |
package java.util.concurrent.locks
import java.util.concurrent.TimeUnit
class ReentrantLock {
def lock() = ()
def unlock() = ()
def tryLock(timeout: Long, unit: TimeUnit): Boolean = true
}
| jmnarloch/akka.js | akka-js-actor/js/src/main/scala/java/util/concurrent/locks/ReentrantLock.scala | Scala | bsd-3-clause | 197 |
package org.jetbrains.plugins.scala.annotator
import com.intellij.psi.PsiFileFactory
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.ScalaFileType
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.project.settings.ScalaCompilerConfiguration
import org.junit.Assert
/**
* Author: Svyatoslav Ilinskiy
* Date: 6/15/15
*/
class SingleAbstractMethodTest extends ScalaLightPlatformCodeInsightTestCaseAdapter {
protected override def setUp() {
super.setUp()
val defaultProfile = ScalaCompilerConfiguration.instanceIn(getProjectAdapter).defaultProfile
val newSettings = defaultProfile.getSettings
newSettings.experimental = true
defaultProfile.setSettings(newSettings)
}
def testBasicGenerics() {
val code =
"""
|trait Blargle[T] {
| def blargle(a: T): Unit
|}
|def foo(a: Blargle[String]) = a.blargle("10")
|foo(x => println(x.charAt(0)))
""".stripMargin
checkCodeHasNoErrors(code)
}
def testTypeInference() {
val code =
"""
| abstract class Foo {
| def bar(i: Int, j: String)
| }
|
| val b: Foo = (i, j) => println(i + j.charAt(0))
""".stripMargin
checkCodeHasNoErrors(code)
}
def testFunctionSAM() {
val code =
"""
|def z() = println()
|val y: Runnable = z
""".stripMargin
checkCodeHasNoErrors(code)
}
def testFunctionNegOne() {
val code =
"""
|def z(): Unit = println()
|val y: Runnable = z()
""".stripMargin
assertMatches(messages(code)) {
case Error("z()", typeMismatch()) :: Error("z()", doesNotConform()):: Nil =>
}
}
def testFunctionNegTwo() {
val code =
"""
|def z: Unit = println()
|val y: Runnable = z
""".stripMargin
assertMatches(messages(code)) {
case Error("z", typeMismatch()) :: Error("z", doesNotConform()) :: Nil =>
}
}
def testFunctionNegThree() {
val code =
"""
|def z(): Unit = println()
|val x = z
|val y: Runnable = x
""".stripMargin
assertMatches(messages(code)) {
case Error("x", typeMismatch()) :: Error("x", doesNotConform()) :: Nil =>
}
}
def testSCL7686(): Unit = {
val code =
"""
|trait FI { def apply(idx: Int): String }
|val a: FI = x => "result: " + x.toString
|println(a(5))
""".stripMargin
checkCodeHasNoErrors(code)
}
def testUnderscoreOne() {
val code =
"""
|trait Foo { def bar(i: Int, s: String): String }
|val f: Foo = _ + _
""".stripMargin
checkCodeHasNoErrors(code)
}
def testUnderscoreTwo() {
val code =
"""
|trait Foo { def bar(s: String): String }
|val i: Foo = _.charAt(0).toString
""".stripMargin
checkCodeHasNoErrors(code)
}
def testSimpleNeg() {
val code =
"""
|trait Foo { def blargle(i: Int): Unit }
|val f: Foo = s => println(s.charAt(0))
""".stripMargin
assertMatches(messages(code)) {
case Error("charAt", cannotResolveSymbol()) :: Nil =>
}
}
def testSimpleNegWrongReturnType() {
val code =
"""
|object T {
| trait Blergh { def apply(i: Int): String }
| ((j: Int) => j): Blergh
|}
""".stripMargin
assertMatches(messages(code)) {
case Error("((j: Int) => j)", typeMismatch()) :: Error("((j: Int) => j)", doesNotConform()) ::
Error("j", doesNotConform()) :: Nil =>
}
}
def testSimpleNegWrongParamNumber() {
val code =
"""
|object T {
| trait Blargle { def apply(i: Int, j: String): String }
| ((i: Int) => "aaa"): Blargle
|}
""".stripMargin
assertMatches(messages(code)) {
case Error("((i: Int) => \"aaa\")", typeMismatch()) :: Error("((i: Int) => \"aaa\")", doesNotConform()) :: Nil =>
}
}
def testSimpleNegWrongParamType() {
val code =
"""
|object T {
| trait Blargle { def apply(i: Int, j: String): String }
| ((i: Int, j: Int) => "aaa"): Blargle
|}
""".stripMargin
assertMatches(messages(code)) {
case Error("((i: Int, j: Int) => \"aaa\")", typeMismatch()) :: Error("((i: Int, j: Int) => \"aaa\")", doesNotConform()) :: Nil =>
}
}
def testSimpleNegRightParamWrongReturn() {
val code =
"""
|object T {
| trait Blergh { def apply(i: Int): String }
| (j => j): Blergh
|}
""".stripMargin
assertMatches(messages(code)) {
case Error("(j => j)", typeMismatch()) :: Error("(j => j)", doesNotConform()) :: Error("j", doesNotConform()) :: Nil =>
}
}
def testConstructorWithArgs() {
val code =
"""
|abstract class Foo(s: String) { def a(): String }
|val f: Foo = () => ""
""".stripMargin
assertMatches(messages(code)) {
case Error("() => \"\"", typeMismatch()) :: Nil =>
}
}
def testImplicitConversionWithSAM() {
val code =
"""
|import scala.language.implicitConversions
|object T {
| trait Foo {
| def bar(): Int
| }
|
| val i: Foo = () => 2
|
| implicit def FooToString(f: Foo): String = f.bar().toString
| wantFoo(i)
| wantString(i)
| wantFoo(() => 4)
| wantString(() => 3)
| def wantFoo(f: Foo) = println(f.bar())
| def wantString(s: String) = println(s)
|}
|
""".stripMargin
assertMatches(messages(code)) {
case Error("() => 3", typeMismatch()) :: Error("wantString", cannotResolveReference()) :: Nil =>
}
}
def testUnimplementedWithSAM(): Unit = {
val code =
"""
|abstract class Foo { def a(): String }
|val f: Foo = () => ???
""".stripMargin
checkCodeHasNoErrors(code)
}
def testConformance(): Unit = {
val code =
"""
|trait SAAM {
| def sam(s: String): Object
|}
|val s: SAAM = (i: Object) => ""
""".stripMargin
checkCodeHasNoErrors(code)
}
def testConformanceNeg(): Unit = {
val code =
"""
|trait SAAM {
| def sam(s: Object): Object
|}
|val s: SAAM = (i: String) => i
""".stripMargin
assertMatches(messages(code)) {
case Error("(i: String) => i", typeMismatch()) :: Nil =>
}
}
def testSimpleThreadRunnable(): Unit = {
val code = "new Thread(() => println()).run()"
checkCodeHasNoErrors(code)
}
def testValueDiscarding(): Unit = {
val code =
"""
|def goo(r: Runnable) = 2
|
|
|goo(() => {1 + 1})
""".stripMargin
checkCodeHasNoErrors(code)
}
def testJavaGenerics(): Unit = {
val code =
"""
|import java.util.concurrent.FutureTask
|
|new FutureTask[String](() => "hi")
""".stripMargin
checkCodeHasNoErrors(code)
}
def testSAMMethodReference(): Unit = {
val code =
"""
|trait F[T, R] {
| def apply(a: T): R
|}
|
|def len(s: String): Int = s.length
|
|val f: F[String, Int] = len
|
""".stripMargin
checkCodeHasNoErrors(code)
}
def testExistentialBounds(): Unit = {
val code =
"""
|trait Blargle[T] {
| def foo(a: T): String
|}
|
|def f(b: Blargle[_ >: Int]) = -1
|f(s => s.toString)
|
|def g[T](b: Blargle[_ >: T]) = -1
|g((s: String) => s)
|
|trait Blergh[T] {
| def foo(): T
|}
|
|def h[T](b: Blergh[_ <: T]) = -1
|h(() => "")
|def i(b: Blergh[_ <: String]) = -1
|i(() => "")
|
""".stripMargin
checkCodeHasNoErrors(code)
}
def testOverload(): Unit = {
val code =
"""
|trait SAMOverload[A] {
| def foo(s: A): Int = ???
|}
|
|def f[T](s: T): Unit = ()
|def f[T](s: T, a: SAMOverload[_ >: T]) = ()
|f("", (s: String) => 2)
|
""".stripMargin
checkCodeHasNoErrors(code)
}
def testJavaSAM(): Unit = {
val scalaCode = "new ObservableCopy(1).mapFunc(x => x + 1)"
val javaCode =
"""
|public interface Func1<T, R> {
| R call(T t);
|}
|
|public class ObservableCopy<T> {
| public ObservableCopy(T t) {}
|
| public final <R> ObservableCopy<R> mapFunc(Func1<? super T, ? extends R> func) {
| return null;
| }
|}
|
""".stripMargin
checkCodeHasNoErrors(scalaCode, Some(javaCode))
}
val etaExpansionPrefix: String =
"""
|def a = () => println()
|def b() = () => println()
|def c = println()
|def d() = println()
|def e: () => Unit = () => println()
|def f(): () => Unit = () => println()
|def g(): Unit = println()
|def h(): Unit = println()
|
""".stripMargin
def testSAMEtaExpansion1(): Unit = {
val code = etaExpansionPrefix + "val a1: Runnable = a"
assertMatches(messages(code)) {
case Error("a", typeMismatch()) :: Error("a", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion2(): Unit = {
val code = etaExpansionPrefix + "val a2: Runnable = a()"
assertMatches(messages(code)) {
case Error("a()", typeMismatch()) :: Error("a()", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion3(): Unit = {
val code = etaExpansionPrefix + "val b1: Runnable = b"
checkCodeHasNoErrors(code)
}
def testSAMEtaExpansion4(): Unit = {
val code = etaExpansionPrefix + "val b2: Runnable = b()"
assertMatches(messages(code)) {
case Error("b()", typeMismatch()) :: Error("b()", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion5(): Unit = {
val code = etaExpansionPrefix + "val c1: Runnable = c"
assertMatches(messages(code)) {
case Error("c", typeMismatch()) :: Error("c", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion6(): Unit = {
val code = etaExpansionPrefix + "val c2: Runnable = c()"
assertMatches(messages(code)) {
case Error("()", doesNotTakeParameters()) :: Nil =>
}
}
def testSAMEtaExpansion7(): Unit = {
val code = etaExpansionPrefix + "val d1: Runnable = d"
checkCodeHasNoErrors(code)
}
def testSAMEtaExpansion8(): Unit = {
val code = etaExpansionPrefix + "val d2: Runnable = d()"
assertMatches(messages(code)) {
case Error("d()", typeMismatch()) :: Error("d()", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion9(): Unit = {
val code = etaExpansionPrefix + "val e1: Runnable = e"
assertMatches(messages(code)) {
case Error("e", typeMismatch()) :: Error("e", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion10(): Unit = {
val code = etaExpansionPrefix + "val e2: Runnable = e()"
assertMatches(messages(code)) {
case Error("e()", typeMismatch()) :: Error("e()", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion11(): Unit = {
val code = etaExpansionPrefix + "val f1: Runnable = f"
checkCodeHasNoErrors(code)
}
def testSAMEtaExpansion12(): Unit = {
val code = etaExpansionPrefix + "val f2: Runnable = f()"
assertMatches(messages(code)) {
case Error("f()", typeMismatch()) :: Error("f()", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion13(): Unit = {
val code = etaExpansionPrefix + "val g1: Runnable = g"
checkCodeHasNoErrors(code)
}
def testSAMEtaExpansion14(): Unit = {
val code = etaExpansionPrefix + "val g2: Runnable = g()"
assertMatches(messages(code)) {
case Error("g()", typeMismatch()) :: Error("g()", doesNotConform()) :: Nil =>
}
}
def testSAMEtaExpansion15(): Unit = {
val code = etaExpansionPrefix + "val h1: Runnable = h"
checkCodeHasNoErrors(code)
}
def testSAMEtaExpansion16(): Unit = {
val code = etaExpansionPrefix + "val h2: Runnable = h()"
assertMatches(messages(code)) {
case Error("h()", typeMismatch()) :: Error("h()", doesNotConform()) :: Nil =>
}
}
def testEtaExpansionImplicit(): Unit = {
val code =
"""
|class A
|class B
|implicit def a2b(a: A): B = new B
|
|abstract class C {
| def foo(): B
|}
|
|def foo(): A = new A
|
|val u: C = foo
|
""".stripMargin
checkCodeHasNoErrors(code)
}
//similar to testEtaExpansion11
def testEtaExpansionUnitReturnWithParams(): Unit = {
val code =
"""
|trait S {
| def foo(i: Int): Unit
|}
|def ss(): Int => Unit = (i: Int) => Unit
|
|val s: S = ss
""".stripMargin
assertMatches(messages(code)) {
case Error("ss", typeMismatch()) :: Error("ss", doesNotConform()) :: Nil =>
}
}
def testOverrideImplementSAM(): Unit = {
val code =
"""
|val s: Bar = () => 2
|
|abstract class Foo {
| def foo(): Int
|}
|
|abstract class Bar extends Foo
|
""".stripMargin
checkCodeHasNoErrors(code)
}
def testOverrideImplementSAM2(): Unit = {
val code =
"""
|val s: Bar = () => 2
|
|abstract class Foo {
| def foo2(): Int
|}
|
|abstract class Bar extends Foo {
| def foo1(): String = ""
|}
|
""".stripMargin
checkCodeHasNoErrors(code)
}
def testSAMComparable(): Unit = {
val code =
"""
|import java.util.Comparator
|
|val comp: Comparator[String] = (o1, o2) => o1.compareTo(o2)
""".stripMargin
checkCodeHasNoErrors(code)
}
def testNotSAM(): Unit = {
val code =
"""
|abstract class U {
| def foo(): Unit
|}
|def z(): U = null
|val x: U = z()
""".stripMargin
checkCodeHasNoErrors(code)
}
def checkCodeHasNoErrors(scalaCode: String, javaCode: Option[String] = None) {
assertMatches(messages(scalaCode, javaCode)) {
case Nil =>
}
}
def messages(@Language("Scala") scalaCode: String, javaCode: Option[String] = None): List[Message] = {
javaCode match {
case Some(s) => configureFromFileTextAdapter("dummy.java", s)
case _ =>
}
val annotator = new ScalaAnnotator() {}
val mock = new AnnotatorHolderMock
val parse: ScalaFile = parseText(scalaCode)
parse.depthFirst.foreach(annotator.annotate(_, mock))
mock.errorAnnotations.filter {
case Error(_, null) => false
case _ => true
}
}
def assertMatches[T](actual: T)(pattern: PartialFunction[T, Unit]) {
Assert.assertTrue("actual: " + actual.toString, pattern.isDefinedAt(actual))
}
def parseText(@Language("Scala") s: String): ScalaFile = {
PsiFileFactory.getInstance(getProjectAdapter)
.createFileFromText("foo" + ScalaFileType.DEFAULT_EXTENSION, ScalaFileType.SCALA_FILE_TYPE, s)
.asInstanceOf[ScalaFile]
}
val cannotResolveSymbol = ContainsPattern("Cannot resolve symbol")
val doesNotConform = ContainsPattern("doesn't conform to expected type")
val typeMismatch = ContainsPattern("Type mismatch")
val cannotResolveReference = ContainsPattern("Cannot resolve reference")
val doesNotTakeParameters = ContainsPattern("does not take parameters")
case class ContainsPattern(fr: String) {
def unapply(s: String) = s.contains(fr)
}
}
| JetBrains/intellij-scala-historical | test/org/jetbrains/plugins/scala/annotator/SingleAbstractMethodTest.scala | Scala | apache-2.0 | 15,837 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp
import cc.factorie.util.BasicEvaluatableClustering
/**
* @author John Sullivan
*/
package object hcoref {
implicit class NodeListUtils[Vars <: NodeVariables[Vars]](val nodes:Iterable[Node[Vars]]) {
private val mentionToRoot = nodes.filter(_.isMention).map(m => m.uniqueId -> m.root.uniqueId)
def predictedClustering = new BasicEvaluatableClustering(mentionToRoot)
def toSingletons() {
nodes.foreach { node =>
node.alterParent(None)(null)
}
}
}
implicit class MentionListUtils[Vars <: NodeVariables[Vars]](val ments:Iterable[Node[Vars]]) extends AnyVal {
def roots = ments.map(_.root).toSet.toSeq
def nonMentionRoots = ments.map(_.root).filterNot(_.isMention).toSet.toSeq
}
implicit class NodeListGroundTruthUtils[Vars <: NodeVariables[Vars] with GroundTruth](val nodes:Iterable[Node[Vars]]) {
//this logic is ugly, but should always be correct for mentions
private lazy val mentionToTruth = nodes.filter(_.isMention)
.map(m => m.uniqueId -> m.variables.truth.iterator.next()._1)
def trueClustering = new BasicEvaluatableClustering(mentionToTruth)
def labeled = nodes.filter(_.variables.truth.size > 0)
}
}
| patverga/factorie | src/main/scala/cc/factorie/app/nlp/hcoref/package.scala | Scala | apache-2.0 | 1,969 |
package ch.epfl.yinyang
package api
import language.experimental.macros
import scala.reflect.macros.blackbox.Context
/**
* Member method-based virtualization of the `AnyRef` API.
*
* This trait provides implementations of the infix methods
* corresponding to the `AnyRef` API that delegate to virtualized
* method calls on the first argument of the infix method.
*
* Example: When faced with an expression of the form `x.eq(y)`, the
* `ch.epfl.yinyang.transformers.LanguageVirtualization` transformation (or the
* `@virtualized` macro annotation) will generate a method call:
* `infix_eq(x, y)`. This method call will be bound to an
* implementation based on normal rules of scoping. If it binds to
* the one in this trait, the corresponding macro will rewrite it to
* `x.__eq(y)`.
*/
trait VirtualAnyRef extends VirtualAny {
import VirtualAnyRef._
// NOTE: Some of the signatures below have "by-val" arguments where
// one would expect "by-name" arguments. However, since these are
// all macros the difference is irrelevant. Furthermore, there's
// currently a bug precluding the use of "by-name" parameters in
// macros (See [[https://issues.scala-lang.org/browse/SI-5778
// SI-5778]]).
// Poor man's infix methods for `AnyRef` methods
def infix_eq(x1: AnyRef, x2: AnyRef): Boolean = macro anyRef_eq
def infix_ne(x1: AnyRef, x2: AnyRef): Boolean = macro anyRef_ne
def infix_notify(x: AnyRef): Unit = macro anyRef_notify
def infix_notifyAll(x: AnyRef): Unit = macro anyRef_notifyAll
def infix_synchronized[T](x: AnyRef, body: T): T = macro anyRef_synchronized[T]
def infix_wait(x: AnyRef): Unit = macro anyRef_wait0
def infix_wait(x: AnyRef, timeout: Long): Unit = macro anyRef_wait1
def infix_wait(x: AnyRef, timeout: Long, nanos: Int): Unit = macro anyRef_wait2
def infix_clone(x: AnyRef): AnyRef = macro anyRef_clone
def infix_finalize(x: AnyRef): Unit = macro anyRef_finalize
}
/**
* VirtualAnyRef companion object containing macro implementations.
*/
private object VirtualAnyRef {
def anyRef_eq(c: Context)(
x1: c.Expr[AnyRef], x2: c.Expr[AnyRef]): c.Expr[Boolean] = {
import c.universe._
c.Expr(q"$x1.__eq($x2)")
}
def anyRef_ne(c: Context)(
x1: c.Expr[AnyRef], x2: c.Expr[AnyRef]): c.Expr[Boolean] = {
import c.universe._
c.Expr(q"$x1.__ne($x2)")
}
def anyRef_notify(c: Context)(x: c.Expr[AnyRef]): c.Expr[Unit] = {
import c.universe._
c.Expr(q"$x.__notify()")
}
def anyRef_notifyAll(c: Context)(x: c.Expr[AnyRef]): c.Expr[Unit] = {
import c.universe._
c.Expr(q"$x.__notifyAll()")
}
def anyRef_synchronized[T](c: Context)(
x: c.Expr[AnyRef], body: c.Expr[T]): c.Expr[T] = {
import c.universe._
c.Expr(q"$x.__synchronized($body)")
}
def anyRef_wait0(c: Context)(x: c.Expr[AnyRef]): c.Expr[Unit] = {
import c.universe._
c.Expr(q"$x.__wait()")
}
def anyRef_wait1(c: Context)(
x: c.Expr[AnyRef], timeout: c.Expr[Long]): c.Expr[Unit] = {
import c.universe._
c.Expr(q"$x.__wait($timeout)")
}
def anyRef_wait2(c: Context)(
x: c.Expr[AnyRef], timeout: c.Expr[Long],
nanos: c.Expr[Int]): c.Expr[Unit] = {
import c.universe._
c.Expr(q"$x.__wait($timeout, $nanos)")
}
def anyRef_clone(c: Context)(x: c.Expr[AnyRef]): c.Expr[AnyRef] = {
import c.universe._
c.Expr(q"$x.__clone()")
}
def anyRef_finalize(c: Context)(x: c.Expr[AnyRef]): c.Expr[Unit] = {
import c.universe._
c.Expr(q"$x.__finalize()")
}
}
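// Editor's sketch (not part of the upstream file): illustrates the rewrite chain described in
// the VirtualAnyRef scaladoc for a hypothetical DSL. User code `x.eq(y)` is first turned into
// `infix_eq(x, y)` by the virtualization transform, and the macro above then emits `x.__eq(y)`,
// so a DSL representation type only has to supply `__eq`. The `Rep` class, `sameNode` method and
// object name below are invented; the macro call is written out in its expanded form because
// macros cannot be expanded in the same compilation run that defines them.
object VirtualAnyRefSketch {
  final class Rep(val label: String) {
    // Target of the macro-generated call: `infix_eq(x, y)` expands to `x.__eq(y)`.
    def __eq(other: Rep): Boolean = label == other.label
    def __ne(other: Rep): Boolean = !__eq(other)
  }

  // What `x.eq(y)` in virtualized user code ultimately evaluates as:
  def sameNode(x: Rep, y: Rep): Boolean = x.__eq(y)
}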
| vjovanov/scala-yinyang | components/yin-yang/src/api/VirtualAnyRef.scala | Scala | bsd-3-clause | 3,527 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.yggdrasil.bytecode
trait Instructions {
type Lib <: Library
val library: Lib
object instructions extends InstructionSet[library.type](library)
}
class InstructionSet[Lib <: Library](val library: Lib) {
import library._
sealed trait Instruction
object RootInstr {
def unapply(in: Instruction): Boolean = in match {
case _: PushString => true
case _: PushNum => true
case PushTrue => true
case PushFalse => true
case PushNull => true
case PushObject => true
case PushArray => true
case _ => false
}
}
sealed trait JoinInstr extends Instruction
final case class Map2Cross(op: BinaryOperation) extends JoinInstr
final case class Map2Match(op: BinaryOperation) extends JoinInstr
final case object Assert extends JoinInstr
final case object IIntersect extends JoinInstr
final case object IUnion extends JoinInstr
final case object Observe extends JoinInstr
final case object SetDifference extends JoinInstr
sealed trait DataInstr extends Instruction
final case class Line(line: Int, col: Int, text: String) extends DataInstr { override def toString = s"<$line:$col>" }
final case class PushNum(num: String) extends DataInstr
final case class PushString(str: String) extends DataInstr
final case class Swap(depth: Int) extends DataInstr
final case object FilterCross extends DataInstr
final case object FilterMatch extends DataInstr
final case class Group(id: Int) extends Instruction
final case class KeyPart(id: Int) extends Instruction
final case class Map1(op: UnaryOperation) extends Instruction
final case class MergeBuckets(and: Boolean) extends Instruction
final case class Morph1(m1: BuiltInMorphism1) extends Instruction
final case class Morph2(m2: BuiltInMorphism2) extends Instruction
final case class PushGroup(id: Int) extends Instruction
final case class PushKey(id: Int) extends Instruction
final case class Reduce(red: BuiltInReduction) extends Instruction
final case object AbsoluteLoad extends Instruction
final case object Distinct extends Instruction
final case object Drop extends Instruction
final case object Dup extends Instruction
final case object Extra extends Instruction
final case object Merge extends Instruction
final case object PushArray extends Instruction
final case object PushFalse extends Instruction
final case object PushNull extends Instruction
final case object PushObject extends Instruction
final case object PushTrue extends Instruction
final case object PushUndefined extends Instruction
final case object RelativeLoad extends Instruction
final case object Split extends Instruction
private def DateNumUnion = JUnionT(JNumberT, JDateT)
private def BinOpType(tp: JType) = BinaryOperationType(tp, tp, tp)
import JType.JUniverseT
sealed abstract class UnaryOperation(val tpe: UnaryOperationType)
sealed abstract class BinaryOperation(val tpe: BinaryOperationType)
sealed abstract class NumericBinaryOperation extends BinaryOperation(BinOpType(JNumberT))
sealed abstract class NumericComparisonOperation extends BinaryOperation(BinaryOperationType(DateNumUnion, DateNumUnion, JBooleanT))
sealed abstract class BooleanBinaryOperation extends BinaryOperation(BinOpType(JBooleanT))
sealed abstract class EqualityOperation extends BinaryOperation(BinaryOperationType(JUniverseT, JUniverseT, JBooleanT))
final case class BuiltInFunction1Op(op: Op1) extends UnaryOperation(op.tpe)
final case class BuiltInFunction2Op(op: Op2) extends BinaryOperation(op.tpe)
final case class BuiltInMorphism1(mor: Morphism1) extends UnaryOperation(mor.tpe)
final case class BuiltInMorphism2(mor: Morphism2) extends BinaryOperation(mor.tpe)
final case class BuiltInReduction(red: Reduction) extends UnaryOperation(red.tpe)
final case object Add extends NumericBinaryOperation
final case object And extends BooleanBinaryOperation
final case object ArraySwap extends BinaryOperation(BinaryOperationType(JArrayUnfixedT, JNumberT, JArrayUnfixedT))
final case object Comp extends UnaryOperation(UnaryOperationType(JBooleanT, JBooleanT))
final case object DerefArray extends BinaryOperation(BinaryOperationType(JArrayUnfixedT, JNumberT, JUniverseT))
final case object DerefMetadata extends BinaryOperation(BinOpType(JUniverseT))
final case object DerefObject extends BinaryOperation(BinaryOperationType(JObjectUnfixedT, JTextT, JUniverseT))
final case object Div extends NumericBinaryOperation
final case object Eq extends EqualityOperation
final case object Gt extends NumericComparisonOperation
final case object GtEq extends NumericComparisonOperation
final case object JoinArray extends BinaryOperation(BinOpType(JArrayUnfixedT))
final case object JoinObject extends BinaryOperation(BinOpType(JObjectUnfixedT))
final case object Lt extends NumericComparisonOperation
final case object LtEq extends NumericComparisonOperation
final case object Mod extends NumericBinaryOperation
final case object Mul extends NumericBinaryOperation
final case object Neg extends UnaryOperation(UnaryOperationType(JNumberT, JNumberT))
final case object New extends UnaryOperation(UnaryOperationType(JUniverseT, JUniverseT))
final case object NotEq extends EqualityOperation
final case object Or extends BooleanBinaryOperation
final case object Pow extends NumericBinaryOperation
final case object Sub extends NumericBinaryOperation
final case object WrapArray extends UnaryOperation(UnaryOperationType(JUniverseT, JArrayUnfixedT))
final case object WrapObject extends BinaryOperation(BinaryOperationType(JTextT, JUniverseT, JObjectUnfixedT))
}
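// Usage sketch, assuming a concrete `Library` instance named `lib` (the name is illustrative
// only): instructions are built from the set, and root instructions can be recognised via the
// `RootInstr` extractor defined above.
//
//   val instrs = new InstructionSet(lib)
//   import instrs._
//   val program = List(PushNum("1"), PushNum("2"), Map2Cross(Add))
//   program.head match {
//     case RootInstr() => // PushNum is a root (stack-introducing) instruction
//     case _           => // joins, filters, data instructions, ...
//   }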
|
drostron/quasar
|
yggdrasil/src/main/scala/quasar/yggdrasil/bytecode/Instructions.scala
|
Scala
|
apache-2.0
| 7,171 |
package com.github.btmorr
package tutorials.spark.stream
/* This version of the app makes repeated requests to the API on a timer and processes the result as a stream.
*/
object NewsStream extends App {
import tutorials.spark.step0.ApiOps._
import tutorials.spark.step0.SparkInit
import tutorials.spark.step1.Schemas.Ops._
  // Before running this app, the NEWSAPI_KEY environment variable must be set.
val newsApiKey = sys.env.getOrElse( "NEWSAPI_KEY", throw new Exception( "NEWSAPI_KEY environment variable must be set before running this application" ) )
val batchFrequencySeconds = 5
val ssc = SparkInit.streamingContext( batchFrequencySeconds )
val source = "bbc-news"
val requestString = s"https://newsapi.org/v1/articles?source=$source&sortBy=top&apiKey=$newsApiKey"
  // make a request to NewsAPI once every `requestFrequencySeconds` seconds
val requestFrequencySeconds = 10
val uriStream = ssc.receiverStream(new PollingSource(requestFrequencySeconds, requestString))
val articleStream = for {
uri <- uriStream
respString = makeNewsApiRequest( uri )
respObj = deserialize(respString)
article <- respObj.articles
} yield article
// todo: replace this print with an updateIfNotExists to a database
articleStream.foreachRDD( rdd => {
rdd collect() foreach prettyPrintArticle
})
ssc.start()
  /* `awaitTerminationOrTimeout` times out after the specified amount of time, even if work is still in progress.
   * Using `awaitTermination` instead allows the stream to run until Ctrl-C is pressed.
*/
//ssc.awaitTerminationOrTimeout(10 * 5 * 1000)
ssc.awaitTermination
val stopSparkContext = true
val shutdownGracefully = true
ssc.stop(stopSparkContext, shutdownGracefully)
}
|
btmorr/scala-boost
|
src/main/scala/tutorials/spark/stream/NewsStream.scala
|
Scala
|
mit
| 1,718 |
package be.cmpg.walk
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import be.cmpg.graph.Gene
import be.cmpg.graph.Interaction
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class PathSpecification extends Specification {
"A Path" should {
"represent a randomwalk state in the current network" in {
"it should be empty on initialization" in {
val path = new Path(startGene = Gene("from"))
path.getVisitedInteractions() must have size(0)
path.currentEndpoint must beEqualTo(Gene("from"))
path.size() must beEqualTo(0)
}
"it should be expandable only when it can take an interaction" in {
val path = new Path(Gene("from"))
val firstInteraction = Interaction(Gene("from"), Gene("to"), "typ", probability = 1)
path.canTakeInteraction(firstInteraction) must beTrue
path.expand(firstInteraction)
path.getVisitedInteractions must contain(allOf(firstInteraction))
path.currentEndpoint must beEqualTo(Gene("to"))
path.size() must beEqualTo(1)
}
"it should throw an IllegalArgumentException when an element is added that cannot used to expand the RandomWalk" in {
val walk = new Path(Gene("from"))
val firstInteraction = Interaction(Gene("from"), Gene("to"), "typ", probability = 1)
walk.expand(firstInteraction)
"when it revisits a node already visited by the RandomWalk" in {
val secondInteraction = Interaction(Gene("to"), Gene("from"), "typ", probability = 1)
walk.canTakeInteraction(secondInteraction) must beFalse
walk.expand(secondInteraction) must throwA[IllegalArgumentException]
}
"when an interaction is added which is not connected with the current endpoint" in {
val secondInteraction = Interaction(Gene("to_1"), Gene("from_1"), "typ", probability = 1)
walk.canTakeInteraction(secondInteraction) must beFalse
walk.expand(secondInteraction) must throwA[IllegalArgumentException]
}
}
"it should be resetable" in {
val walk = new Path(Gene("from"))
val firstInteraction = Interaction(Gene("from"), Gene("to"), "typ", probability = 1)
walk.expand(firstInteraction)
walk.reset
walk.currentEndpoint must beEqualTo(Gene("from"))
walk.getVisitedInteractions must have size(0)
walk.visitedGenes must contain (allOf(Gene("from")))
}
}
}
}
|
spulido99/SSA
|
src/test/scala/be/cmpg/walk/PathSpecification.scala
|
Scala
|
gpl-2.0
| 2,515 |
package object hatedabot{
type BLOG_URL = String
}
|
xuwei-k/hatedabot
|
src/main/scala/package.scala
|
Scala
|
mit
| 58 |
package colang.ast.parsed.expression
import colang.ast.parsed.{Function, LocalContext, Scope, Type}
import colang.ast.raw.{expression => raw}
import colang.issues.{Issue, Issues, Terms}
/**
* Represents a function call.
* @param function called function
* @param arguments function arguments
*/
case class FunctionCall(function: Function,
arguments: Seq[Expression],
rawNode: Option[raw.Expression]) extends Expression {
val type_ = function.returnType
}
object FunctionCall {
// Note that method and constructor calls are also represented by raw.FunctionCall objects.
// This function correctly handles them, creating appropriate MethodCall and ConstructorCall expressions.
def analyze(rawExpr: raw.FunctionCall)(implicit scope: Scope, localContext: LocalContext): (Expression, Seq[Issue]) = {
val function = rawExpr.function
val args = rawExpr.arguments.args
val (parsedFunction, functionIssues) = Expression.analyze(function)
val argsResult = args map Expression.analyze
val parsedArgs = argsResult map { _._1 }
val argsIssues = argsResult flatMap { _._2 }
parsedFunction match {
// Function calls:
case OverloadedFunctionReference(of, _) =>
val (overloadOption, overloadingIssues) = of.resolveOverload(parsedArgs map { _.type_ }, Some(rawExpr.source))
val result = overloadOption match {
case Some(overload) =>
val functionArgs = Type.performImplicitConversions(parsedArgs, overload.parameters map { _.type_ })
FunctionCall(overload, functionArgs, Some(rawExpr))
case None => InvalidExpression()
}
(result, functionIssues ++ argsIssues ++ overloadingIssues)
case FunctionReference(f, _) if f.canBeAppliedTo(parsedArgs map { _.type_ }) =>
val functionArgs = Type.performImplicitConversions(parsedArgs, f.parameters map { _.type_ })
(FunctionCall(f, functionArgs, Some(rawExpr)), functionIssues ++ argsIssues)
case FunctionReference(f, _) =>
val argTypeNames = parsedArgs map { _.type_.qualifiedName }
val issue = Issues.InvalidCallArguments(rawExpr.source, (Terms.Function, argTypeNames))
(InvalidExpression(), functionIssues ++ argsIssues :+ issue)
// Method calls:
case OverloadedMethodAccess(instance, om, _) =>
val (overloadOption, overloadingIssues) = om.resolveOverload(parsedArgs map { _.type_ }, Some(rawExpr.source))
val result = overloadOption match {
case Some(overload) =>
val methodArgs = Type.performImplicitConversions(parsedArgs, overload.parameters map { _.type_ })
MethodCall(overload, instance, methodArgs, Some(rawExpr))
case None => InvalidExpression()
}
(result, functionIssues ++ argsIssues ++ overloadingIssues)
case MethodAccess(instance, m, _) if m.canBeAppliedTo(parsedArgs map { _.type_ }) =>
val methodArgs = Type.performImplicitConversions(parsedArgs, m.parameters map { _.type_ })
(MethodCall(m, instance, methodArgs, Some(rawExpr)), functionIssues ++ argsIssues)
case MethodAccess(instance, m, _) =>
val argTypeNames = parsedArgs map { _.type_.qualifiedName }
val issue = Issues.InvalidCallArguments(rawExpr.source, (Terms.Method, argTypeNames))
(InvalidExpression(), functionIssues ++ argsIssues :+ issue)
// Constructor calls:
case TypeReference(type_, _) =>
val (constructorOption, overloadingIssues) = type_.resolveConstructor(parsedArgs map { _.type_ }, Some(rawExpr.source))
val result = constructorOption match {
case Some(constructor) =>
val constructorArgs = Type.performImplicitConversions(parsedArgs, constructor.parameters map { _.type_ })
ConstructorCall(constructor, constructorArgs, Some(rawExpr))
case None => InvalidExpression()
}
(result, functionIssues ++ argsIssues ++ overloadingIssues)
case _ =>
val issue = Issues.ExpressionIsNotCallable(function.source, ())
(InvalidExpression(), functionIssues ++ argsIssues :+ issue)
}
}
}
|
psenchanka/colang
|
src/main/scala/colang/ast/parsed/expression/FunctionCall.scala
|
Scala
|
mit
| 4,191 |
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2015 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.dcop.algorithms
import com.signalcollect.dcop.modules._
import com.signalcollect.dcop.evaluation._
import com.signalcollect._
import com.signalcollect.configuration.ExecutionMode
class TargetFunctionTemplate(
val name: String,
val state: String,
val extraDefs: Map[String, List[Double]])
//class StateTemplate {
// def name: String
//}
class AdjustmentScheduleTemplate(
val name: String,
val extraDefs: Map[String, List[Double]])
class DecisionRuleTemplate(
val name: String,
val terminationRule: String,
val extraDefs: Map[String, List[Double]])
object AlgorithmsGenerator extends App {
val targetFunctions: List[TargetFunctionTemplate] = List(
new TargetFunctionTemplate("MemoryLessTargetFunction", "SimpleState", Map.empty),
new TargetFunctionTemplate("AverageExpectedUtilityTargetFunction", "SimpleMemoryState", Map.empty),
new TargetFunctionTemplate("WeightedExpectedUtilityTargetFunction", "SimpleMemoryState", Map(("rho", List(0.2, 0.4, 0.6, 0.8)))),
new TargetFunctionTemplate("AverageRegretsTargetFunction", "SimpleMemoryState", Map.empty),
new TargetFunctionTemplate("DiscountedAverageRegretsTargetFunction", "SimpleMemoryState", Map(("rho", List(0.2, 0.4, 0.6, 0.8)))) //new TargetFunctionTemplate("FictitiousPlayTargetFunction", "NeighborMemoryState"),
)
val adjustmentSchedulesSync: List[AdjustmentScheduleTemplate] = List(
new AdjustmentScheduleTemplate("ParallelRandomAdjustmentSchedule", Map(("changeProbability", List(0.2, 0.4, 0.6, 0.8)))),
new AdjustmentScheduleTemplate("FloodAdjustmentSchedule", Map.empty) // new AdjustmentScheduleTemplate("MaximumGainSchedule", Map.empty),
// new AdjustmentScheduleTemplate("SequentialRandomSchedule", Map.empty)
)
val adjustmentSchedulesAsync: List[AdjustmentScheduleTemplate] = List(
new AdjustmentScheduleTemplate("ParallelRandomAdjustmentSchedule", Map(("changeProbability", List(0.95)))),
new AdjustmentScheduleTemplate("FloodAdjustmentSchedule", Map.empty) // new AdjustmentScheduleTemplate("MaximumGainSchedule", Map.empty),
// new AdjustmentScheduleTemplate("SequentialRandomSchedule", Map.empty)
)
val decisionRules: List[DecisionRuleTemplate] = List(
new DecisionRuleTemplate("ArgmaxADecisionRule", "NashEquilibriumConvergence", Map.empty),
new DecisionRuleTemplate("ArgmaxBDecisionRule", "NashEquilibriumConvergence", Map.empty),
new DecisionRuleTemplate("EpsilonGreedyDecisionRule", "NashEquilibriumConvergence", Map(("epsilon", List(0.001, 0.01, 0.1)))),
new DecisionRuleTemplate("SimulatedAnnealingDecisionRule", "SimulatedAnnealingConvergence", Map(("const", List(1000, 1)), ("k", List(2)), ("negDeltaMax", List(-0.01, -0.0001)))),
new DecisionRuleTemplate("LinearProbabilisticDecisionRule", "DistributionConvergence", Map.empty) // new DecisionRuleTemplate("LogisticDecisionRule", "DistributionConvergence", Map(("eta", List(1, 100, 10000))))
)
def combine(a: List[List[(String, Double)]], b: (String, List[Double])): List[List[(String, Double)]] = {
a match {
case head :: tail => for (aElem <- a; bElem <- b._2) yield (b._1, bElem) :: aElem
case other => for (bElem <- b._2) yield List((b._1, bElem))
}
}
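  // For example, with values from the parameter maps above:
  //   combine(Nil, ("rho", List(0.2, 0.4)))
  //     == List(List(("rho", 0.2)), List(("rho", 0.4)))
  //   combine(List(List(("rho", 0.2))), ("epsilon", List(0.001, 0.01)))
  //     == List(List(("epsilon", 0.001), ("rho", 0.2)), List(("epsilon", 0.01), ("rho", 0.2)))
  // i.e. folding `combine` over all parameters yields the cartesian product of their value lists.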
val algorithmsFile = new java.io.FileWriter("src/main/scala/com/signalcollect/dcop/algorithms/MixedAlgorithms.scala")
val listFile = new java.io.FileWriter("src/main/scala/com/signalcollect/dcop/algorithms/AlgorithmsList.scala")
algorithmsFile.write("""
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2015 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* File automatically generated with "AlgorithmsGenerator"
*/
package com.signalcollect.dcop.algorithms
import com.signalcollect.dcop.modules._
import com.signalcollect.dcop.evaluation._
import com.signalcollect._
import com.signalcollect.configuration.ExecutionMode
""")
listFile.write("""
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2015 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* File automatically generated with "AlgorithmsGenerator"
*/
package com.signalcollect.dcop.algorithms
import com.signalcollect.dcop.graph._
import com.signalcollect.dcop.modules._
import com.signalcollect.dcop.algorithms._
import com.signalcollect.dcop.evaluation._
import com.signalcollect._
import com.signalcollect.configuration.ExecutionMode
object MixedAlgorithmList {
val algorithmsSync = List(
""")
def printClass(
file1: java.io.FileWriter,
file2: java.io.FileWriter,
t: TargetFunctionTemplate,
a: AdjustmentScheduleTemplate,
d: DecisionRuleTemplate,
isEnd: Boolean,
isSecondTime: Boolean) = {
val allParams = (t.extraDefs ++ a.extraDefs ++ d.extraDefs).toList
val allCombinations: List[List[(String, Double)]] = allParams.foldLeft(List[List[(String, Double)]]())((a, b) => combine(a, b))
var counter = 0
for (comb <- allCombinations) {
counter += 1
val classNameShort = s"${t.name}_${d.name}_${a.name}"
var className = s""""$classNameShort""""
var defs = ""
var parListClass = ""
var parListList = ""
for (par <- comb) {
className = className + s"""+"${par._1}" + par${par._1} + "" """ //par._2.toString.replaceAll(".", "")
defs = defs + s" def ${par._1} = par${par._1} \\n"
parListClass = parListClass + s"par${par._1}: Double, "
parListList = parListList + s"par${par._1} = ${par._2}, "
}
parListClass = parListClass.subSequence(0, parListClass.size - 2).toString
parListList = parListList.subSequence(0, parListList.size - 2).toString
defs = defs + s""" def algorithmName = $className"""
val theState = if (t.state == "SimpleState" && d.name == "SimulatedAnnealingDecisionRule") {
"SimpleNumberOfCollectsState"
} else if (d.name == "LinearProbabilisticDecisionRule") {
"ExtendedMemoryState"
} else { t.state }
if (counter == allCombinations.size && !isSecondTime)
file1.write(s"""class $classNameShort($parListClass) extends IntAlgorithm
with ${theState}
with VertexColoringUtility
with ${a.name}
with ${d.name}
with ${d.terminationRule}
with ${if (t.name == "MemoryLessTargetFunction" && d.name == "SimulatedAnnealingDecisionRule") "NumberOfCollectsTargetFunction" else t.name}
with SignalCollectAlgorithmBridge
with Execution {
${defs}
}
""")
if (isEnd && counter == allCombinations.size) {
file2.write(s" new $classNameShort($parListList) \\n")
} else {
file2.write(s" new $classNameShort($parListList), \\n")
}
}
}
var counter = 0
val numberOfSync = targetFunctions.size * decisionRules.size * adjustmentSchedulesSync.size
for (asSync <- adjustmentSchedulesSync) {
for (d <- decisionRules) {
for (t <- targetFunctions) {
counter += 1
printClass(algorithmsFile, listFile, t, asSync, d, counter == numberOfSync, isSecondTime = false)
}
}
}
counter = 0
listFile.write("""
)
val algorithmsAsync = List(
""")
for (asAsync <- adjustmentSchedulesAsync) {
for (d <- decisionRules) {
for (t <- targetFunctions) {
counter += 1
printClass(algorithmsFile, listFile, t, asAsync, d, counter == numberOfSync, isSecondTime = true)
}
}
}
listFile.write(""")
}""")
algorithmsFile.close()
listFile.close()
}
|
elaverman/cuilt
|
src/main/scala/com/signalcollect/dcop/algorithms/AlgorithmsGenerator.scala
|
Scala
|
apache-2.0
| 9,435 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.language.existentials
import org.apache.spark.api.java.function.MapFunction
import org.apache.spark.api.r._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.api.r.SQLUtils._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.objects.Invoke
import org.apache.spark.sql.catalyst.plans.logical.FunctionUtils
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
import org.apache.spark.sql.execution.streaming.GroupStateImpl
import org.apache.spark.sql.streaming.GroupStateTimeout
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Physical version of `ObjectProducer`.
*/
trait ObjectProducerExec extends SparkPlan {
  // The attribute that references the single object field this operator outputs.
protected def outputObjAttr: Attribute
override def output: Seq[Attribute] = outputObjAttr :: Nil
override def producedAttributes: AttributeSet = AttributeSet(outputObjAttr)
def outputObjectType: DataType = outputObjAttr.dataType
}
/**
* Physical version of `ObjectConsumer`.
*/
trait ObjectConsumerExec extends UnaryExecNode {
assert(child.output.length == 1)
  // This operator always needs all columns of its child, even if it doesn't reference them.
override def references: AttributeSet = child.outputSet
def inputObjectType: DataType = child.output.head.dataType
}
/**
 * Takes the input row from the child and turns it into an object using the given deserializer expression.
* The output of this operator is a single-field safe row containing the deserialized object.
*/
case class DeserializeToObjectExec(
deserializer: Expression,
outputObjAttr: Attribute,
child: SparkPlan) extends UnaryExecNode with ObjectProducerExec with CodegenSupport {
override def outputPartitioning: Partitioning = child.outputPartitioning
override def inputRDDs(): Seq[RDD[InternalRow]] = {
child.asInstanceOf[CodegenSupport].inputRDDs()
}
protected override def doProduce(ctx: CodegenContext): String = {
child.asInstanceOf[CodegenSupport].produce(ctx, this)
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val bound = ExpressionCanonicalizer.execute(
BindReferences.bindReference(deserializer, child.output))
ctx.currentVars = input
val resultVars = bound.genCode(ctx) :: Nil
consume(ctx, resultVars)
}
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitionsWithIndexInternal { (index, iter) =>
val projection = GenerateSafeProjection.generate(deserializer :: Nil, child.output)
projection.initialize(index)
iter.map(projection)
}
}
}
/**
 * Takes the input object from the child and turns it into an unsafe row using the given serializer
* expression. The output of its child must be a single-field row containing the input object.
*/
case class SerializeFromObjectExec(
serializer: Seq[NamedExpression],
child: SparkPlan) extends ObjectConsumerExec with CodegenSupport {
override def output: Seq[Attribute] = serializer.map(_.toAttribute)
override def outputPartitioning: Partitioning = child.outputPartitioning
override def inputRDDs(): Seq[RDD[InternalRow]] = {
child.asInstanceOf[CodegenSupport].inputRDDs()
}
protected override def doProduce(ctx: CodegenContext): String = {
child.asInstanceOf[CodegenSupport].produce(ctx, this)
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val bound = serializer.map { expr =>
ExpressionCanonicalizer.execute(BindReferences.bindReference(expr, child.output))
}
ctx.currentVars = input
val resultVars = bound.map(_.genCode(ctx))
consume(ctx, resultVars)
}
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitionsWithIndexInternal { (index, iter) =>
val projection = UnsafeProjection.create(serializer)
projection.initialize(index)
iter.map(projection)
}
}
}
/**
* Helper functions for physical operators that work with user defined objects.
*/
object ObjectOperator {
def deserializeRowToObject(
deserializer: Expression,
inputSchema: Seq[Attribute]): InternalRow => Any = {
val proj = GenerateSafeProjection.generate(deserializer :: Nil, inputSchema)
(i: InternalRow) => proj(i).get(0, deserializer.dataType)
}
def deserializeRowToObject(deserializer: Expression): InternalRow => Any = {
val proj = GenerateSafeProjection.generate(deserializer :: Nil)
(i: InternalRow) => proj(i).get(0, deserializer.dataType)
}
def serializeObjectToRow(serializer: Seq[Expression]): Any => UnsafeRow = {
val proj = GenerateUnsafeProjection.generate(serializer)
val objType = serializer.head.collect { case b: BoundReference => b.dataType }.head
val objRow = new SpecificInternalRow(objType :: Nil)
(o: Any) => {
objRow(0) = o
proj(objRow)
}
}
def wrapObjectToRow(objType: DataType): Any => InternalRow = {
val outputRow = new SpecificInternalRow(objType :: Nil)
(o: Any) => {
outputRow(0) = o
outputRow
}
}
def unwrapObjectFromRow(objType: DataType): InternalRow => Any = {
(i: InternalRow) => i.get(0, objType)
}
}
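// Usage sketch for the helpers above (the String value is illustrative only):
// `wrapObjectToRow` and `unwrapObjectFromRow` are inverses for a single-field object row.
// Note that the wrapper reuses a single mutable row instance across calls.
//
//   val tpe    = ObjectType(classOf[String])
//   val wrap   = ObjectOperator.wrapObjectToRow(tpe)
//   val unwrap = ObjectOperator.unwrapObjectFromRow(tpe)
//   val row    = wrap("hello")   // single-field InternalRow holding the object
//   val back   = unwrap(row)     // "hello"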
/**
 * Applies the given function to the input object iterator.
* The output of its child must be a single-field row containing the input object.
*/
case class MapPartitionsExec(
func: Iterator[Any] => Iterator[Any],
outputObjAttr: Attribute,
child: SparkPlan)
extends ObjectConsumerExec with ObjectProducerExec {
override def outputPartitioning: Partitioning = child.outputPartitioning
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitionsInternal { iter =>
val getObject = ObjectOperator.unwrapObjectFromRow(child.output.head.dataType)
val outputObject = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType)
func(iter.map(getObject)).map(outputObject)
}
}
}
/**
* Applies the given function to each input object.
* The output of its child must be a single-field row containing the input object.
*
 * This operator is kind of a safe version of [[ProjectExec]]: as its output is a custom object,
 * we need to use a safe row to contain it.
*/
case class MapElementsExec(
func: AnyRef,
outputObjAttr: Attribute,
child: SparkPlan)
extends ObjectConsumerExec with ObjectProducerExec with CodegenSupport {
override def inputRDDs(): Seq[RDD[InternalRow]] = {
child.asInstanceOf[CodegenSupport].inputRDDs()
}
protected override def doProduce(ctx: CodegenContext): String = {
child.asInstanceOf[CodegenSupport].produce(ctx, this)
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val (funcClass, methodName) = func match {
case m: MapFunction[_, _] => classOf[MapFunction[_, _]] -> "call"
case _ => FunctionUtils.getFunctionOneName(outputObjAttr.dataType, child.output(0).dataType)
}
val funcObj = Literal.create(func, ObjectType(funcClass))
val callFunc = Invoke(funcObj, methodName, outputObjAttr.dataType, child.output)
val bound = ExpressionCanonicalizer.execute(
BindReferences.bindReference(callFunc, child.output))
ctx.currentVars = input
val resultVars = bound.genCode(ctx) :: Nil
consume(ctx, resultVars)
}
override protected def doExecute(): RDD[InternalRow] = {
val callFunc: Any => Any = func match {
case m: MapFunction[_, _] => i => m.asInstanceOf[MapFunction[Any, Any]].call(i)
case _ => func.asInstanceOf[Any => Any]
}
child.execute().mapPartitionsInternal { iter =>
val getObject = ObjectOperator.unwrapObjectFromRow(child.output.head.dataType)
val outputObject = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType)
iter.map(row => outputObject(callFunc(getObject(row))))
}
}
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def outputPartitioning: Partitioning = child.outputPartitioning
}
/**
* Applies the given function to each input row, appending the encoded result at the end of the row.
*/
case class AppendColumnsExec(
func: Any => Any,
deserializer: Expression,
serializer: Seq[NamedExpression],
child: SparkPlan) extends UnaryExecNode {
override def output: Seq[Attribute] = child.output ++ serializer.map(_.toAttribute)
override def outputPartitioning: Partitioning = child.outputPartitioning
private def newColumnSchema = serializer.map(_.toAttribute).toStructType
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitionsInternal { iter =>
val getObject = ObjectOperator.deserializeRowToObject(deserializer, child.output)
val combiner = GenerateUnsafeRowJoiner.create(child.schema, newColumnSchema)
val outputObject = ObjectOperator.serializeObjectToRow(serializer)
iter.map { row =>
val newColumns = outputObject(func(getObject(row)))
combiner.join(row.asInstanceOf[UnsafeRow], newColumns): InternalRow
}
}
}
}
/**
 * An optimized version of [[AppendColumnsExec]] that can be executed
 * on the deserialized object directly.
*/
case class AppendColumnsWithObjectExec(
func: Any => Any,
inputSerializer: Seq[NamedExpression],
newColumnsSerializer: Seq[NamedExpression],
child: SparkPlan) extends ObjectConsumerExec {
override def output: Seq[Attribute] = (inputSerializer ++ newColumnsSerializer).map(_.toAttribute)
override def outputPartitioning: Partitioning = child.outputPartitioning
private def inputSchema = inputSerializer.map(_.toAttribute).toStructType
private def newColumnSchema = newColumnsSerializer.map(_.toAttribute).toStructType
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitionsInternal { iter =>
val getChildObject = ObjectOperator.unwrapObjectFromRow(child.output.head.dataType)
val outputChildObject = ObjectOperator.serializeObjectToRow(inputSerializer)
val outputNewColumnOjb = ObjectOperator.serializeObjectToRow(newColumnsSerializer)
val combiner = GenerateUnsafeRowJoiner.create(inputSchema, newColumnSchema)
iter.map { row =>
val childObj = getChildObject(row)
val newColumns = outputNewColumnOjb(func(childObj))
combiner.join(outputChildObject(childObj), newColumns): InternalRow
}
}
}
}
/**
* Groups the input rows together and calls the function with each group and an iterator containing
* all elements in the group. The result of this function is flattened before being output.
*/
case class MapGroupsExec(
func: (Any, Iterator[Any]) => TraversableOnce[Any],
keyDeserializer: Expression,
valueDeserializer: Expression,
groupingAttributes: Seq[Attribute],
dataAttributes: Seq[Attribute],
outputObjAttr: Attribute,
child: SparkPlan) extends UnaryExecNode with ObjectProducerExec {
override def outputPartitioning: Partitioning = child.outputPartitioning
override def requiredChildDistribution: Seq[Distribution] =
ClusteredDistribution(groupingAttributes) :: Nil
override def requiredChildOrdering: Seq[Seq[SortOrder]] =
Seq(groupingAttributes.map(SortOrder(_, Ascending)))
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitionsInternal { iter =>
val grouped = GroupedIterator(iter, groupingAttributes, child.output)
val getKey = ObjectOperator.deserializeRowToObject(keyDeserializer, groupingAttributes)
val getValue = ObjectOperator.deserializeRowToObject(valueDeserializer, dataAttributes)
val outputObject = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType)
grouped.flatMap { case (key, rowIter) =>
val result = func(
getKey(key),
rowIter.map(getValue))
result.map(outputObject)
}
}
}
}
object MapGroupsExec {
def apply(
func: (Any, Iterator[Any], LogicalGroupState[Any]) => TraversableOnce[Any],
keyDeserializer: Expression,
valueDeserializer: Expression,
groupingAttributes: Seq[Attribute],
dataAttributes: Seq[Attribute],
outputObjAttr: Attribute,
timeoutConf: GroupStateTimeout,
child: SparkPlan): MapGroupsExec = {
val f = (key: Any, values: Iterator[Any]) => {
func(key, values, GroupStateImpl.createForBatch(timeoutConf))
}
new MapGroupsExec(f, keyDeserializer, valueDeserializer,
groupingAttributes, dataAttributes, outputObjAttr, child)
}
}
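// Orientation note (the planner mapping stated here is an assumption based on the typed
// Dataset API): a typed group-wise transformation such as
//   ds.groupByKey(_.key).flatMapGroups { (key, rows) => ... }
// is the user-facing call that ends up being executed by MapGroupsExec.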
/**
* Groups the input rows together and calls the R function with each group and an iterator
* containing all elements in the group.
* The result of this function is flattened before being output.
*/
case class FlatMapGroupsInRExec(
func: Array[Byte],
packageNames: Array[Byte],
broadcastVars: Array[Broadcast[Object]],
inputSchema: StructType,
outputSchema: StructType,
keyDeserializer: Expression,
valueDeserializer: Expression,
groupingAttributes: Seq[Attribute],
dataAttributes: Seq[Attribute],
outputObjAttr: Attribute,
child: SparkPlan) extends UnaryExecNode with ObjectProducerExec {
override def output: Seq[Attribute] = outputObjAttr :: Nil
override def outputPartitioning: Partitioning = child.outputPartitioning
override def producedAttributes: AttributeSet = AttributeSet(outputObjAttr)
override def requiredChildDistribution: Seq[Distribution] =
ClusteredDistribution(groupingAttributes) :: Nil
override def requiredChildOrdering: Seq[Seq[SortOrder]] =
Seq(groupingAttributes.map(SortOrder(_, Ascending)))
override protected def doExecute(): RDD[InternalRow] = {
val isSerializedRData =
if (outputSchema == SERIALIZED_R_DATA_SCHEMA) true else false
val serializerForR = if (!isSerializedRData) {
SerializationFormats.ROW
} else {
SerializationFormats.BYTE
}
child.execute().mapPartitionsInternal { iter =>
val grouped = GroupedIterator(iter, groupingAttributes, child.output)
val getKey = ObjectOperator.deserializeRowToObject(keyDeserializer, groupingAttributes)
val getValue = ObjectOperator.deserializeRowToObject(valueDeserializer, dataAttributes)
val outputObject = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType)
val runner = new RRunner[Array[Byte]](
func, SerializationFormats.ROW, serializerForR, packageNames, broadcastVars,
isDataFrame = true, colNames = inputSchema.fieldNames,
mode = RRunnerModes.DATAFRAME_GAPPLY)
val groupedRBytes = grouped.map { case (key, rowIter) =>
val deserializedIter = rowIter.map(getValue)
val newIter =
deserializedIter.asInstanceOf[Iterator[Row]].map { row => rowToRBytes(row) }
val newKey = rowToRBytes(getKey(key).asInstanceOf[Row])
(newKey, newIter)
}
val outputIter = runner.compute(groupedRBytes, -1)
if (!isSerializedRData) {
val result = outputIter.map { bytes => bytesToRow(bytes, outputSchema) }
result.map(outputObject)
} else {
val result = outputIter.map { bytes => Row.fromSeq(Seq(bytes)) }
result.map(outputObject)
}
}
}
}
/**
* Co-groups the data from left and right children, and calls the function with each group and 2
* iterators containing all elements in the group from left and right side.
* The result of this function is flattened before being output.
*/
case class CoGroupExec(
func: (Any, Iterator[Any], Iterator[Any]) => TraversableOnce[Any],
keyDeserializer: Expression,
leftDeserializer: Expression,
rightDeserializer: Expression,
leftGroup: Seq[Attribute],
rightGroup: Seq[Attribute],
leftAttr: Seq[Attribute],
rightAttr: Seq[Attribute],
outputObjAttr: Attribute,
left: SparkPlan,
right: SparkPlan) extends BinaryExecNode with ObjectProducerExec {
override def requiredChildDistribution: Seq[Distribution] =
ClusteredDistribution(leftGroup) :: ClusteredDistribution(rightGroup) :: Nil
override def requiredChildOrdering: Seq[Seq[SortOrder]] =
leftGroup.map(SortOrder(_, Ascending)) :: rightGroup.map(SortOrder(_, Ascending)) :: Nil
override protected def doExecute(): RDD[InternalRow] = {
left.execute().zipPartitions(right.execute()) { (leftData, rightData) =>
val leftGrouped = GroupedIterator(leftData, leftGroup, left.output)
val rightGrouped = GroupedIterator(rightData, rightGroup, right.output)
val getKey = ObjectOperator.deserializeRowToObject(keyDeserializer, leftGroup)
val getLeft = ObjectOperator.deserializeRowToObject(leftDeserializer, leftAttr)
val getRight = ObjectOperator.deserializeRowToObject(rightDeserializer, rightAttr)
val outputObject = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType)
new CoGroupedIterator(leftGrouped, rightGrouped, leftGroup).flatMap {
case (key, leftResult, rightResult) =>
val result = func(
getKey(key),
leftResult.map(getLeft),
rightResult.map(getRight))
result.map(outputObject)
}
}
}
}
|
aokolnychyi/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/objects.scala
|
Scala
|
apache-2.0
| 18,323 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka.index
import com.typesafe.scalalogging.StrictLogging
import org.geotools.data.simple.SimpleFeatureSource
import org.geotools.data.{FeatureEvent, FeatureListener}
import org.locationtech.geomesa.kafka.utils.KafkaFeatureEvent
import org.opengis.feature.simple.SimpleFeature
import java.util.Collections
import java.util.concurrent.ConcurrentHashMap
import scala.util.control.NonFatal
trait KafkaListeners extends StrictLogging {
import scala.collection.JavaConverters._
// use a flag instead of checking listeners.isEmpty, which is slightly expensive for ConcurrentHashMap
@volatile
private var hasListeners = false
private val listeners = {
val map = new ConcurrentHashMap[(SimpleFeatureSource, FeatureListener), java.lang.Boolean]()
Collections.newSetFromMap(map).asScala
}
def addListener(source: SimpleFeatureSource, listener: FeatureListener): Unit = synchronized {
listeners.add((source, listener))
hasListeners = true
}
def removeListener(source: SimpleFeatureSource, listener: FeatureListener): Unit = synchronized {
listeners.remove((source, listener))
hasListeners = listeners.nonEmpty
}
private[kafka] def fireChange(timestamp: Long, feature: SimpleFeature): Unit = {
if (hasListeners) {
fireEvent(KafkaFeatureEvent.changed(_, feature, timestamp))
}
}
private[kafka] def fireDelete(timestamp: Long, id: String, removed: => SimpleFeature): Unit = {
if (hasListeners) {
fireEvent(KafkaFeatureEvent.removed(_, id, removed, timestamp))
}
}
private[kafka] def fireClear(timestamp: Long): Unit = {
if (hasListeners) {
fireEvent(KafkaFeatureEvent.cleared(_, timestamp))
}
}
private def fireEvent(toEvent: SimpleFeatureSource => FeatureEvent): Unit = {
listeners.foreach { case (source, listener) =>
val event = toEvent(source)
try { listener.changed(event) } catch {
case NonFatal(e) => logger.error(s"Error in feature listener for $event", e)
}
}
}
}
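// Usage sketch (assumes `store` is a concrete implementation mixing in this trait, and
// `featureSource` is the SimpleFeatureSource the events should be attributed to):
//
//   store.addListener(featureSource, new FeatureListener {
//     override def changed(event: FeatureEvent): Unit = println(s"received $event")
//   })
//   // ... later, when the consumer observes an update (internal to the kafka package):
//   // store.fireChange(System.currentTimeMillis(), feature)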
|
locationtech/geomesa
|
geomesa-kafka/geomesa-kafka-datastore/src/main/scala/org/locationtech/geomesa/kafka/index/KafkaListeners.scala
|
Scala
|
apache-2.0
| 2,506 |
/**
* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.tensorflow.spark.datasources.tfrecords
import java.io._
import java.nio.file.Paths
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, NullWritable}
import org.apache.spark.rdd.RDD
import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.StructType
import org.tensorflow.hadoop.io.TFRecordFileOutputFormat
import org.tensorflow.hadoop.util._
import org.tensorflow.spark.datasources.tfrecords.serde.DefaultTfRecordRowEncoder
/**
* Provides access to TensorFlow record source
*/
class DefaultSource extends DataSourceRegister
with CreatableRelationProvider
with RelationProvider
with SchemaRelationProvider{
/**
* Short alias for spark-tensorflow data source.
*/
override def shortName(): String = "tfrecords"
// Writes DataFrame as TensorFlow Records
override def createRelation(
sqlContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
val path = parameters("path")
val codec = parameters.getOrElse("codec", "")
val recordType = parameters.getOrElse("recordType", "Example")
//Export DataFrame as TFRecords
val features = data.rdd.map(row => {
recordType match {
case "Example" =>
val example = DefaultTfRecordRowEncoder.encodeExample(row)
(new BytesWritable(example.toByteArray), NullWritable.get())
case "SequenceExample" =>
val sequenceExample = DefaultTfRecordRowEncoder.encodeSequenceExample(row)
(new BytesWritable(sequenceExample.toByteArray), NullWritable.get())
case _ =>
throw new IllegalArgumentException(s"Unsupported recordType ${recordType}: recordType can be Example or SequenceExample")
}
})
parameters.getOrElse("writeLocality", "distributed") match {
case "distributed" =>
saveDistributed(features, path, sqlContext, mode, codec)
case "local" =>
saveLocal(features, path, mode, codec)
case s: String =>
throw new IllegalArgumentException(
s"Expected 'distributed' or 'local', got $s")
}
TensorflowRelation(parameters)(sqlContext.sparkSession)
}
private def save(sqlContext: SQLContext, features: RDD[(BytesWritable, NullWritable)], path: String, codec: String) = {
val hadoopConf = new Configuration(sqlContext.sparkContext.hadoopConfiguration)
if (!codec.isEmpty) {
hadoopConf.set("mapreduce.output.fileoutputformat.compress", "true")
hadoopConf.set("mapreduce.output.fileoutputformat.compress.codec", codec)
}
features.saveAsNewAPIHadoopFile(
path,
classOf[NullWritable],
classOf[BytesWritable],
classOf[TFRecordFileOutputFormat],
hadoopConf
)
}
private def saveDistributed(
features: RDD[(BytesWritable, NullWritable)],
path: String,
sqlContext: SQLContext,
mode: SaveMode,
codec: String): Unit = {
val hadoopConf = sqlContext.sparkContext.hadoopConfiguration
val outputPath = new Path(path)
val fs = outputPath.getFileSystem(hadoopConf)
val qualifiedOutputPath = outputPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
val pathExists = fs.exists(qualifiedOutputPath)
mode match {
case SaveMode.Overwrite =>
fs.delete(qualifiedOutputPath, true)
save(sqlContext, features, path, codec)
case SaveMode.Append =>
throw new IllegalArgumentException("Append mode is not supported")
case SaveMode.ErrorIfExists =>
if (pathExists)
throw new IllegalStateException(
s"Path $path already exists. SaveMode: ErrorIfExists.")
save(sqlContext, features, path, codec)
case SaveMode.Ignore =>
// With `SaveMode.Ignore` mode, if data already exists, the save operation is expected
// to not save the contents of the DataFrame and to not change the existing data.
// Therefore, it is okay to do nothing here and then just return the relation below.
        if (!pathExists)
save(sqlContext, features, path, codec)
}
}
private def saveLocal(
features: RDD[(BytesWritable, NullWritable)],
localPath: String,
mode: SaveMode,
codec: String): Unit = {
val cleanedPath = Paths.get(localPath).toAbsolutePath.toString
if (!codec.isEmpty) {
throw new IllegalArgumentException("codec can not be used in local write mode")
}
if (mode == SaveMode.Append) {
throw new IllegalArgumentException("Append mode is not supported in local write mode")
}
// Not supported now, but it should be a small fix eventually.
if (mode == SaveMode.Overwrite) {
throw new IllegalArgumentException("Overwrite mode is not supported in local write mode")
}
val f = DefaultSource.writePartitionLocalFun(localPath, mode)
// Perform the action.
features.mapPartitionsWithIndex(f).collect()
}
// Reads TensorFlow Records into DataFrame with Custom Schema
override def createRelation(sqlContext: SQLContext,
parameters: Map[String, String],
schema: StructType): BaseRelation = {
TensorflowRelation(parameters, Some(schema))(sqlContext.sparkSession)
}
// Reads TensorFlow Records into DataFrame with schema inferred
override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): TensorflowRelation = {
TensorflowRelation(parameters)(sqlContext.sparkSession)
}
}
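// Usage sketch (option names are taken from createRelation above; `df`, `spark`,
// `mySchema` and the paths are placeholders):
//
//   df.write
//     .format("tfrecords")
//     .option("recordType", "Example")        // or "SequenceExample"
//     .option("writeLocality", "distributed") // or "local"
//     .mode("overwrite")
//     .save("/tmp/output-tfrecords")
//
//   val loaded = spark.read.format("tfrecords").schema(mySchema).load("/tmp/output-tfrecords")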
object DefaultSource extends scala.Serializable {
// The function run on each worker.
// Writes the partition to a file and returns the number of records output.
private def writePartitionLocal(
index: Int,
part: Iterator[(BytesWritable, NullWritable)],
localPath: String,
mode: SaveMode): Iterator[Int] = {
val dir = new File(localPath)
if (dir.exists()) {
if (mode == SaveMode.ErrorIfExists) {
throw new IllegalStateException(
s"LocalPath $localPath already exists. SaveMode: ErrorIfExists.")
}
if (mode == SaveMode.Ignore) {
return Iterator.empty
}
}
// Make the directory if it does not exist
dir.mkdirs()
// The path to the partition file.
val filePath = localPath + s"/part-" + String.format("%05d", java.lang.Integer.valueOf(index))
val fos = new DataOutputStream(new FileOutputStream(filePath))
var count = 0
try {
val tfw = new TFRecordWriter(fos)
for((bw, _) <- part) {
tfw.write(bw.getBytes)
count += 1
}
} finally {
fos.close()
}
Iterator(count)
}
  // Works around closure variable capture.
private def writePartitionLocalFun(
localPath: String,
mode: SaveMode): (Int, Iterator[(BytesWritable, NullWritable)]) => Iterator[Int] = {
def mapFun(index: Int, part: Iterator[(BytesWritable, NullWritable)]) = {
writePartitionLocal(index, part, localPath, mode)
}
mapFun
}
}
|
tensorflow/ecosystem
|
spark/spark-tensorflow-connector/src/main/scala/org/tensorflow/spark/datasources/tfrecords/DefaultSource.scala
|
Scala
|
apache-2.0
| 7,799 |
package gitbucket.core.controller
import gitbucket.core.issues.milestones.html
import gitbucket.core.service.{RepositoryService, MilestonesService, AccountService}
import gitbucket.core.util.{ReferrerAuthenticator, WritableUsersAuthenticator}
import gitbucket.core.util.Implicits._
import io.github.gitbucket.scalatra.forms._
class MilestonesController extends MilestonesControllerBase
with MilestonesService with RepositoryService with AccountService
with ReferrerAuthenticator with WritableUsersAuthenticator
trait MilestonesControllerBase extends ControllerBase {
self: MilestonesService with RepositoryService
with ReferrerAuthenticator with WritableUsersAuthenticator =>
case class MilestoneForm(title: String, description: Option[String], dueDate: Option[java.util.Date])
val milestoneForm = mapping(
"title" -> trim(label("Title", text(required, maxlength(100)))),
"description" -> trim(label("Description", optional(text()))),
"dueDate" -> trim(label("Due Date", optional(date())))
)(MilestoneForm.apply)
get("/:owner/:repository/issues/milestones")(referrersOnly { repository =>
html.list(
params.getOrElse("state", "open"),
getMilestonesWithIssueCount(repository.owner, repository.name),
repository,
hasDeveloperRole(repository.owner, repository.name, context.loginAccount))
})
get("/:owner/:repository/issues/milestones/new")(writableUsersOnly {
html.edit(None, _)
})
post("/:owner/:repository/issues/milestones/new", milestoneForm)(writableUsersOnly { (form, repository) =>
createMilestone(repository.owner, repository.name, form.title, form.description, form.dueDate)
redirect(s"/${repository.owner}/${repository.name}/issues/milestones")
})
get("/:owner/:repository/issues/milestones/:milestoneId/edit")(writableUsersOnly { repository =>
params("milestoneId").toIntOpt.map{ milestoneId =>
html.edit(getMilestone(repository.owner, repository.name, milestoneId), repository)
} getOrElse NotFound()
})
post("/:owner/:repository/issues/milestones/:milestoneId/edit", milestoneForm)(writableUsersOnly { (form, repository) =>
params("milestoneId").toIntOpt.flatMap{ milestoneId =>
getMilestone(repository.owner, repository.name, milestoneId).map { milestone =>
updateMilestone(milestone.copy(title = form.title, description = form.description, dueDate = form.dueDate))
redirect(s"/${repository.owner}/${repository.name}/issues/milestones")
}
} getOrElse NotFound()
})
get("/:owner/:repository/issues/milestones/:milestoneId/close")(writableUsersOnly { repository =>
params("milestoneId").toIntOpt.flatMap{ milestoneId =>
getMilestone(repository.owner, repository.name, milestoneId).map { milestone =>
closeMilestone(milestone)
redirect(s"/${repository.owner}/${repository.name}/issues/milestones")
}
} getOrElse NotFound()
})
get("/:owner/:repository/issues/milestones/:milestoneId/open")(writableUsersOnly { repository =>
params("milestoneId").toIntOpt.flatMap{ milestoneId =>
getMilestone(repository.owner, repository.name, milestoneId).map { milestone =>
openMilestone(milestone)
redirect(s"/${repository.owner}/${repository.name}/issues/milestones")
}
} getOrElse NotFound()
})
get("/:owner/:repository/issues/milestones/:milestoneId/delete")(writableUsersOnly { repository =>
params("milestoneId").toIntOpt.flatMap{ milestoneId =>
getMilestone(repository.owner, repository.name, milestoneId).map { milestone =>
deleteMilestone(repository.owner, repository.name, milestone.milestoneId)
redirect(s"/${repository.owner}/${repository.name}/issues/milestones")
}
} getOrElse NotFound()
})
}
|
nobusugi246/gitbucket
|
src/main/scala/gitbucket/core/controller/MilestonesController.scala
|
Scala
|
apache-2.0
| 3,782 |
/*
JPA Scala Support for Play Framework 2
Copyright (C) 2014 Radim Kolar
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.filez.play2
/**
* Scala JPA Support for Play Framework 2.
* This package is using standard JPA EntityManager
*
* @author Radim Kolar
* @version 2.3
*/
package object jpa {
}
|
hsn10/playjpa
|
src/main/scala/jpa/package.scala
|
Scala
|
agpl-3.0
| 951 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.test
import java.util.concurrent.TimeUnit
import akka.stream.Materializer
import akka.util.ByteString
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.Json
import play.api.mvc.Results._
import play.api.mvc._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
class FakesSpec extends PlaySpecification {
sequential
private val Action = ActionBuilder.ignoringBody
"FakeRequest" should {
def app = GuiceApplicationBuilder().routes {
case (PUT, "/process") => Action { req =>
Results.Ok(req.headers.get(CONTENT_TYPE) getOrElse "")
}
}.build()
"Define Content-Type header based on body" in new WithApplication(app) {
val xml =
<foo>
<bar>
baz
</bar>
</foo>
val bytes = ByteString(xml.toString, "utf-16le")
val req = FakeRequest(PUT, "/process")
.withRawBody(bytes)
route(app, req) aka "response" must beSome.which { resp =>
contentAsString(resp) aka "content" must_== "application/octet-stream"
}
}
"Not override explicit Content-Type header" in new WithApplication(app) {
val xml =
<foo>
<bar>
baz
</bar>
</foo>
val bytes = ByteString(xml.toString, "utf-16le")
val req = FakeRequest(PUT, "/process")
.withRawBody(bytes)
.withHeaders(
CONTENT_TYPE -> "text/xml;charset=utf-16le"
)
route(app, req) aka "response" must beSome.which { resp =>
contentAsString(resp) aka "content" must_== "text/xml;charset=utf-16le"
}
}
"set a Content-Type header when one is unspecified and required" in new WithApplication() {
val request = FakeRequest(GET, "/testCall")
.withJsonBody(Json.obj("foo" -> "bar"))
contentTypeForFakeRequest(request) must contain("application/json")
}
"not overwrite the Content-Type header when specified" in new WithApplication() {
val request = FakeRequest(GET, "/testCall")
.withJsonBody(Json.obj("foo" -> "bar"))
.withHeaders(CONTENT_TYPE -> "application/test+json")
contentTypeForFakeRequest(request) must contain("application/test+json")
}
}
def contentTypeForFakeRequest[T](request: FakeRequest[AnyContentAsJson])(implicit mat: Materializer): String = {
var testContentType: Option[String] = None
val action = Action { request: Request[_] => testContentType = request.headers.get(CONTENT_TYPE); Ok }
val headers = new WrappedRequest(request)
val execution = (new TestActionCaller).call(action, headers, request.body)
Await.result(execution, Duration(3, TimeUnit.SECONDS))
testContentType.getOrElse("No Content-Type found")
}
}
class TestActionCaller extends EssentialActionCaller with Writeables
|
Shenker93/playframework
|
framework/src/play-specs2/src/test/scala/play/api/test/FakesSpec.scala
|
Scala
|
apache-2.0
| 2,929 |
/*
* Copyright (C) 2015 Noorq, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package modules
import scala.reflect.runtime.universe
import com.mailrest.maildal.config.MailDalConfig
import com.mailrest.maildal.config.RepositoryConfig
import com.mailrest.maildal.repository._
import scaldi.Injectable
import scaldi.Module
import scaldi.Injector
import play.api.Application
import com.typesafe.config.ConfigFactory
import com.typesafe.config.Config
class DalModule extends Module {
// Account
bind [AccountRepository] to new RepositoryConfig(dalConf) with AccountRepository
bind [AccountLogRepository] to new RepositoryConfig(dalConf) with AccountLogRepository
bind [UserRepository] to new RepositoryConfig(dalConf) with UserRepository
// Domain
bind [DomainRepository] to new RepositoryConfig(dalConf) with DomainRepository
bind [DomainOwnerRepository] to new RepositoryConfig(dalConf) with DomainOwnerRepository
bind [TemplateRepository] to new RepositoryConfig(dalConf) with TemplateRepository
bind [DomainVerificationQueueRepository] to new RepositoryConfig(dalConf) with DomainVerificationQueueRepository
// Message
bind [MessageRepository] to new RepositoryConfig(dalConf) with MessageRepository
bind [MessageLogRepository] to new RepositoryConfig(dalConf) with MessageLogRepository
bind [MessageQueueRepository] to new RepositoryConfig(dalConf) with MessageQueueRepository
bind [MessageStatsDailyRepository] to new RepositoryConfig(dalConf) with MessageStatsDailyRepository
bind [UnsubscribedRecipientRepository] to new RepositoryConfig(dalConf) with UnsubscribedRecipientRepository
// Config
val appConf = ConfigFactory.load()
//val cassandraHost = appConf.getString("cassandra.host")
lazy val cassandraHost = inject [String] ("cassandra.host")
val cassandraKeyspace = appConf.getString("cassandra.keyspace")
lazy val dalConf = new MailDalConfig(cassandraHost, cassandraKeyspace)
}
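// Usage sketch (the consuming class is hypothetical; standard scaldi injection style):
//
//   class AccountLookup(implicit inj: Injector) extends Injectable {
//     private val accounts = inject [AccountRepository]
//   }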
|
mailrest/mailrest
|
app/modules/DalModule.scala
|
Scala
|
apache-2.0
| 2,505 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.management
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.execution.command.{Checker, DataCommand}
import org.apache.spark.sql.types.StringType
import org.apache.carbondata.api.CarbonStore
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
case class CarbonShowLoadsCommand(
databaseNameOp: Option[String],
tableName: String,
limit: Option[String],
showHistory: Boolean = false)
extends DataCommand {
  // add any new SHOW SEGMENTS columns at the end
override def output: Seq[Attribute] = {
if (showHistory) {
Seq(AttributeReference("SegmentSequenceId", StringType, nullable = false)(),
AttributeReference("Status", StringType, nullable = false)(),
AttributeReference("Load Start Time", StringType, nullable = false)(),
AttributeReference("Load End Time", StringType, nullable = true)(),
AttributeReference("Merged To", StringType, nullable = false)(),
AttributeReference("File Format", StringType, nullable = false)(),
AttributeReference("Visibility", StringType, nullable = false)(),
AttributeReference("Data Size", StringType, nullable = false)(),
AttributeReference("Index Size", StringType, nullable = false)())
} else {
Seq(AttributeReference("SegmentSequenceId", StringType, nullable = false)(),
AttributeReference("Status", StringType, nullable = false)(),
AttributeReference("Load Start Time", StringType, nullable = false)(),
AttributeReference("Load End Time", StringType, nullable = true)(),
AttributeReference("Merged To", StringType, nullable = false)(),
AttributeReference("File Format", StringType, nullable = false)(),
AttributeReference("Data Size", StringType, nullable = false)(),
AttributeReference("Index Size", StringType, nullable = false)())
}
}
override def processData(sparkSession: SparkSession): Seq[Row] = {
Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession)
setAuditTable(carbonTable)
if (!carbonTable.getTableInfo.isTransactionalTable) {
throw new MalformedCarbonCommandException("Unsupported operation on non transactional table")
}
CarbonStore.showSegments(
limit,
carbonTable.getTablePath,
showHistory
)
}
override protected def opName: String = "SHOW SEGMENTS"
}
|
manishgupta88/carbondata
|
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
|
Scala
|
apache-2.0
| 3,429 |
/*
* Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.sbt
import sbt._
import Keys._
import java.net.{URL, URLStreamHandler, URLStreamHandlerFactory}
import org.apache.ivy.util.Message
import org.apache.ivy.util.url.{URLHandlerDispatcher, URLHandlerRegistry}
/**
* All this does is register the s3:// url handler with the JVM and IVY
*/
object S3ResolverPlugin extends AutoPlugin {
object autoImport extends S3Implicits
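  // A hedged usage sketch (bucket name and path are assumptions for illustration):
  // once this plugin is on the build classpath, s3:// URLs can be declared like any
  // other sbt resolver, e.g. in build.sbt:
  //
  //   resolvers += "My S3 Releases" at "s3://my-example-bucket/releases"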
//
// This *should* work but it looks like SBT is doing some multi class loader stuff
// because the class loader used to load java.net.URL doesn't see fm.sbt.s3.Handler.
// So instead we use the URL.setURLStreamHandlerFactory method below which only works
// if nobody else has called URL.setURLStreamHandlerFactory.
//
/*
// See JavaDocs for the java.net.URL Constructors
private def protocolPkgKey: String = "java.protocol.handler.pkgs"
private def existingProtocolHandlers: Option[String] = Option(System.getProperty(protocolPkgKey))
// Register our S3URLStreamHandler so that we can create instances of URLs with an "s3" Protocol
System.setProperty(protocolPkgKey, "fm.sbt"+existingProtocolHandlers.map{ "|"+_ }.getOrElse(""))
*/
private object S3URLStreamHandlerFactory extends URLStreamHandlerFactory {
def createURLStreamHandler(protocol: String): URLStreamHandler = protocol match {
case "s3" => new fm.sbt.s3.Handler
case _ => null
}
}
// We need s3:// URLs to work without throwing a java.net.MalformedURLException
  // which means installing a dummy URLStreamHandler. We only install the handler
// if it's not already installed (since a second call to URL.setURLStreamHandlerFactory
// will fail).
try {
new URL("s3://example.com")
info("The s3:// URLStreamHandler is already installed")
} catch {
// This means we haven't installed the handler, so install it
case _: java.net.MalformedURLException =>
info("Installing the s3:// URLStreamHandler via java.net.URL.setURLStreamHandlerFactory")
URL.setURLStreamHandlerFactory(S3URLStreamHandlerFactory)
}
//
// This sets up the Ivy URLHandler for s3:// URLs
//
private val dispatcher: URLHandlerDispatcher = URLHandlerRegistry.getDefault() match {
// If the default is already a URLHandlerDispatcher then just use that
case disp: URLHandlerDispatcher =>
info("Using the existing Ivy URLHandlerDispatcher to handle s3:// URLs")
disp
// Otherwise create a new URLHandlerDispatcher
case default =>
info("Creating a new Ivy URLHandlerDispatcher to handle s3:// URLs")
val disp: URLHandlerDispatcher = new URLHandlerDispatcher()
disp.setDefault(default)
URLHandlerRegistry.setDefault(disp)
disp
}
// Register (or replace) the s3 handler
dispatcher.setDownloader("s3", new S3URLHandler)
// Not sure how to log using SBT so I'm using Ivy's Message class
private def info(msg: String): Unit = Message.info(msg)
}
|
Gesto/fm-sbt-s3-resolver
|
src/main/scala/fm/sbt/S3ResolverPlugin.scala
|
Scala
|
apache-2.0
| 3,568 |
package com.twitter.finagle.http
import org.specs.SpecificationWithJUnit
import org.jboss.netty.handler.codec.http.DefaultCookie
class CookieSetSpec extends SpecificationWithJUnit {
"CookieSet" should {
"no cookies" in {
val request = Request()
request.cookies must beEmpty
}
"request cookie basics" in {
val request = Request()
request.headers("Cookie") = "name=value; name2=value2"
request.cookies.contains(new DefaultCookie("name", "value")) must beTrue
request.cookies.contains(new DefaultCookie("name2", "value2")) must beTrue
request.cookies.isValid must beTrue
}
"response cookie basics" in {
val response = Response()
response.headers("Set-Cookie") = "name=value; name2=value2"
response.cookies.contains(new DefaultCookie("name", "value")) must beTrue
response.cookies.contains(new DefaultCookie("name2", "value2")) must beTrue
}
"cookie with attributes" in {
val request = Request()
request.headers("Cookie") = "name=value; Max-Age=23; Domain=.example.com; Path=/"
val cookie = request.cookies.iterator.toList.head
cookie.getValue must_== "value"
cookie.getMaxAge() must_== 23
cookie.getDomain() must_== ".example.com"
cookie.getPath() must_== "/"
}
"add cookie" in {
val request = Request()
val cookie = new DefaultCookie("name", "value")
request.cookies += cookie
request.cookies.contains(new DefaultCookie("name", "value")) must beTrue
request.headers("Cookie") must_== "name=value"
}
"add same cookie only once" in {
val request = Request()
val cookie = new DefaultCookie("name", "value")
request.cookies += cookie
request.cookies += cookie
request.cookies.contains(new DefaultCookie("name", "value")) must beTrue
request.headers("Cookie") must_== "name=value"
request.cookies.iterator.length must_== 1
}
"remove cookie" in {
val request = Request()
request.headers.add("Cookie", "name=value")
request.headers.add("Cookie", "name=value2") // same name - gets removed too
request.cookies -= (new DefaultCookie("name", "value2"))
request.cookies must haveSize(0)
}
"netty Cookie.equals is not broken in netty 3.5" in {
val cookie1 = new DefaultCookie("name", "value")
val cookie2 = new DefaultCookie("name", "value")
cookie1 must_==(cookie2)
}
"invalid cookies are ignored" in {
val request = Request()
request.headers.add("Cookie", "namé=value")
request.cookies must haveSize(0)
request.cookies.isValid must beFalse
}
}
}
|
joshbedo/finagle
|
finagle-http/src/test/scala/com/twitter/finagle/http/CookieSetSpec.scala
|
Scala
|
apache-2.0
| 2,685 |
package net.randallalexander.restaurant.chooser.model
import io.circe.HCursor
object Validation {
def state(hCursor: HCursor): Boolean = {
hCursor.downField("state").focus.flatMap(_.asString) match {
case Some(value) => (value.size == 2) //Good enough for now
case _ => false
}
}
def zip(hCursor: HCursor): Boolean = {
hCursor.downField("zip").focus.flatMap(_.asString) match {
case Some(value) =>
value.length == 5 //Good enough for now
case _ => false
}
}
def positiveMoney(fieldName:String)(hCursor: HCursor): Boolean = {
hCursor.downField(fieldName).focus.flatMap(_.asNumber.map(_.toDouble)) match {
case Some(value) =>
value.toString.matches("\\\\d{1,}(.\\\\d?\\\\d?)?")
case _ => false
}
}
def intGTE(fieldName:String, minValue:Int)(hCursor: HCursor): Boolean = {
hCursor.downField(fieldName).focus.flatMap(_.asNumber.flatMap(_.toInt)) match {
case Some(value) =>
value >= minValue
case _ => false
}
}
}
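// A hedged usage sketch (the JSON document below is an assumption for illustration):
//
//   import io.circe.Json
//   import io.circe.parser.parse
//   val cursor = parse("""{"state": "CA", "zip": "94105"}""").getOrElse(Json.Null).hcursor
//   Validation.state(cursor) // true: exactly two characters
//   Validation.zip(cursor)   // true: exactly five characters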
|
randallalexander/restaurant-chooser
|
service/src/main/scala/net/randallalexander/restaurant/chooser/model/Validation.scala
|
Scala
|
mit
| 1,029 |
/* sbt -- Simple Build Tool
* Copyright 2008 Mark Harrah */
package sbt.internal.util
import org.scalacheck._
import Prop._
import scala.collection.mutable.HashSet
object DagSpecification extends Properties("Dag") {
property("No repeated nodes") = forAll { (dag: TestDag) => isSet(dag.topologicalSort) }
property("Sort contains node") = forAll { (dag: TestDag) => dag.topologicalSort.contains(dag) }
property("Dependencies precede node") = forAll { (dag: TestDag) => dependenciesPrecedeNodes(dag.topologicalSort) }
implicit lazy val arbTestDag: Arbitrary[TestDag] = Arbitrary(Gen.sized(dagGen))
private def dagGen(nodeCount: Int): Gen[TestDag] =
{
val nodes = new HashSet[TestDag]
def nonterminalGen(p: Gen.Parameters): Gen[TestDag] =
{
for (i <- 0 until nodeCount; nextDeps <- Gen.someOf(nodes).apply(p))
nodes += new TestDag(i, nextDeps)
for (nextDeps <- Gen.someOf(nodes)) yield new TestDag(nodeCount, nextDeps)
}
Gen.parameterized(nonterminalGen)
}
private def isSet[T](c: Seq[T]) = Set(c: _*).size == c.size
private def dependenciesPrecedeNodes(sort: List[TestDag]) =
{
val seen = new HashSet[TestDag]
def iterate(remaining: List[TestDag]): Boolean =
{
remaining match {
case Nil => true
case node :: tail =>
if (node.dependencies.forall(seen.contains) && !seen.contains(node)) {
seen += node
iterate(tail)
} else
false
}
}
iterate(sort)
}
}
class TestDag(id: Int, val dependencies: Iterable[TestDag]) extends Dag[TestDag] {
override def toString = id + "->" + dependencies.mkString("[", ",", "]")
}
|
Duhemm/util
|
internal/util-collection/src/test/scala/DagSpecification.scala
|
Scala
|
bsd-3-clause
| 1,763 |
package org.bitcoins.core.protocol.script
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.script.constant._
import org.bitcoins.core.util.BytesUtil
import org.bitcoins.crypto.{CryptoUtil, Sha256Digest, Sha256Hash160Digest}
import scala.util.{Failure, Success, Try}
/** Created by chris on 11/10/16.
* The version of the [[org.bitcoins.core.protocol.script.WitnessScriptPubKey WitnessScriptPubKey]],
  * which indicates how a [[org.bitcoins.core.protocol.script.ScriptWitness ScriptWitness]] is rebuilt.
* [[https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#witness-program BIP141]]
*/
sealed trait WitnessVersion {
/** Rebuilds the full script from the given witness and [[org.bitcoins.core.protocol.script.ScriptPubKey ScriptPubKey]]
* Either returns the [[org.bitcoins.core.protocol.script.ScriptPubKey ScriptPubKey]]
* it needs to be executed against or the failure that was encountered while rebuilding the witness
*/
def rebuild(
scriptWitness: ScriptWitness,
witnessProgram: Seq[ScriptToken]): Try[ScriptPubKey]
def version: ScriptNumberOperation
}
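// A hedged usage sketch (the 20-byte constant is made up for illustration; for a real
// P2WPKH program it would be the HASH160 of a compressed public key, and the names
// EmptyScriptWitness/ScriptConstant are assumed to be in scope from this package):
//
//   import scodec.bits.ByteVector
//   val program: Seq[ScriptToken] = Seq(ScriptConstant(ByteVector.fill(20)(0x01)))
//   WitnessVersion0.rebuild(EmptyScriptWitness, program) // Success(P2PKHScriptPubKey(...))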
case object WitnessVersion0 extends WitnessVersion {
/** Rebuilds a witness version 0 SPK program, see BIP141 */
override def rebuild(
scriptWitness: ScriptWitness,
witnessProgram: Seq[ScriptToken]): Try[ScriptPubKey] = {
val programBytes = BytesUtil.toByteVector(witnessProgram)
programBytes.size match {
case 20 =>
//p2wpkh
val hash = Sha256Hash160Digest(programBytes)
Success(P2PKHScriptPubKey(hash))
case 32 =>
//p2wsh
if (scriptWitness.stack.isEmpty)
Failure(
new IllegalArgumentException(
"P2WSH cannot be rebuilt without redeem script"))
else {
//need to check if the hashes match
val stackTop = scriptWitness.stack.head
val stackHash = CryptoUtil.sha256(stackTop)
val witnessHash = Sha256Digest(witnessProgram.head.bytes)
if (stackHash != witnessHash) {
Failure(new IllegalArgumentException(
s"Witness hash $witnessHash did not match stack hash $stackHash"))
} else {
val compactSizeUInt =
CompactSizeUInt.calculateCompactSizeUInt(stackTop)
val scriptPubKey = ScriptPubKey(compactSizeUInt.bytes ++ stackTop)
Success(scriptPubKey)
}
}
case _ =>
//witness version 0 programs need to be 20 bytes or 32 bytes in size
Failure(new IllegalArgumentException(
s"Witness program had invalid length (${programBytes.length}) for version 0, must be 20 or 30: $witnessProgram"))
}
}
override def version = OP_0
}
case object WitnessVersion1 extends WitnessVersion {
override def rebuild(
scriptWitness: ScriptWitness,
witnessProgram: Seq[ScriptToken]): Try[ScriptPubKey] = {
throw new UnsupportedOperationException("Taproot is not yet supported")
}
override def version: ScriptNumberOperation = OP_1
}
/** The witness version that represents all witnesses that have not been allocated yet */
case class UnassignedWitness(version: ScriptNumberOperation)
extends WitnessVersion {
require(
WitnessScriptPubKey.unassignedWitVersions.contains(version),
"Cannot created an unassigend witness version from one that is assigned already, got: " + version
)
override def rebuild(
scriptWitness: ScriptWitness,
witnessProgram: Seq[ScriptToken]): Try[ScriptPubKey] =
Failure(
new UnsupportedOperationException(
s"Rebuilding is not defined for version $version yet."))
}
object WitnessVersion {
def apply(scriptNumberOp: ScriptNumberOperation): WitnessVersion =
scriptNumberOp match {
case OP_0 | OP_FALSE => WitnessVersion0
case OP_1 | OP_TRUE => WitnessVersion1
case x @ (OP_2 | OP_3 | OP_4 | OP_5 | OP_6 | OP_7 | OP_8 | OP_9 | OP_10 |
OP_11 | OP_12 | OP_13 | OP_14 | OP_15 | OP_16) =>
UnassignedWitness(x)
case OP_1NEGATE =>
throw new IllegalArgumentException(
"OP_1NEGATE is not a valid witness version")
}
def apply(token: ScriptToken): WitnessVersion =
token match {
case scriptNumberOp: ScriptNumberOperation =>
WitnessVersion(scriptNumberOp)
case _: ScriptConstant | _: ScriptNumber | _: ScriptOperation =>
throw new IllegalArgumentException(
"We can only have witness version that is a script number operation, i.e OP_0 through OP_16")
}
def apply(int: Int): Option[WitnessVersion] =
ScriptNumberOperation.fromNumber(int).map(WitnessVersion(_))
}
|
bitcoin-s/bitcoin-s
|
core/src/main/scala/org/bitcoins/core/protocol/script/WitnessVersion.scala
|
Scala
|
mit
| 4,694 |
package scala.tools.nsc
package transform.patmat
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.tools.asm.Opcodes._
import scala.tools.nsc.backend.jvm.AsmUtils._
import scala.tools.testkit.ASMConverters.Instruction
import scala.tools.testkit.BytecodeTesting
import scala.tools.testkit.BytecodeTesting._
import PartialFunction.cond
@RunWith(classOf[JUnit4])
class PatmatBytecodeTest extends BytecodeTesting {
val optCompiler = cached("optCompiler", () => newCompiler(extraArgs = "-opt:inline:**"))
import compiler._
@Test
def t6956(): Unit = {
val code =
"""class C {
| private[this] final val ONE = 1
|
| def s1(i: Byte): Int = i match {
| case ONE => 1
| case 2 => 2
| case 3 => 3
| case _ => 0
| }
|
| def s2(i: Byte): Int = i match {
| case 1 => 1
| case 2 => 2
| case 3 => 3
| case _ => 0
| }
|}
""".stripMargin
val c = compileClass(code)
assert(getInstructions(c, "s1").count(_.opcode == TABLESWITCH) == 1, textify(c))
assert(getInstructions(c, "s2").count(_.opcode == TABLESWITCH) == 1, textify(c))
}
@Test
def t6955(): Unit = {
val code =
"""class C {
| type Tag = Byte
|
| def s1(i: Tag): Int = i match { // notice type of i is Tag = Byte
| case 1 => 1
| case 2 => 2
| case 3 => 3
| case _ => 0
| }
|
| // this worked before, should keep working
| def s2(i: Byte): Int = i match {
| case 1 => 1
| case 2 => 2
| case 3 => 3
| case _ => 0
| }
|}
""".stripMargin
val c = compileClass(code)
assert(getInstructions(c, "s1").count(_.opcode == TABLESWITCH) == 1, textify(c))
assert(getInstructions(c, "s2").count(_.opcode == TABLESWITCH) == 1, textify(c))
}
@Test
def optNoPrimitiveTypetest(): Unit = {
val code =
"""case class Foo(x: Int, y: String)
|class C {
| def a = Foo(1, "a") match {
| case Foo(_: Int, y) => y
| }
|}
""".stripMargin
val c :: _ = optCompiler.compileClasses(code): @unchecked
assertSameSummary(getMethod(c, "a"), List(
NEW, DUP, ICONST_1, LDC, "<init>",
"y", ARETURN))
}
@Test
def optNoNullCheck(): Unit = {
val code =
"""case class Foo(x: Any)
|class C {
| def a = (Foo(1): Any) match {
| case Foo(_: String) =>
| case x => throw new MatchError(x)
| }
|}
""".stripMargin
val c :: _ = optCompiler.compileClasses(code): @unchecked
assert(!getInstructions(c, "a").exists(i => i.opcode == IFNULL || i.opcode == IFNONNULL), textify(getAsmMethod(c, "a")))
}
@Test
  def optNoLocalForUnderscore(): Unit = {
val code =
"""case class Foo(x: Any, y: String)
|class C {
| def a = (Foo(1, "a"): @unchecked) match {
| case Foo(_: String, y) => y
| }
|}
""".stripMargin
val c :: _ = optCompiler.compileClasses(code): @unchecked
assertSameSummary(getMethod(c, "a"), List(
NEW, DUP, ICONST_1, "valueOf", LDC, "<init>", ASTORE /*1*/,
ALOAD /*1*/, "y", ASTORE /*2*/,
ALOAD /*1*/, "x", INSTANCEOF, IFNE /*R*/,
NEW, DUP, ALOAD /*1*/, "<init>", ATHROW,
/*R*/ -1, ALOAD /*2*/, ARETURN))
}
@Test
def t6941(): Unit = {
val code =
"""class C {
| def a(xs: List[Int]) = xs match {
| case x :: _ => x
| }
| def b(xs: List[Int]) = xs match {
| case xs: ::[Int] => xs.head
| }
|}
""".stripMargin
val c = optCompiler.compileClass(code, allowMessage = _.msg.contains("may not be exhaustive"))
val expected = List[Any](
ALOAD /*1*/ , INSTANCEOF /*::*/ , IFEQ /*A*/ ,
ALOAD, CHECKCAST /*::*/ , "head", "unboxToInt",
ISTORE, GOTO /*B*/ ,
-1 /*A*/ , NEW /*MatchError*/ , DUP, ALOAD /*1*/ , "<init>", ATHROW,
-1 /*B*/ , ILOAD, IRETURN)
assertSameSummary(getMethod(c, "a"), expected)
assertSameSummary(getMethod(c, "b"), expected)
}
@Test
def valPatterns(): Unit = {
val code =
"""case class C(a: Any, b: Int) {
| def tplCall = ("hi", 3)
| @inline final def tplInline = (true, 'z')
|
| def t1 = { val (a, b) = (1, 2); a + b }
| def t2 = { val (a, _) = (1, 3); a }
| def t3 = { val (s, i) = tplCall; s.length + i }
| def t4 = { val (_, i) = tplCall; i }
| def t5 = { val (b, c) = tplInline; b || c == 'e' }
| def t6 = { val (_, c) = tplInline; c }
|
| def t7 = { val C(s: String, b) = this; s.length + b }
| def t8 = { val C(_, b) = this; b }
| def t9 = { val C(a, _) = C("hi", 23); a.toString }
|}
""".stripMargin
val List(c, cMod) = optCompiler.compileClasses(code)
assertSameSummary(getMethod(c, "t1"), List(ICONST_1, ICONST_2, IADD, IRETURN))
assertSameSummary(getMethod(c, "t2"), List(ICONST_1, IRETURN))
assertInvokedMethods(getMethod(c, "t3"), List("C.tplCall", "scala/Tuple2._1", "scala/Tuple2._2$mcI$sp", "scala/MatchError.<init>", "java/lang/String.length"))
assertInvokedMethods(getMethod(c, "t4"), List("C.tplCall", "scala/Tuple2._2$mcI$sp", "scala/MatchError.<init>"))
assertNoInvoke(getMethod(c, "t5"))
assertSameSummary(getMethod(c, "t6"), List(BIPUSH, IRETURN))
// MatchError reachable because of the type pattern `s: String`
assertInvokedMethods(getMethod(c, "t7"), List("C.a", "C.b", "scala/MatchError.<init>", "java/lang/String.length"))
assertSameSummary(getMethod(c, "t8"), List(ALOAD, "b", IRETURN))
// C allocation not eliminated - constructor may have side-effects.
assertSameSummary(getMethod(c, "t9"), List(NEW, DUP, LDC, BIPUSH, "<init>", "a", "toString", ARETURN))
}
@Test
def stringSwitch(): Unit = {
val code =
"""import annotation.switch
|class Switches {
| val cond = true
| def two = ("foo" : @switch) match { case "foo" => case "bar" => }
| def guard = ("foo" : @switch) match { case "z" => case "y" => case x if cond => }
| def colli = ("foo" : @switch) match { case "DB" => case "Ca" => }
|}
""".stripMargin
val List(switches) = compiler.compileClasses(code)
def isSwitchInsn(insn: Instruction) = cond(insn.opcode) { case LOOKUPSWITCH | TABLESWITCH => true }
List("two", "guard", "colli") foreach { m =>
assert(getInstructions(switches, m).exists(isSwitchInsn))
}
}
}
|
scala/scala
|
test/junit/scala/tools/nsc/transform/patmat/PatmatBytecodeTest.scala
|
Scala
|
apache-2.0
| 6,815 |
package io.github.tpartyka.testapp
import com.typesafe.config.ConfigFactory
import net.ceedubs.ficus.Ficus
import net.ceedubs.ficus.readers.ArbitraryTypeReader
trait Config {
import ArbitraryTypeReader._
import Ficus._
protected case class HttpConfig(interface: String, port: Int)
private val config = ConfigFactory.load()
protected val httpConfig: HttpConfig = config.as[HttpConfig]("http")
}
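// A minimal application.conf sketch matching HttpConfig above (the interface and port
// values are assumptions for illustration):
//
//   http {
//     interface = "0.0.0.0"
//     port = 8080
//   }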
|
tpartyka/test-app
|
src/main/scala/io/github/tpartyka/testapp/Config.scala
|
Scala
|
mit
| 426 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.broker.kafka
import akka.util.ByteString
import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.NegotiatedDeserializer
import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.NegotiatedSerializer
import org.apache.kafka.common.serialization.Deserializer
import org.apache.kafka.common.serialization.Serializer
/**
* Adapts a Lagom NegotiatedDeserializer into a Kafka Deserializer so that messages
 * stored in Kafka can be deserialized into the application's expected type.
*/
private[lagom] class ScaladslKafkaDeserializer[T](deserializer: NegotiatedDeserializer[T, ByteString])
extends Deserializer[T] {
override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = {
() // ignore
}
override def deserialize(topic: String, data: Array[Byte]): T =
deserializer.deserialize(ByteString(data))
override def close(): Unit = () // nothing to do
}
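// A hedged wiring sketch (`MyEvent` and `negotiatedDeserializer` are assumptions for
// illustration; the NegotiatedDeserializer would come from the topic's MessageSerializer):
//
//   val kafkaDeserializer: Deserializer[MyEvent] =
//     new ScaladslKafkaDeserializer(negotiatedDeserializer)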
/**
 * Adapts a Lagom NegotiatedSerializer into a Kafka Serializer so that the application's
 * messages can be serialized into a byte array and published to Kafka.
*/
private[lagom] class ScaladslKafkaSerializer[T](serializer: NegotiatedSerializer[T, ByteString]) extends Serializer[T] {
override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = {
() // ignore
}
override def serialize(topic: String, data: T): Array[Byte] =
serializer.serialize(data).toArray
override def close(): Unit = () // nothing to do
}
|
lagom/lagom
|
service/scaladsl/kafka/client/src/main/scala/com/lightbend/lagom/internal/scaladsl/broker/kafka/KafkaSerializers.scala
|
Scala
|
apache-2.0
| 1,574 |
package com.banno.salat.avro
import test.models._
import scala.collection.immutable.{Map => IMap}
import scala.collection.mutable.{Map => MMap}
package object test {
def ed() = Edward(a = "hello", b = 1, c = 1.1, aa = Some("there"), bb = Some(2), cc = Some(2.2))
def graph() = Alice("x", Some("y"),
Basil(Some(80), 81))
def clara() = Clara(l = Seq("hello", "there"), m = List(1,2,3), n = List(Desmond(Map("hello" -> 1))))
def desmond() = Desmond(IMap("hello" -> 1), MMap("there" -> 2))
}
|
Banno/salat-avro
|
src/test/scala/test.scala
|
Scala
|
apache-2.0
| 520 |
package jsky.app.ot.gemini.editor.targetComponent
import edu.gemini.spModel.obs.context.ObsContext
import jsky.app.ot.ags.AgsContext
import scala.swing._
class GuidingControls extends GridBagPanel {
opaque = false
private object guiderLabel extends Label {
text = "Auto Guide Search:"
horizontalAlignment = Alignment.Right
opaque = false
}
layout(guiderLabel) = new Constraints {
gridx = 0
insets = new Insets(0, 0, 0, 0)
}
val autoGuideStarGuiderSelector = new AgsStrategyCombo
layout(Component.wrap(autoGuideStarGuiderSelector.getUi)) = new Constraints {
gridx = 1
insets = new Insets(0, 5, 0, 10)
}
val manualGuideStarButton = new Button("Manual GS")
layout(manualGuideStarButton) = new Constraints {
gridx = 2
insets = new Insets(0, 0, 0, 5)
}
def update(ctxOpt: edu.gemini.shared.util.immutable.Option[ObsContext]): Unit = {
autoGuideStarGuiderSelector.setAgsOptions(AgsContext.create(ctxOpt))
}
def supportsAgs_=(supports: Boolean): Unit = {
guiderLabel.visible = supports
autoGuideStarGuiderSelector.getUi.setVisible(supports)
}
}
|
spakzad/ocs
|
bundle/jsky.app.ot/src/main/scala/jsky/app/ot/gemini/editor/targetComponent/GuidingControls.scala
|
Scala
|
bsd-3-clause
| 1,158 |
package core.formatter.marketplace.order
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.{ JsonSerializer, SerializerProvider }
import com.lvxingpai.model.marketplace.order.OrderActivity
import core.misc.Utils
import scala.collection.JavaConversions._
/**
* Created by pengyt on 2015/11/21.
*/
class OrderActivitySerializer extends JsonSerializer[OrderActivity] {
override def serialize(act: OrderActivity, gen: JsonGenerator, serializers: SerializerProvider): Unit = {
gen.writeStartObject()
if (act == null) {
gen.writeEndObject()
return
}
gen.writeStringField("action", Option(act.action) getOrElse "")
gen.writeStringField("prevStatus", Option(act.prevStatus) getOrElse "")
gen.writeFieldName("data")
val data = act.data
gen.writeStartObject()
if (data != null) {
data foreach (entry => {
val key = entry._1
val value = entry._2.toString
val valueFilter = key match {
case "amount" => Utils.getActualPrice(value.toInt).toString
case _ => value
}
gen.writeStringField(entry._1, valueFilter)
})
}
gen.writeEndObject()
//foreach (gen.writeStringField("prevStatus", Option(act.prevStatus) getOrElse ""))
gen.writeNumberField("timestamp", if (act.timestamp != null) act.timestamp.getTime else 0)
gen.writeEndObject()
}
}
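// A hedged registration sketch (`mapper` is an assumed Jackson ObjectMapper instance):
//
//   import com.fasterxml.jackson.databind.module.SimpleModule
//   val module = new SimpleModule()
//   module.addSerializer(classOf[OrderActivity], new OrderActivitySerializer)
//   mapper.registerModule(module)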
|
Lvxingpai/Hanse
|
app/core/formatter/marketplace/order/OrderActivitySerializer.scala
|
Scala
|
apache-2.0
| 1,414 |
package sbtazurepack.settings
trait CsPackageSettings {
override val toString = "not defined"
}
|
kostrse/sbt-azurepack
|
src/main/scala/sbtazurepack/settings/CsPackageSettings.scala
|
Scala
|
mit
| 100 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations.calculations
import uk.gov.hmrc.ct.computations.{CP286, CP294, CP998}
trait TradingLossesThisAndLaterPeriodCalculation {
def tradingLosses(cp286: CP286, cp998: CP998): CP294 = {
CP294(cp286.orZero + cp998.orZero)
}
}
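// A hedged worked example (box constructor shapes elided): with CP286 carrying 100 and
// CP998 carrying 50, tradingLosses yields CP294(150); an undefined box contributes zero
// via orZero.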
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/calculations/TradingLossesThisAndLaterPeriodCalculation.scala
|
Scala
|
apache-2.0
| 869 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.TimeZone
import scala.collection.JavaConverters._
import org.junit.Assert
import org.scalatest.Assertions
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.columnar.InMemoryRelation
import org.apache.spark.storage.StorageLevel
abstract class QueryTest extends PlanTest {
protected def spark: SparkSession
/**
* Runs the plan and makes sure the answer contains all of the keywords.
*/
def checkKeywordsExist(df: DataFrame, keywords: String*): Unit = {
val outputs = df.collect().map(_.mkString).mkString
for (key <- keywords) {
assert(outputs.contains(key), s"Failed for $df ($key doesn't exist in result)")
}
}
/**
* Runs the plan and makes sure the answer does NOT contain any of the keywords.
*/
def checkKeywordsNotExist(df: DataFrame, keywords: String*): Unit = {
val outputs = df.collect().map(_.mkString).mkString
for (key <- keywords) {
assert(!outputs.contains(key), s"Failed for $df ($key existed in the result)")
}
}
/**
* Evaluates a dataset to make sure that the result of calling collect matches the given
* expected answer.
*/
protected def checkDataset[T](
ds: => Dataset[T],
expectedAnswer: T*): Unit = {
val result = getResult(ds)
if (!QueryTest.compare(result.toSeq, expectedAnswer)) {
fail(
s"""
|Decoded objects do not match expected objects:
|expected: $expectedAnswer
|actual: ${result.toSeq}
|${ds.exprEnc.deserializer.treeString}
""".stripMargin)
}
}
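  // A hedged usage sketch (Person and the dataset are assumptions for illustration,
  // in a suite that mixes in SharedSparkSession for the implicits):
  //
  //   case class Person(name: String, age: Int)
  //   import testImplicits._
  //   val ds = Seq(Person("a", 1), Person("b", 2)).toDS()
  //   checkDataset(ds, Person("a", 1), Person("b", 2))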
/**
* Evaluates a dataset to make sure that the result of calling collect matches the given
* expected answer, after sort.
*/
protected def checkDatasetUnorderly[T : Ordering](
ds: => Dataset[T],
expectedAnswer: T*): Unit = {
val result = getResult(ds)
if (!QueryTest.compare(result.toSeq.sorted, expectedAnswer.sorted)) {
fail(
s"""
|Decoded objects do not match expected objects:
|expected: $expectedAnswer
|actual: ${result.toSeq}
|${ds.exprEnc.deserializer.treeString}
""".stripMargin)
}
}
private def getResult[T](ds: => Dataset[T]): Array[T] = {
val analyzedDS = try ds catch {
case ae: AnalysisException =>
if (ae.plan.isDefined) {
fail(
s"""
|Failed to analyze query: $ae
|${ae.plan.get}
|
|${stackTraceToString(ae)}
""".stripMargin)
} else {
throw ae
}
}
assertEmptyMissingInput(analyzedDS)
try ds.collect() catch {
case e: Exception =>
fail(
s"""
|Exception collecting dataset as objects
|${ds.exprEnc}
|${ds.exprEnc.deserializer.treeString}
|${ds.queryExecution}
""".stripMargin, e)
}
}
/**
* Runs the plan and makes sure the answer matches the expected result.
*
* @param df the [[DataFrame]] to be executed
* @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
*/
protected def checkAnswer(df: => DataFrame, expectedAnswer: Seq[Row]): Unit = {
val analyzedDF = try df catch {
case ae: AnalysisException =>
if (ae.plan.isDefined) {
fail(
s"""
|Failed to analyze query: $ae
|${ae.plan.get}
|
|${stackTraceToString(ae)}
|""".stripMargin)
} else {
throw ae
}
}
assertEmptyMissingInput(analyzedDF)
QueryTest.checkAnswer(analyzedDF, expectedAnswer)
}
protected def checkAnswer(df: => DataFrame, expectedAnswer: Row): Unit = {
checkAnswer(df, Seq(expectedAnswer))
}
protected def checkAnswer(df: => DataFrame, expectedAnswer: DataFrame): Unit = {
checkAnswer(df, expectedAnswer.collect())
}
/**
* Runs the plan and makes sure the answer is within absTol of the expected result.
*
* @param dataFrame the [[DataFrame]] to be executed
* @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
* @param absTol the absolute tolerance between actual and expected answers.
*/
protected def checkAggregatesWithTol(dataFrame: DataFrame,
expectedAnswer: Seq[Row],
absTol: Double): Unit = {
// TODO: catch exceptions in data frame execution
val actualAnswer = dataFrame.collect()
require(actualAnswer.length == expectedAnswer.length,
s"actual num rows ${actualAnswer.length} != expected num of rows ${expectedAnswer.length}")
actualAnswer.zip(expectedAnswer).foreach {
case (actualRow, expectedRow) =>
QueryTest.checkAggregatesWithTol(actualRow, expectedRow, absTol)
}
}
protected def checkAggregatesWithTol(dataFrame: DataFrame,
expectedAnswer: Row,
absTol: Double): Unit = {
checkAggregatesWithTol(dataFrame, Seq(expectedAnswer), absTol)
}
/**
* Asserts that a given [[Dataset]] will be executed using the given number of cached results.
*/
def assertCached(query: Dataset[_], numCachedTables: Int = 1): Unit = {
val planWithCaching = query.queryExecution.withCachedData
val cachedData = planWithCaching collect {
case cached: InMemoryRelation => cached
}
assert(
cachedData.size == numCachedTables,
s"Expected query to contain $numCachedTables, but it actually had ${cachedData.size}\\n" +
planWithCaching)
}
/**
* Asserts that a given [[Dataset]] will be executed using the cache with the given name and
* storage level.
*/
def assertCached(query: Dataset[_], cachedName: String, storageLevel: StorageLevel): Unit = {
val planWithCaching = query.queryExecution.withCachedData
val matched = planWithCaching.collectFirst { case cached: InMemoryRelation =>
val cacheBuilder = cached.cacheBuilder
cachedName == cacheBuilder.tableName.get &&
(storageLevel == cacheBuilder.storageLevel)
}.getOrElse(false)
assert(matched, s"Expected query plan to hit cache $cachedName with storage " +
s"level $storageLevel, but it doesn't.")
}
/**
* Asserts that a given [[Dataset]] does not have missing inputs in all the analyzed plans.
*/
def assertEmptyMissingInput(query: Dataset[_]): Unit = {
assert(query.queryExecution.analyzed.missingInput.isEmpty,
s"The analyzed logical plan has missing inputs:\\n${query.queryExecution.analyzed}")
assert(query.queryExecution.optimizedPlan.missingInput.isEmpty,
s"The optimized logical plan has missing inputs:\\n${query.queryExecution.optimizedPlan}")
assert(query.queryExecution.executedPlan.missingInput.isEmpty,
s"The physical plan has missing inputs:\\n${query.queryExecution.executedPlan}")
}
}
object QueryTest extends Assertions {
/**
* Runs the plan and makes sure the answer matches the expected result.
*
* @param df the DataFrame to be executed
* @param expectedAnswer the expected result in a Seq of Rows.
* @param checkToRDD whether to verify deserialization to an RDD. This runs the query twice.
*/
def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row], checkToRDD: Boolean = true): Unit = {
getErrorMessageInCheckAnswer(df, expectedAnswer, checkToRDD) match {
case Some(errorMessage) => fail(errorMessage)
case None =>
}
}
/**
* Runs the plan and makes sure the answer matches the expected result.
   * If there was an exception during the execution, or the contents of the DataFrame do
   * not match the expected result, an error message will be returned. Otherwise, None
   * will be returned.
*
* @param df the DataFrame to be executed
* @param expectedAnswer the expected result in a Seq of Rows.
* @param checkToRDD whether to verify deserialization to an RDD. This runs the query twice.
*/
def getErrorMessageInCheckAnswer(
df: DataFrame,
expectedAnswer: Seq[Row],
checkToRDD: Boolean = true): Option[String] = {
val isSorted = df.logicalPlan.collect { case s: logical.Sort => s }.nonEmpty
if (checkToRDD) {
SQLExecution.withSQLConfPropagated(df.sparkSession) {
df.rdd.count() // Also attempt to deserialize as an RDD [SPARK-15791]
}
}
val sparkAnswer = try df.collect().toSeq catch {
case e: Exception =>
val errorMessage =
s"""
|Exception thrown while executing query:
|${df.queryExecution}
|== Exception ==
|$e
|${org.apache.spark.sql.catalyst.util.stackTraceToString(e)}
""".stripMargin
return Some(errorMessage)
}
sameRows(expectedAnswer, sparkAnswer, isSorted).map { results =>
s"""
|Results do not match for query:
|Timezone: ${TimeZone.getDefault}
|Timezone Env: ${sys.env.getOrElse("TZ", "")}
|
|${df.queryExecution}
|== Results ==
|$results
""".stripMargin
}
}
def prepareAnswer(answer: Seq[Row], isSorted: Boolean): Seq[Row] = {
// Converts data to types that we can do equality comparison using Scala collections.
// For BigDecimal type, the Scala type has a better definition of equality test (similar to
// Java's java.math.BigDecimal.compareTo).
    // For binary arrays, we convert them to Seq to avoid calling java.util.Arrays.equals
    // for the equality test.
val converted: Seq[Row] = answer.map(prepareRow)
if (!isSorted) converted.sortBy(_.toString()) else converted
}
// We need to call prepareRow recursively to handle schemas with struct types.
def prepareRow(row: Row): Row = {
Row.fromSeq(row.toSeq.map {
case null => null
case bd: java.math.BigDecimal => BigDecimal(bd)
// Equality of WrappedArray differs for AnyVal and AnyRef in Scala 2.12.2+
case seq: Seq[_] => seq.map {
case b: java.lang.Byte => b.byteValue
case s: java.lang.Short => s.shortValue
case i: java.lang.Integer => i.intValue
case l: java.lang.Long => l.longValue
case f: java.lang.Float => f.floatValue
case d: java.lang.Double => d.doubleValue
case x => x
}
// Convert array to Seq for easy equality check.
case b: Array[_] => b.toSeq
case r: Row => prepareRow(r)
case o => o
})
}
private def genError(
expectedAnswer: Seq[Row],
sparkAnswer: Seq[Row],
isSorted: Boolean = false): String = {
val getRowType: Option[Row] => String = row =>
row.map(row =>
if (row.schema == null) {
"struct<>"
} else {
s"${row.schema.catalogString}"
}).getOrElse("struct<>")
s"""
|== Results ==
|${
sideBySide(
s"== Correct Answer - ${expectedAnswer.size} ==" +:
getRowType(expectedAnswer.headOption) +:
prepareAnswer(expectedAnswer, isSorted).map(_.toString()),
s"== Spark Answer - ${sparkAnswer.size} ==" +:
getRowType(sparkAnswer.headOption) +:
prepareAnswer(sparkAnswer, isSorted).map(_.toString())).mkString("\\n")
}
""".stripMargin
}
def includesRows(
expectedRows: Seq[Row],
sparkAnswer: Seq[Row]): Option[String] = {
if (!prepareAnswer(expectedRows, true).toSet.subsetOf(prepareAnswer(sparkAnswer, true).toSet)) {
return Some(genError(expectedRows, sparkAnswer, true))
}
None
}
private def compare(obj1: Any, obj2: Any): Boolean = (obj1, obj2) match {
case (null, null) => true
case (null, _) => false
case (_, null) => false
case (a: Array[_], b: Array[_]) =>
a.length == b.length && a.zip(b).forall { case (l, r) => compare(l, r)}
case (a: Map[_, _], b: Map[_, _]) =>
a.size == b.size && a.keys.forall { aKey =>
b.keys.find(bKey => compare(aKey, bKey)).exists(bKey => compare(a(aKey), b(bKey)))
}
case (a: Iterable[_], b: Iterable[_]) =>
a.size == b.size && a.zip(b).forall { case (l, r) => compare(l, r)}
case (a: Product, b: Product) =>
compare(a.productIterator.toSeq, b.productIterator.toSeq)
case (a: Row, b: Row) =>
compare(a.toSeq, b.toSeq)
// 0.0 == -0.0, turn float/double to bits before comparison, to distinguish 0.0 and -0.0.
case (a: Double, b: Double) =>
java.lang.Double.doubleToRawLongBits(a) == java.lang.Double.doubleToRawLongBits(b)
case (a: Float, b: Float) =>
java.lang.Float.floatToRawIntBits(a) == java.lang.Float.floatToRawIntBits(b)
case (a, b) => a == b
}
def sameRows(
expectedAnswer: Seq[Row],
sparkAnswer: Seq[Row],
isSorted: Boolean = false): Option[String] = {
if (!compare(prepareAnswer(expectedAnswer, isSorted), prepareAnswer(sparkAnswer, isSorted))) {
return Some(genError(expectedAnswer, sparkAnswer, isSorted))
}
None
}
/**
* Runs the plan and makes sure the answer is within absTol of the expected result.
*
* @param actualAnswer the actual result in a [[Row]].
   * @param expectedAnswer the expected result in a [[Row]].
* @param absTol the absolute tolerance between actual and expected answers.
*/
protected def checkAggregatesWithTol(actualAnswer: Row, expectedAnswer: Row, absTol: Double) = {
require(actualAnswer.length == expectedAnswer.length,
s"actual answer length ${actualAnswer.length} != " +
s"expected answer length ${expectedAnswer.length}")
// TODO: support other numeric types besides Double
// TODO: support struct types?
actualAnswer.toSeq.zip(expectedAnswer.toSeq).foreach {
case (actual: Double, expected: Double) =>
assert(math.abs(actual - expected) < absTol,
s"actual answer $actual not within $absTol of correct answer $expected")
case (actual, expected) =>
assert(actual == expected, s"$actual did not equal $expected")
}
}
def checkAnswer(df: DataFrame, expectedAnswer: java.util.List[Row]): Unit = {
getErrorMessageInCheckAnswer(df, expectedAnswer.asScala.toSeq) match {
case Some(errorMessage) => Assert.fail(errorMessage)
case None =>
}
}
}
class QueryTestSuite extends QueryTest with test.SharedSparkSession {
test("SPARK-16940: checkAnswer should raise TestFailedException for wrong results") {
intercept[org.scalatest.exceptions.TestFailedException] {
checkAnswer(sql("SELECT 1"), Row(2) :: Nil)
}
}
}
|
ueshin/apache-spark
|
sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
|
Scala
|
apache-2.0
| 15,424 |
/*
* Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.storage.kinesis.s3
// Java
import java.io.File
import java.util.Properties
// Argot
import org.clapper.argot._
// Config
import com.typesafe.config.{Config, ConfigFactory}
// AWS libs
import com.amazonaws.auth.AWSCredentialsProvider
// AWS Kinesis Connector libs
import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration
// This project
import sinks._
/**
 * The entrypoint class for the Kinesis-S3 Sink application.
*/
object SinkApp extends App {
// Argument specifications
import ArgotConverters._
// General bumf for our app
val parser = new ArgotParser(
programName = "generated",
compactUsage = true,
preUsage = Some("%s: Version %s. Copyright (c) 2013, %s.".format(
generated.Settings.name,
generated.Settings.version,
generated.Settings.organization)
)
)
// Optional config argument
val config = parser.option[Config](List("config"),
"filename",
"Configuration file.") {
(c, opt) =>
val file = new File(c)
if (file.exists) {
ConfigFactory.parseFile(file)
} else {
parser.usage("Configuration file \\"%s\\" does not exist".format(c))
ConfigFactory.empty()
}
}
parser.parse(args)
val conf = config.value.getOrElse(throw new RuntimeException("--config argument must be provided"))
// TODO: make the conf file more like the Elasticsearch equivalent
val kinesisSinkRegion = conf.getConfig("connector").getConfig("kinesis").getString("region")
val kinesisSinkEndpoint = s"https://kinesis.${kinesisSinkRegion}.amazonaws.com"
val kinesisSink = conf.getConfig("connector").getConfig("kinesis").getConfig("out")
val kinesisSinkName = kinesisSink.getString("stream-name")
val kinesisSinkShards = kinesisSink.getInt("shards")
val credentialConfig = conf.getConfig("connector").getConfig("aws")
val credentials = CredentialsLookup.getCredentialsProvider(credentialConfig.getString("access-key"), credentialConfig.getString("secret-key"))
val badSink = new KinesisSink(credentials, kinesisSinkEndpoint, kinesisSinkName, kinesisSinkShards)
val executor = new S3SinkExecutor(convertConfig(conf, credentials), badSink)
executor.run()
/**
* This function converts the config file into the format
* expected by the Kinesis connector interfaces.
*
   * @param conf The configuration HOCON
   * @param credentials The AWS credentials provider
* @return A KinesisConnectorConfiguration
*/
def convertConfig(conf: Config, credentials: AWSCredentialsProvider): KinesisConnectorConfiguration = {
val props = new Properties()
val connector = conf.resolve.getConfig("connector")
val kinesis = connector.getConfig("kinesis")
val kinesisIn = kinesis.getConfig("in")
val kinesisRegion = kinesis.getString("region")
val kEndpoint = s"https://kinesis.${kinesisSinkRegion}.amazonaws.com"
val streamName = kinesisIn.getString("stream-name")
val initialPosition = kinesisIn.getString("initial-position")
val appName = kinesis.getString("app-name")
val s3 = connector.getConfig("s3")
val s3Endpoint = s3.getString("endpoint")
val bucket = s3.getString("bucket")
val buffer = connector.getConfig("buffer")
val byteLimit = buffer.getString("byte-limit")
val recordLimit = buffer.getString("record-limit")
val timeLimit = buffer.getString("time-limit")
props.setProperty(KinesisConnectorConfiguration.PROP_KINESIS_INPUT_STREAM, streamName)
props.setProperty(KinesisConnectorConfiguration.PROP_KINESIS_ENDPOINT, kEndpoint)
props.setProperty(KinesisConnectorConfiguration.PROP_APP_NAME, appName)
props.setProperty(KinesisConnectorConfiguration.PROP_INITIAL_POSITION_IN_STREAM, initialPosition)
props.setProperty(KinesisConnectorConfiguration.PROP_S3_ENDPOINT, s3Endpoint)
props.setProperty(KinesisConnectorConfiguration.PROP_S3_BUCKET, bucket)
props.setProperty(KinesisConnectorConfiguration.PROP_BUFFER_BYTE_SIZE_LIMIT, byteLimit)
props.setProperty(KinesisConnectorConfiguration.PROP_BUFFER_RECORD_COUNT_LIMIT, recordLimit)
props.setProperty(KinesisConnectorConfiguration.PROP_BUFFER_MILLISECONDS_LIMIT, timeLimit)
props.setProperty(KinesisConnectorConfiguration.PROP_CONNECTOR_DESTINATION, "s3")
// The emit method retries sending to S3 indefinitely, so it only needs to be called once
props.setProperty(KinesisConnectorConfiguration.PROP_RETRY_LIMIT, "1")
new KinesisConnectorConfiguration(props, credentials)
}
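  // A hedged sketch of the HOCON shape this method reads (all values below are
  // assumptions for illustration; the keys are taken from the lookups above):
  //
  //   connector {
  //     kinesis {
  //       region = "us-east-1"
  //       app-name = "kinesis-s3-sink"
  //       in  { stream-name = "enriched-good", initial-position = "TRIM_HORIZON" }
  //       out { stream-name = "enriched-bad", shards = 1 }
  //     }
  //     s3     { endpoint = "https://s3.amazonaws.com", bucket = "my-example-bucket" }
  //     buffer { byte-limit = "4500000", record-limit = "500", time-limit = "60000" }
  //     aws    { access-key = "...", secret-key = "..." }
  //   }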
}
|
janeefting/snowplow
|
4-storage/kinesis-lzo-s3-sink/src/main/scala/com.snowplowanalytics.snowplow.storage.kinesis/s3/SinkApp.scala
|
Scala
|
apache-2.0
| 5,262 |
package com.edinhodzic.service.controller
import java.net.URI
import javax.ws.rs._
import javax.ws.rs.core.MediaType._
import javax.ws.rs.core.Response
import javax.ws.rs.core.Response.Status.{NOT_FOUND, NO_CONTENT}
import com.edinhodzic.service.domain.Identifiable
import com.edinhodzic.service.repository.AbstractPartialCrudRepository
import org.slf4j.{Logger, LoggerFactory}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
abstract class AbstractPartialRestController[T <: Identifiable : Manifest](abstractCrudRepository: AbstractPartialCrudRepository[T]) {
protected val logger: Logger = LoggerFactory getLogger getClass
@POST
@Consumes(Array(APPLICATION_JSON))
@Produces(Array(APPLICATION_JSON))
def post(resource: T): Response = {
logger info s"posting $resource"
process[T](abstractCrudRepository create resource,
t => Response created uri(t) entity t build())
}
@GET
@Path("{resourceId}")
@Produces(Array(APPLICATION_JSON))
def get(@PathParam("resourceId") resourceId: String): Response = {
logger info s"getting $resourceId"
process[Option[T]](abstractCrudRepository read resourceId, {
case Some(resource) => Response ok() entity resource build()
case None => notFound
})
}
@DELETE
@Path("{resourceId}")
def delete(@PathParam("resourceId") resourceId: String): Response = {
logger info s"deleting $resourceId"
process[Option[Unit]](abstractCrudRepository delete resourceId, {
case Some(resource) => noContent
case None => notFound
})
}
private def notFound: Response = Response status NOT_FOUND build()
protected def noContent: Response = Response status NO_CONTENT build()
protected def serverError: Response = Response serverError() build()
private def uri(resource: T)(implicit manifest: Manifest[T]): URI =
new URI(s"${manifest.runtimeClass.getSimpleName.toLowerCase}/${resource id}")
protected def process[S](repositoryFunction: (Try[S]), successFunction: (S => Response)): Response =
repositoryFunction match {
case Success(subject) => successFunction(subject)
case Failure(throwable) =>
logger error s"$throwable"
Response serverError() build()
}
}
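// A hedged usage sketch (Widget and WidgetRepository are assumptions for illustration,
// where Widget extends Identifiable and WidgetRepository extends
// AbstractPartialCrudRepository[Widget]):
//
//   @Path("widget")
//   class WidgetRestController(repository: WidgetRepository)
//     extends AbstractPartialRestController[Widget](repository)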
|
edinhodzic/jersey-rest-service
|
src/main/scala/com/edinhodzic/service/controller/AbstractPartialRestController.scala
|
Scala
|
apache-2.0
| 2,247 |
package pio.refactor
import io.prediction.controller.IEngineFactory
import io.prediction.controller.Engine
import io.prediction.controller._
//import io.prediction.workflow.CoreWorkflow
import grizzled.slf4j.Logger
case class Query(q: Int) extends Serializable
case class PredictedResult(p: Int) extends Serializable
case class ActualResult() extends Serializable
object VanillaEngine extends IEngineFactory {
def apply() = {
new Engine(
classOf[DataSource],
//classOf[Preparator],
PIdentityPreparator(classOf[DataSource]),
Map("algo" -> classOf[Algorithm]),
classOf[Serving])
}
}
object Runner {
@transient lazy val logger = Logger[this.type]
def main(args: Array[String]) {
val engine = VanillaEngine()
val engineParams = EngineParams(
algorithmParamsList = Seq(("algo", AlgorithmParams(2)))
)
logger.error("Runner. before evaluation!!!")
val evaluator = new VanillaEvaluator()
logger.error("Runner before runEval!!!")
Workflow.runEval(
engine = engine,
engineParams = engineParams,
evaluator = evaluator,
evaluatorParams = EmptyParams())
}
}
|
ch33hau/PredictionIO
|
examples/experimental/scala-refactor-test/src/main/scala/Engine.scala
|
Scala
|
apache-2.0
| 1,162 |
package net.mentalarray.doozie.Tasks
/**
* Created by kdivincenzo on 1/5/2015.
*/
abstract class AbstractCollectionTask(name: String) extends WorkflowTask(name) with Traversable[WorkflowTask] {
private var _tasks: TasksSequence = new TasksSequence
private var _continueOnError: Boolean = false
def addTask(task: WorkflowTask) = _tasks += task
def continueOnError = _continueOnError
def continueOnError_=(value: Boolean) = _continueOnError = value
def length = _tasks.length
override def validate: Unit = {
if (_tasks.length == 0) throw new WorkflowStateException(this, "No tasks in collection.")
}
override def foreach[U](f: (WorkflowTask) => U): Unit = _tasks.foreach(f)
}
|
antagonist112358/tomahawk
|
workflow-engine/src/net/mentalarray/doozie/Tasks/AbstractCollectionTask.scala
|
Scala
|
apache-2.0
| 705 |
import language.experimental.macros
import scala.reflect.macros.blackbox.Context
object Macro {
def apply(a: Any): Any = macro impl
def impl(c: Context)(a: c.Tree): c.Tree = {
import c.universe._
q"{$a; true}"
}
}
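// A hedged usage sketch: the macro splices its argument into a block and yields true, e.g.
//
//   val r: Any = Macro(println("side effect")) // expands to { println("side effect"); true }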
|
AlexSikia/dotty
|
tests/pending/pos/t8064b/Macro_1.scala
|
Scala
|
bsd-3-clause
| 230 |