code | repo_name | path | language | license | size
---|---|---|---|---|---
package com.github.tminglei.slickpg
import org.scalatest.FunSuite
import slick.jdbc.GetResult
import scala.concurrent.Await
import scala.concurrent.duration._
class PgJsonSupportSuite extends FunSuite {
import MyPostgresDriver.api._
val db = Database.forURL(url = dbUrl, driver = "org.postgresql.Driver")
case class JsonBean(id: Long, json: JsonString)
class JsonTestTable(tag: Tag) extends Table[JsonBean](tag, "JsonTest0") {
def id = column[Long]("id", O.AutoInc, O.PrimaryKey)
def json = column[JsonString]("json", O.Default(JsonString(""" {"a":"v1","b":2} """)))
def * = (id, json) <> (JsonBean.tupled, JsonBean.unapply)
}
val JsonTests = TableQuery[JsonTestTable]
//------------------------------------------------------------------------------
val testRec1 = JsonBean(33L, JsonString(""" { "a":101, "b":"aaa", "c":[3,4,5,9] } """))
val testRec2 = JsonBean(35L, JsonString(""" [ {"a":"v1","b":2}, {"a":"v5","b":3} ] """))
val testRec3 = JsonBean(37L, JsonString(""" ["a", "b"] """))
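// Note: the operator aliases exercised below map to PostgreSQL json operators/functions:
// +> / +>> get a field as json/text, ~> / ~>> index into an array, #> / #>> follow a path,
// @> / <@: test containment, ?? / ?| / ?& test key existence, and arrayLength / arrayElements /
// objectKeys / jsonType wrap json_array_length, json_array_elements, json_object_keys and json_typeof.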
test("Json Lifted support") {
val json1 = """{"a":"v1","b":2}"""
val json2 = """{"a":"v5","b":3}"""
Await.result(db.run(
DBIO.seq(
JsonTests.schema create,
///
JsonTests forceInsertAll List(testRec1, testRec2, testRec3)
).andThen(
DBIO.seq(
JsonTests.filter(_.id === testRec2.id.bind).map(_.json).result.head.map(
r => assert("""[{"a":"v1","b":2},{"a":"v5","b":3}]""" === r.value.replace(" ", ""))
),
// ->>/->
JsonTests.filter(_.json.+>>("a") === "101".bind).map(_.json.+>>("c")).result.head.map(
r => assert("[3,4,5,9]" === r.replace(" ", ""))
),
JsonTests.filter(_.json.+>>("a") === "101".bind).map(_.json.+>("c")).result.head.map(
r => assert("[3,4,5,9]" === r.value.replace(" ", ""))
),
JsonTests.filter(_.id === testRec2.id).map(_.json.~>>(1)).result.head.map(
r => assert("""{"a":"v5","b":3}""" === r.replace(" ", ""))
),
JsonTests.filter(_.id === testRec2.id).map(_.json.~>(1)).result.head.map(
r => assert(json2 === r.value.replace(" ", ""))
),
// #>>/#>
JsonTests.filter(_.id === testRec1.id).map(_.json.#>(List("c"))).result.head.map(
r => assert("[3,4,5,9]" === r.value.replace(" ", ""))
),
JsonTests.filter(_.json.#>>(List("a")) === "101").result.head.map(
r => assert(testRec1.json.value.replace(" ", "") === r.json.value.replace(" ", ""))
),
// {}_array_length
JsonTests.filter(_.id === testRec2.id).map(_.json.arrayLength).result.head.map(
r => assert(2 === r)
),
// {}_array_elements
JsonTests.filter(_.id === testRec2.id).map(_.json.arrayElements).to[List].result.map(
r => assert(List(json1, json2) === r.map(_.value.replace(" ", "")))
),
JsonTests.filter(_.id === testRec2.id).map(_.json.arrayElements).result.head.map(
r => assert(json1 === r.value.replace(" ", ""))
),
// {}_array_elements_text
JsonTests.filter(_.id === testRec2.id).map(_.json.arrayElementsText).result.head.map(
r => assert(json1 === r.replace(" ", ""))
),
// {}_object_keys
JsonTests.filter(_.id === testRec1.id).map(_.json.objectKeys).to[List].result.map(
r => assert(List("a","b","c") === r)
),
JsonTests.filter(_.id === testRec1.id).map(_.json.objectKeys).result.head.map(
r => assert("a" === r)
),
// @>
JsonTests.filter(_.json @> JsonString(""" {"b":"aaa"} """).bind).map(_.id).result.head.map(
r => assert(33L === r)
),
// <@
JsonTests.filter(JsonString(""" {"b":"aaa"} """).bind <@: _.json).map(_.id).result.head.map(
r => assert(33L === r)
),
// {}_typeof
JsonTests.filter(_.id === testRec1.id).map(_.json.+>("a").jsonType).result.head.map(
r => assert("number" === r.toLowerCase)
),
// ?
JsonTests.filter(_.json ?? "b".bind).map(_.json).to[List].result.map(
r => assert(List(testRec1, testRec3).map(_.json.value.replace(" ", "")) === r.map(_.value.replace(" ", "")))
),
// ?|
JsonTests.filter(_.json ?| List("a", "c").bind).map(_.json).to[List].result.map(
r => assert(List(testRec1, testRec3).map(_.json.value.replace(" ", "")) === r.map(_.value.replace(" ", "")))
),
// ?&
JsonTests.filter(_.json ?& List("a", "c").bind).map(_.json).to[List].result.map(
r => assert(List(testRec1).map(_.json.value.replace(" ", "")) === r.map(_.value.replace(" ", "")))
)
)
).andFinally(
JsonTests.schema drop
).transactionally
), Duration.Inf)
}
//------------------------------------------------------------------------------
test("Json Plain SQL support") {
import MyPostgresDriver.plainAPI._
implicit val getJsonBeanResult = GetResult(r => JsonBean(r.nextLong(), r.nextJson()))
val b = JsonBean(34L, JsonString(""" { "a":101, "b":"aaa", "c":[3,4,5,9] } """))
Await.result(db.run(
DBIO.seq(
sqlu"""create table JsonTest0(
id int8 not null primary key,
json #${MyPostgresDriver.pgjson} not null)
""",
///
sqlu""" insert into JsonTest0 values(${b.id}, ${b.json}) """,
sql""" select * from JsonTest0 where id = ${b.id} """.as[JsonBean].head.map(
r => assert(b.json.value.replace(" ", "") === r.json.value.replace(" ", ""))
),
///
sqlu"drop table if exists JsonTest0 cascade"
).transactionally
), Duration.Inf)
}
}
| bearrito/slick-pg | src/test/scala/com/github/tminglei/slickpg/PgJsonSupportSuite.scala | Scala | bsd-2-clause | 5,898 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.words
import org.scalatest.matchers._
import org.scalactic._
import scala.util.matching.Regex
import org.scalatest.Resources
import org.scalatest.UnquotedString
import org.scalatest.MatchersHelper.startWithRegexWithGroups
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="../Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class StartWithWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* "1.7b" should (startWith ("1.7") and startWith ("1.7b"))
* ^
* </pre>
*/
def apply(right: String): Matcher[String] =
new Matcher[String] {
def apply(left: String): MatchResult =
MatchResult(
left startsWith right,
Resources("didNotStartWith"),
Resources("startedWith"),
Vector(left, right)
)
override def toString: String = "startWith (" + Prettifier.default(right) + ")"
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* val decimal = """(-)?(\d+)(\.\d*)?"""
* "1.7b" should (startWith regex (decimal) and startWith regex (decimal))
* ^
* </pre>
*/
def regex[T <: String](right: T): Matcher[T] = regex(right.r)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should not { startWith regex ("a(b*)c" withGroup "bb") }
* ^
* </pre>
*/
def regex(regexWithGroups: RegexWithGroups) =
new Matcher[String] {
def apply(left: String): MatchResult =
startWithRegexWithGroups(left, regexWithGroups.regex, regexWithGroups.groups)
override def toString: String = "startWith regex " + Prettifier.default(regexWithGroups)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* val decimalRegex = """(-)?(\d+)(\.\d*)?""".r
* "1.7" should (startWith regex (decimalRegex) and startWith regex (decimalRegex))
* ^
* </pre>
*/
def regex(rightRegex: Regex): Matcher[String] =
new Matcher[String] {
def apply(left: String): MatchResult =
MatchResult(
rightRegex.pattern.matcher(left).lookingAt,
Resources("didNotStartWithRegex"),
Resources("startedWithRegex"),
Vector(left, UnquotedString(rightRegex.toString))
)
override def toString: String = "startWith regex " + Prettifier.default(rightRegex)
}
/**
* Overrides toString to return "startWith"
*/
override def toString: String = "startWith"
}
| travisbrown/scalatest | src/main/scala/org/scalatest/words/StartWithWord.scala | Scala | apache-2.0 | 3,354 |
/**
* MIT License
*
* Copyright (c) 2016-2018 James Sherwood-Jones <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.jsherz.luskydive.dao
import java.sql.Date
import java.util.UUID
import com.jsherz.luskydive.core.{Course, CourseWithOrganisers}
import com.jsherz.luskydive.json.CoursesJsonSupport._
import com.jsherz.luskydive.json.{CourseSpaceWithMember, CourseWithNumSpaces}
import com.jsherz.luskydive.util.Util
import scalaz.{-\/, \/, \/-}
import scala.concurrent.Future
/**
* A [[CourseDao]] that returns canned responses.
*/
class StubCourseDao extends CourseDao {
/**
* Try and find a course with the given UUID.
*
* @param uuid
* @return
*/
override def get(uuid: UUID): Future[Option[CourseWithOrganisers]] = {
if (StubCourseDao.validCourseUuid.equals(uuid)) {
Future.successful(Some(StubCourseDao.validCourse))
} else if (StubCourseDao.notFoundCourseUuid.equals(uuid)) {
Future.successful(None)
} else {
throw new RuntimeException(s"unknown uuid $uuid used with stub")
}
}
/**
* Attempt to find courses within the given dates (inclusive).
*
* @param startDate
* @param endDate
* @return
*/
override def find(startDate: Date, endDate: Date): Future[Seq[CourseWithNumSpaces]] = {
Future.successful(StubCourseDao.coursesWithNumSpaces)
}
/**
* Get the space(s) (if any) on a course.
*
* @param uuid
* @return
*/
override def spaces(uuid: UUID): Future[Seq[CourseSpaceWithMember]] = {
if (StubCourseDao.validCourseUuid.equals(uuid)) {
Future.successful(StubCourseDao.validCourseSpaces)
} else if (StubCourseDao.notFoundCourseUuid.equals(uuid)) {
Future.successful(Seq())
} else {
throw new RuntimeException(s"unknown uuid $uuid used with stub")
}
}
/**
* Create a course on the given date and add numSpaces to it.
*
* @param course
* @param numSpaces
* @return
*/
override def create(course: Course, numSpaces: Int): Future[String \/ UUID] = {
Future.successful {
if (numSpaces >= CourseSpaceDaoImpl.MIN_SPACES && numSpaces <= CourseSpaceDaoImpl.MAX_SPACES) {
\/-(course.uuid)
} else {
-\/("error.invalidNumSpaces")
}
}
}
}
object StubCourseDao {
val validCourse = Util.fixture[CourseWithOrganisers]("aaf47dc8.json")
val validCourseUuid = validCourse.course.uuid
val validCourseSpaces = Util.fixture[Seq[CourseSpaceWithMember]]("aaf47dc8.json")
val notFoundCourseUuid = UUID.fromString("f309d4ca-c8b2-44ac-8380-678bb7bcc3cb")
val courses: Seq[Course] = Seq()
val coursesWithNumSpaces: Seq[CourseWithNumSpaces] = Seq()
}
| jSherz/lsd-members | backend/src/test/scala/com/jsherz/luskydive/dao/StubCourseDao.scala | Scala | mit | 3,783 |
package com.github.sparkfy.rpc
import com.github.sparkfy.{SparkfyException, Logging}
import com.github.sparkfy.util.RpcUtils
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
* A reference for a remote [[RpcEndpoint]]. [[RpcEndpointRef]] is thread-safe.
*/
abstract class RpcEndpointRef(conf: Map[String, String]) extends Serializable with Logging {
private[this] val maxRetries = RpcUtils.numRetries(conf)
private[this] val retryWaitMs = RpcUtils.retryWaitMs(conf)
private[this] val defaultAskTimeout = RpcUtils.askRpcTimeout(conf)
/**
* return the address for the [[RpcEndpointRef]]
*/
def address: RpcAddress
def name: String
/**
* Sends a one-way asynchronous message. Fire-and-forget semantics.
*/
def send(message: Any): Unit
/**
* Send a message to the corresponding [[RpcEndpoint.receiveAndReply]] and return a [[Future]] to
* receive the reply within the specified timeout.
*
* This method only sends the message once and never retries.
*/
def ask[T: ClassTag](message: Any, timeout: RpcTimeout): Future[T]
/**
* Send a message to the corresponding [[RpcEndpoint.receiveAndReply]] and return a [[Future]] to
* receive the reply within a default timeout.
*
* This method only sends the message once and never retries.
*/
def ask[T: ClassTag](message: Any): Future[T] = ask(message, defaultAskTimeout)
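// Illustrative usage sketch (the `ref` value and the Heartbeat/Ping/Pong message types are hypothetical):
//   ref.send(Heartbeat)                            // fire-and-forget
//   val reply: Future[Pong] = ref.ask[Pong](Ping)  // asynchronous request/reply with the default timeout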
/**
* Send a message to the corresponding [[RpcEndpoint]] and get its result within a default
* timeout, or throw a SparkfyException if this fails even after the default number of retries.
* The default `timeout` will be used in every trial of calling `sendWithReply`. Because this
* method retries, the message handling in the receiver side should be idempotent.
*
* Note: this is a blocking action which may cost a lot of time, so don't call it in a message
* loop of [[RpcEndpoint]].
*
* @param message the message to send
* @tparam T type of the reply message
* @return the reply message from the corresponding [[RpcEndpoint]]
*/
def askWithRetry[T: ClassTag](message: Any): T = askWithRetry(message, defaultAskTimeout)
/**
* Send a message to the corresponding [[RpcEndpoint.receive]] and get its result within a
* specified timeout, throw a SparkfyException if this fails even after the specified number of
* retries. `timeout` will be used in every trial of calling `sendWithReply`. Because this method
* retries, the message handling in the receiver side should be idempotent.
*
* Note: this is a blocking action which may cost a lot of time, so don't call it in a message
* loop of [[RpcEndpoint]].
*
* @param message the message to send
* @param timeout the timeout duration
* @tparam T type of the reply message
* @return the reply message from the corresponding [[RpcEndpoint]]
*/
def askWithRetry[T: ClassTag](message: Any, timeout: RpcTimeout): T = {
// TODO: Consider removing multiple attempts
var attempts = 0
var lastException: Exception = null
while (attempts < maxRetries) {
attempts += 1
try {
val future = ask[T](message, timeout)
val result = timeout.awaitResult(future)
if (result == null) {
throw new SparkfyException("RpcEndpoint returned null")
}
return result
} catch {
case ie: InterruptedException => throw ie
case e: Exception =>
lastException = e
logWarning(s"Error sending message [message = $message] in $attempts attempts", e)
}
if (attempts < maxRetries) {
Thread.sleep(retryWaitMs)
}
}
throw new SparkfyException(
s"Error sending message [message = $message]", lastException)
}
}
| sparkfy/sparkfy | sparkfy-common/src/main/scala/com/github/sparkfy/rpc/RpcEndpointRef.scala | Scala | apache-2.0 | 3,774 |
package ui
import java.awt.event.{ActionEvent, ActionListener}
import javax.swing.{JComboBox, AbstractButton}
import scala.language.reflectiveCalls
/**
* Created by weijiayi on 2/29/16.
*/
object MySwing {
def addAction(jButton: AbstractButton, action: () => Unit): Unit = {
jButton.addActionListener(new ActionListener {
override def actionPerformed(e: ActionEvent): Unit = action()
})
}
type ActionSource = { def addActionListener(l: ActionListener): Unit}
def reactAction(source: ActionSource)(action: => Unit): Unit ={
source.addActionListener(new ActionListener {
override def actionPerformed(e: ActionEvent): Unit = action
})
}
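// Illustrative usage (okButton and fontCombo are hypothetical Swing components):
//   MySwing.addAction(okButton, () => println("clicked"))
//   MySwing.reactAction(fontCombo) { refreshPreview() }  // any type exposing addActionListener works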
// trait ActionSource{
// def howToAdd: ActionListener => Unit
//
// def reactAction(action: =>Unit): Unit ={
// howToAdd(new ActionListener {
// override def actionPerformed(e: ActionEvent): Unit = action
// })
// }
// }
//
// implicit class ButtonIsActionSource(implicit jButton: AbstractButton) extends ActionSource {
// override def howToAdd: (ActionListener) => Unit = jButton.addActionListener
// }
//
// implicit class JComboboxIsActionSource[T](implicit jCombo: JComboBox[T]) extends ActionSource {
// override def howToAdd: (ActionListener) => Unit = jCombo.addActionListener
// }
}
| MrVPlussOne/Muse-CGH | src/ui/MySwing.scala | Scala | mit | 1,309 |
package com.marmoush.scalasamples.concepts
object Linearization {
trait Furry extends Animal
trait HasLegs extends Animal
trait FourLegged extends HasLegs
class Animal
class Cat extends Animal with Furry with FourLegged
// Animal:     Animal, AnyRef, Any
// Furry:      Furry, Animal, AnyRef, Any
// FourLegged: FourLegged, HasLegs, Animal, AnyRef, Any
// HasLegs:    HasLegs, Animal, AnyRef, Any
// Cat:        Cat, FourLegged, HasLegs, Furry, Animal, AnyRef, Any
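// Rule: the linearization of a class is the class itself followed by the linearizations of its
// parents taken right to left, keeping only the last occurrence of each repeated class; applying
// this to Cat gives the order listed above.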
}
| IsmailMarmoush/scala-samples | scalalang/src/main/scala/com/marmoush/scalasamples/concepts/Linearization.scala | Scala | agpl-3.0 | 434 |
package ems.util
import java.net.{URLDecoder, URI, URLEncoder}
import scala.Option
case class URIBuilder(scheme: Option[String], host: Option[String], port: Option[Int], path: List[Segment], params: Map[String, Seq[String]], pathEndsWithSlash: Boolean = false) {
def withScheme(scheme: String) = copy(scheme = Some(scheme))
def withHost(host: String) = copy(host = Some(host))
def withPort(port: Int) = copy(port = Some(port))
def segments(segments: String*) = copy(path = path ::: segments.map(Segment(_)).toList)
def path(path: String): URIBuilder = {
val (segments, endsWithSlash) = URIBuilder.decodePath(path)
copy(path = this.path ::: segments, pathEndsWithSlash = endsWithSlash)
}
def replacePath(path: String): URIBuilder = {
val (segments, endsWithSlash) = URIBuilder.decodePath(path)
copy(path = segments, pathEndsWithSlash = endsWithSlash)
}
def emptyPath() = copy(path = Nil)
def emptyParams() = copy(params = Map.empty)
def queryParam(name: String, value: String): URIBuilder = queryParam(name, Some(value))
def queryParam(name: String, value: Option[String]): URIBuilder = {
//TODO: Maybe the None case should remove all values?
val values = params.get(name).getOrElse(Nil) ++ value.toList
if (value.isEmpty) this else copy(params = params + (name -> values))
}
def replaceSegments(segments: Segment*) = copy(path = segments.toList)
def replaceQueryParam(name: String, value:String) = copy(params = params + (name -> List(value)))
def build() = {
def mkParamString() = {
params.map{case (k, v) => v.map(i => "%s=%s".format(k, i)).mkString("&")}.mkString("&")
}
val par = if (params.isEmpty) None else Some(mkParamString()).filterNot(_.isEmpty)
new URI(
scheme.getOrElse(null),
null,
host.getOrElse(null),
port.getOrElse(-1),
if (path.isEmpty) null else path.map(_.encoded).mkString("/", "/", if (pathEndsWithSlash) "/" else ""),
par.getOrElse(null),
null
)
}
override def toString = build().toString
}
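// Illustrative usage (host and parameter values are hypothetical):
//   URIBuilder.empty.withScheme("http").withHost("example.com").path("events/2014").queryParam("slug", "javazone").build()
//   // yields http://example.com/events/2014?slug=javazone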
object URIBuilder {
val KeyValue = """(?i)(\w+)=(.*)?""".r
def apply(input: String): URIBuilder = apply(URI.create(input))
def apply(uri: URI): URIBuilder = {
val (path, endsWithSlash) = decodePath(uri.getPath)
def buildMap: (String) => Map[String, scala.List[String]] = s => {
val arr: Array[String] = s.split("&")
arr.foldLeft(Map[String, List[String]]()) {
case (m, part) => part match {
case KeyValue(k, "") => m + (k -> m.get(k).getOrElse(Nil))
case KeyValue(k, v) => m + (k -> (m.get(k).getOrElse(Nil) ++ List(v)))
}
}
}
val params = Option(uri.getQuery).map(buildMap).getOrElse(Map[String, Seq[String]]())
new URIBuilder(Option(uri.getScheme), Option(uri.getHost), Option(uri.getPort).filterNot(_ == -1), path, params, endsWithSlash)
}
def fromPath(path: String): URIBuilder = {
empty.path(path)
}
private def decodePath(path: String): (List[Segment], Boolean) = {
Option(path).filterNot(_.trim.isEmpty).map{ p =>
if(p.startsWith("/")) p.substring(1) else p
}.map(p => p.split("/").map(Segment.decoded(_)).toList -> p.endsWith("/")).getOrElse(Nil -> false)
}
def empty = new URIBuilder(None, None, None, Nil, Map())
}
case class Segment(seg: String) {
def encoded = URLEncoder.encode(seg, "UTF-8")
}
object Segment {
def decoded(seg: String) = Segment(URLDecoder.decode(seg, "UTF-8"))
}
| chrissearle/ems-redux | src/main/scala/ems/util/URIBuilder.scala | Scala | apache-2.0 | 3,487 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.expressions.scalalang.typed
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{BooleanType, IntegerType, StringType, StructType}
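// Each Aggregator[IN, BUF, OUT] below follows the same typed-aggregation contract: `zero` supplies
// the initial buffer, `reduce` folds one input value into a buffer, `merge` combines partial
// buffers across partitions, `finish` maps the final buffer to the output value, and the two
// encoder methods tell Spark how to serialize the buffer and output types.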
object ComplexResultAgg extends Aggregator[(String, Int), (Long, Long), (Long, Long)] {
override def zero: (Long, Long) = (0, 0)
override def reduce(countAndSum: (Long, Long), input: (String, Int)): (Long, Long) = {
(countAndSum._1 + 1, countAndSum._2 + input._2)
}
override def merge(b1: (Long, Long), b2: (Long, Long)): (Long, Long) = {
(b1._1 + b2._1, b1._2 + b2._2)
}
override def finish(reduction: (Long, Long)): (Long, Long) = reduction
override def bufferEncoder: Encoder[(Long, Long)] = Encoders.product[(Long, Long)]
override def outputEncoder: Encoder[(Long, Long)] = Encoders.product[(Long, Long)]
}
case class AggData(a: Int, b: String)
object ClassInputAgg extends Aggregator[AggData, Int, Int] {
override def zero: Int = 0
override def reduce(b: Int, a: AggData): Int = b + a.a
override def finish(reduction: Int): Int = reduction
override def merge(b1: Int, b2: Int): Int = b1 + b2
override def bufferEncoder: Encoder[Int] = Encoders.scalaInt
override def outputEncoder: Encoder[Int] = Encoders.scalaInt
}
object ClassBufferAggregator extends Aggregator[AggData, AggData, Int] {
override def zero: AggData = AggData(0, "")
override def reduce(b: AggData, a: AggData): AggData = AggData(b.a + a.a, "")
override def finish(reduction: AggData): Int = reduction.a
override def merge(b1: AggData, b2: AggData): AggData = AggData(b1.a + b2.a, "")
override def bufferEncoder: Encoder[AggData] = Encoders.product[AggData]
override def outputEncoder: Encoder[Int] = Encoders.scalaInt
}
object ComplexBufferAgg extends Aggregator[AggData, (Int, AggData), Int] {
override def zero: (Int, AggData) = 0 -> AggData(0, "0")
override def reduce(b: (Int, AggData), a: AggData): (Int, AggData) = (b._1 + 1, a)
override def finish(reduction: (Int, AggData)): Int = reduction._1
override def merge(b1: (Int, AggData), b2: (Int, AggData)): (Int, AggData) =
(b1._1 + b2._1, b1._2)
override def bufferEncoder: Encoder[(Int, AggData)] = Encoders.product[(Int, AggData)]
override def outputEncoder: Encoder[Int] = Encoders.scalaInt
}
object MapTypeBufferAgg extends Aggregator[Int, Map[Int, Int], Int] {
override def zero: Map[Int, Int] = Map.empty
override def reduce(b: Map[Int, Int], a: Int): Map[Int, Int] = b
override def finish(reduction: Map[Int, Int]): Int = 1
override def merge(b1: Map[Int, Int], b2: Map[Int, Int]): Map[Int, Int] = b1
override def bufferEncoder: Encoder[Map[Int, Int]] = ExpressionEncoder()
override def outputEncoder: Encoder[Int] = ExpressionEncoder()
}
object NameAgg extends Aggregator[AggData, String, String] {
def zero: String = ""
def reduce(b: String, a: AggData): String = a.b + b
def merge(b1: String, b2: String): String = b1 + b2
def finish(r: String): String = r
override def bufferEncoder: Encoder[String] = Encoders.STRING
override def outputEncoder: Encoder[String] = Encoders.STRING
}
object SeqAgg extends Aggregator[AggData, Seq[Int], Seq[(Int, Int)]] {
def zero: Seq[Int] = Nil
def reduce(b: Seq[Int], a: AggData): Seq[Int] = a.a +: b
def merge(b1: Seq[Int], b2: Seq[Int]): Seq[Int] = b1 ++ b2
def finish(r: Seq[Int]): Seq[(Int, Int)] = r.map(i => i -> i)
override def bufferEncoder: Encoder[Seq[Int]] = ExpressionEncoder()
override def outputEncoder: Encoder[Seq[(Int, Int)]] = ExpressionEncoder()
}
class ParameterizedTypeSum[IN, OUT : Numeric : Encoder](f: IN => OUT)
extends Aggregator[IN, OUT, OUT] {
private val numeric = implicitly[Numeric[OUT]]
override def zero: OUT = numeric.zero
override def reduce(b: OUT, a: IN): OUT = numeric.plus(b, f(a))
override def merge(b1: OUT, b2: OUT): OUT = numeric.plus(b1, b2)
override def finish(reduction: OUT): OUT = reduction
override def bufferEncoder: Encoder[OUT] = implicitly[Encoder[OUT]]
override def outputEncoder: Encoder[OUT] = implicitly[Encoder[OUT]]
}
object RowAgg extends Aggregator[Row, Int, Int] {
def zero: Int = 0
def reduce(b: Int, a: Row): Int = a.getInt(0) + b
def merge(b1: Int, b2: Int): Int = b1 + b2
def finish(r: Int): Int = r
override def bufferEncoder: Encoder[Int] = Encoders.scalaInt
override def outputEncoder: Encoder[Int] = Encoders.scalaInt
}
object NullResultAgg extends Aggregator[AggData, AggData, AggData] {
override def zero: AggData = AggData(0, "")
override def reduce(b: AggData, a: AggData): AggData = AggData(b.a + a.a, b.b + a.b)
override def finish(reduction: AggData): AggData = {
if (reduction.a % 2 == 0) null else reduction
}
override def merge(b1: AggData, b2: AggData): AggData = AggData(b1.a + b2.a, b1.b + b2.b)
override def bufferEncoder: Encoder[AggData] = Encoders.product[AggData]
override def outputEncoder: Encoder[AggData] = Encoders.product[AggData]
}
case class ComplexAggData(d1: AggData, d2: AggData)
object VeryComplexResultAgg extends Aggregator[Row, String, ComplexAggData] {
override def zero: String = ""
override def reduce(buffer: String, input: Row): String = buffer + input.getString(1)
override def merge(b1: String, b2: String): String = b1 + b2
override def finish(reduction: String): ComplexAggData = {
ComplexAggData(AggData(reduction.length, reduction), AggData(reduction.length, reduction))
}
override def bufferEncoder: Encoder[String] = Encoders.STRING
override def outputEncoder: Encoder[ComplexAggData] = Encoders.product[ComplexAggData]
}
case class OptionBooleanData(name: String, isGood: Option[Boolean])
case class OptionBooleanIntData(name: String, isGood: Option[(Boolean, Int)])
case class OptionBooleanAggregator(colName: String)
extends Aggregator[Row, Option[Boolean], Option[Boolean]] {
override def zero: Option[Boolean] = None
override def reduce(buffer: Option[Boolean], row: Row): Option[Boolean] = {
val index = row.fieldIndex(colName)
val value = if (row.isNullAt(index)) {
Option.empty[Boolean]
} else {
Some(row.getBoolean(index))
}
merge(buffer, value)
}
override def merge(b1: Option[Boolean], b2: Option[Boolean]): Option[Boolean] = {
if ((b1.isDefined && b1.get) || (b2.isDefined && b2.get)) {
Some(true)
} else if (b1.isDefined) {
b1
} else {
b2
}
}
override def finish(reduction: Option[Boolean]): Option[Boolean] = reduction
override def bufferEncoder: Encoder[Option[Boolean]] = OptionalBoolEncoder
override def outputEncoder: Encoder[Option[Boolean]] = OptionalBoolEncoder
def OptionalBoolEncoder: Encoder[Option[Boolean]] = ExpressionEncoder()
}
case class OptionBooleanIntAggregator(colName: String)
extends Aggregator[Row, Option[(Boolean, Int)], Option[(Boolean, Int)]] {
override def zero: Option[(Boolean, Int)] = None
override def reduce(buffer: Option[(Boolean, Int)], row: Row): Option[(Boolean, Int)] = {
val index = row.fieldIndex(colName)
val value = if (row.isNullAt(index)) {
Option.empty[(Boolean, Int)]
} else {
val nestedRow = row.getStruct(index)
Some((nestedRow.getBoolean(0), nestedRow.getInt(1)))
}
merge(buffer, value)
}
override def merge(
b1: Option[(Boolean, Int)],
b2: Option[(Boolean, Int)]): Option[(Boolean, Int)] = {
if ((b1.isDefined && b1.get._1) || (b2.isDefined && b2.get._1)) {
val newInt = b1.map(_._2).getOrElse(0) + b2.map(_._2).getOrElse(0)
Some((true, newInt))
} else if (b1.isDefined) {
b1
} else {
b2
}
}
override def finish(reduction: Option[(Boolean, Int)]): Option[(Boolean, Int)] = reduction
override def bufferEncoder: Encoder[Option[(Boolean, Int)]] = OptionalBoolIntEncoder
override def outputEncoder: Encoder[Option[(Boolean, Int)]] = OptionalBoolIntEncoder
def OptionalBoolIntEncoder: Encoder[Option[(Boolean, Int)]] = ExpressionEncoder()
}
class DatasetAggregatorSuite extends QueryTest with SharedSQLContext {
import testImplicits._
private implicit val ordering = Ordering.by((c: AggData) => c.a -> c.b)
test("typed aggregation: TypedAggregator") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDataset(
ds.groupByKey(_._1).agg(typed.sum(_._2)),
("a", 30.0), ("b", 3.0), ("c", 1.0))
}
test("typed aggregation: TypedAggregator, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDataset(
ds.groupByKey(_._1).agg(
typed.sum(_._2),
expr("sum(_2)").as[Long],
count("*")),
("a", 30.0, 30L, 2L), ("b", 3.0, 3L, 2L), ("c", 1.0, 1L, 1L))
}
test("typed aggregation: complex result type") {
val ds = Seq("a" -> 1, "a" -> 3, "b" -> 3).toDS()
checkDataset(
ds.groupByKey(_._1).agg(
expr("avg(_2)").as[Double],
ComplexResultAgg.toColumn),
("a", 2.0, (2L, 4L)), ("b", 3.0, (1L, 3L)))
}
test("typed aggregation: in project list") {
val ds = Seq(1, 3, 2, 5).toDS()
checkDataset(
ds.select(typed.sum((i: Int) => i)),
11.0)
checkDataset(
ds.select(typed.sum((i: Int) => i), typed.sum((i: Int) => i * 2)),
11.0 -> 22.0)
}
test("typed aggregation: class input") {
val ds = Seq(AggData(1, "one"), AggData(2, "two")).toDS()
checkDataset(
ds.select(ClassInputAgg.toColumn),
3)
}
test("typed aggregation: class input with reordering") {
val ds = sql("SELECT 'one' AS b, 1 as a").as[AggData]
checkDataset(
ds.select(ClassInputAgg.toColumn),
1)
checkDataset(
ds.select(expr("avg(a)").as[Double], ClassInputAgg.toColumn),
(1.0, 1))
checkDataset(
ds.groupByKey(_.b).agg(ClassInputAgg.toColumn),
("one", 1))
}
test("Typed aggregation using aggregator") {
// based on Dataset complex Aggregator test of DatasetBenchmark
val ds = Seq(AggData(1, "x"), AggData(2, "y"), AggData(3, "z")).toDS()
checkDataset(
ds.select(ClassBufferAggregator.toColumn),
6)
}
test("typed aggregation: complex input") {
val ds = Seq(AggData(1, "one"), AggData(2, "two")).toDS()
checkDataset(
ds.select(ComplexBufferAgg.toColumn),
2
)
checkDataset(
ds.select(expr("avg(a)").as[Double], ComplexBufferAgg.toColumn),
(1.5, 2))
checkDatasetUnorderly(
ds.groupByKey(_.b).agg(ComplexBufferAgg.toColumn),
("one", 1), ("two", 1))
}
test("typed aggregate: avg, count, sum") {
val ds = Seq("a" -> 1, "a" -> 3, "b" -> 3).toDS()
checkDataset(
ds.groupByKey(_._1).agg(
typed.avg(_._2), typed.count(_._2), typed.sum(_._2), typed.sumLong(_._2)),
("a", 2.0, 2L, 4.0, 4L), ("b", 3.0, 1L, 3.0, 3L))
}
test("generic typed sum") {
val ds = Seq("a" -> 1, "a" -> 3, "b" -> 3).toDS()
checkDataset(
ds.groupByKey(_._1)
.agg(new ParameterizedTypeSum[(String, Int), Double](_._2.toDouble).toColumn),
("a", 4.0), ("b", 3.0))
checkDataset(
ds.groupByKey(_._1)
.agg(new ParameterizedTypeSum((x: (String, Int)) => x._2.toInt).toColumn),
("a", 4), ("b", 3))
}
test("SPARK-12555 - result should not be corrupted after input columns are reordered") {
val ds = sql("SELECT 'Some String' AS b, 1279869254 AS a").as[AggData]
checkDataset(
ds.groupByKey(_.a).agg(NameAgg.toColumn),
(1279869254, "Some String"))
}
test("aggregator in DataFrame/Dataset[Row]") {
val df = Seq(1 -> "a", 2 -> "b", 3 -> "b").toDF("i", "j")
checkAnswer(df.groupBy($"j").agg(RowAgg.toColumn), Row("a", 1) :: Row("b", 5) :: Nil)
}
test("SPARK-14675: ClassFormatError when use Seq as Aggregator buffer type") {
val ds = Seq(AggData(1, "a"), AggData(2, "a")).toDS()
checkDataset(
ds.groupByKey(_.b).agg(SeqAgg.toColumn),
"a" -> Seq(1 -> 1, 2 -> 2)
)
}
test("spark-15051 alias of aggregator in DataFrame/Dataset[Row]") {
val df1 = Seq(1 -> "a", 2 -> "b", 3 -> "b").toDF("i", "j")
checkAnswer(df1.agg(RowAgg.toColumn as "b"), Row(6) :: Nil)
val df2 = Seq(1 -> "a", 2 -> "b", 3 -> "b").toDF("i", "j")
checkAnswer(df2.agg(RowAgg.toColumn as "b").select("b"), Row(6) :: Nil)
}
test("spark-15114 shorter system generated alias names") {
val ds = Seq(1, 3, 2, 5).toDS()
assert(ds.select(typed.sum((i: Int) => i)).columns.head === "TypedSumDouble(int)")
val ds2 = ds.select(typed.sum((i: Int) => i), typed.avg((i: Int) => i))
assert(ds2.columns.head === "TypedSumDouble(int)")
assert(ds2.columns.last === "TypedAverage(int)")
val df = Seq(1 -> "a", 2 -> "b", 3 -> "b").toDF("i", "j")
assert(df.groupBy($"j").agg(RowAgg.toColumn).columns.last ==
"RowAgg(org.apache.spark.sql.Row)")
assert(df.groupBy($"j").agg(RowAgg.toColumn as "agg1").columns.last == "agg1")
}
test("SPARK-15814 Aggregator can return null result") {
val ds = Seq(AggData(1, "one"), AggData(2, "two")).toDS()
checkDatasetUnorderly(
ds.groupByKey(_.a).agg(NullResultAgg.toColumn),
1 -> AggData(1, "one"), 2 -> null)
}
test("SPARK-16100: use Map as the buffer type of Aggregator") {
val ds = Seq(1, 2, 3).toDS()
checkDataset(ds.select(MapTypeBufferAgg.toColumn), 1)
}
test("SPARK-15204 improve nullability inference for Aggregator") {
val ds1 = Seq(1, 3, 2, 5).toDS()
assert(ds1.select(typed.sum((i: Int) => i)).schema.head.nullable === false)
val ds2 = Seq(AggData(1, "a"), AggData(2, "a")).toDS()
assert(ds2.select(SeqAgg.toColumn).schema.head.nullable === true)
val ds3 = sql("SELECT 'Some String' AS b, 1279869254 AS a").as[AggData]
assert(ds3.select(NameAgg.toColumn).schema.head.nullable === true)
}
test("SPARK-18147: very complex aggregator result type") {
val df = Seq(1 -> "a", 2 -> "b", 2 -> "c").toDF("i", "j")
checkAnswer(
df.groupBy($"i").agg(VeryComplexResultAgg.toColumn),
Row(1, Row(Row(1, "a"), Row(1, "a"))) :: Row(2, Row(Row(2, "bc"), Row(2, "bc"))) :: Nil)
}
test("SPARK-24569: Aggregator with output type Option[Boolean] creates column of type Row") {
val df = Seq(
OptionBooleanData("bob", Some(true)),
OptionBooleanData("bob", Some(false)),
OptionBooleanData("bob", None)).toDF()
val group = df
.groupBy("name")
.agg(OptionBooleanAggregator("isGood").toColumn.alias("isGood"))
assert(df.schema == group.schema)
checkAnswer(group, Row("bob", true) :: Nil)
checkDataset(group.as[OptionBooleanData], OptionBooleanData("bob", Some(true)))
}
test("SPARK-24569: groupByKey with Aggregator of output type Option[Boolean]") {
val df = Seq(
OptionBooleanData("bob", Some(true)),
OptionBooleanData("bob", Some(false)),
OptionBooleanData("bob", None)).toDF()
val grouped = df.groupByKey((r: Row) => r.getString(0))
.agg(OptionBooleanAggregator("isGood").toColumn).toDF("name", "isGood")
assert(grouped.schema == df.schema)
checkDataset(grouped.as[OptionBooleanData], OptionBooleanData("bob", Some(true)))
}
test("SPARK-24762: Aggregator should be able to use Option of Product encoder") {
val df = Seq(
OptionBooleanIntData("bob", Some((true, 1))),
OptionBooleanIntData("bob", Some((false, 2))),
OptionBooleanIntData("bob", None)).toDF()
val group = df
.groupBy("name")
.agg(OptionBooleanIntAggregator("isGood").toColumn.alias("isGood"))
val expectedSchema = new StructType()
.add("name", StringType, nullable = true)
.add("isGood",
new StructType()
.add("_1", BooleanType, nullable = false)
.add("_2", IntegerType, nullable = false),
nullable = true)
assert(df.schema == expectedSchema)
assert(group.schema == expectedSchema)
checkAnswer(group, Row("bob", Row(true, 3)) :: Nil)
checkDataset(group.as[OptionBooleanIntData], OptionBooleanIntData("bob", Some((true, 3))))
}
}
| WindCanDie/spark | sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala | Scala | apache-2.0 | 17,010 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.arrow.tools.stats
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.arrow.data.ArrowDataStore
import org.locationtech.geomesa.arrow.tools.ArrowDataStoreCommand
import org.locationtech.geomesa.arrow.tools.ArrowDataStoreCommand.UrlParam
import org.locationtech.geomesa.arrow.tools.stats.ArrowStatsCountCommand.ArrowStatsCountParams
import org.locationtech.geomesa.tools.ProvidedTypeNameParam
import org.locationtech.geomesa.tools.stats.StatsCountCommand
import org.locationtech.geomesa.tools.stats.StatsCountCommand.StatsCountParams
class ArrowStatsCountCommand extends StatsCountCommand[ArrowDataStore] with ArrowDataStoreCommand {
override val params = new ArrowStatsCountParams
override def execute(): Unit = {
params.exact = true
super.execute()
}
}
object ArrowStatsCountCommand {
@Parameters(commandDescription = "Calculate feature counts in a GeoMesa feature type")
class ArrowStatsCountParams extends StatsCountParams with UrlParam with ProvidedTypeNameParam
}
| locationtech/geomesa | geomesa-arrow/geomesa-arrow-tools/src/main/scala/org/locationtech/geomesa/arrow/tools/stats/ArrowStatsCountCommand.scala | Scala | apache-2.0 | 1,509 |
package io.iohk.ethereum.crypto
import akka.util.ByteString
import org.bouncycastle.crypto.BufferedBlockCipher
import org.bouncycastle.crypto.engines.AESEngine
import org.bouncycastle.crypto.modes.{CBCBlockCipher, SICBlockCipher}
import org.bouncycastle.crypto.paddings.{PKCS7Padding, PaddedBufferedBlockCipher}
import org.bouncycastle.crypto.params.{KeyParameter, ParametersWithIV}
import scala.util.Try
trait SymmetricCipher {
def encrypt(secret: ByteString, iv: ByteString, message: ByteString): ByteString =
process(true, secret, iv, message)
def decrypt(secret: ByteString, iv: ByteString, encrypted: ByteString): Option[ByteString] =
Try(process(false, secret, iv, encrypted)).toOption
protected def getCipher: BufferedBlockCipher
protected def process(forEncryption: Boolean, secret: ByteString, iv: ByteString, data: ByteString): ByteString = {
val cipher = getCipher
cipher.reset()
val params = new ParametersWithIV(new KeyParameter(secret.toArray), iv.toArray)
cipher.init(forEncryption, params)
val size = cipher.getOutputSize(data.size)
val output = Array.ofDim[Byte](size)
val offset = cipher.processBytes(data.toArray, 0, data.size, output, 0)
val len = cipher.doFinal(output, offset)
ByteString(output).take(offset + len)
}
}
object AES_CBC extends SymmetricCipher {
protected def getCipher =
new PaddedBufferedBlockCipher(new CBCBlockCipher(new AESEngine), new PKCS7Padding)
}
object AES_CTR extends SymmetricCipher {
protected def getCipher =
new BufferedBlockCipher(new SICBlockCipher(new AESEngine))
}
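// Illustrative usage (the key and IV below are hypothetical 16-byte values):
//   val key = ByteString(Array.fill[Byte](16)(1))
//   val iv  = ByteString(Array.fill[Byte](16)(2))
//   val ct  = AES_CBC.encrypt(key, iv, ByteString("secret message"))
//   AES_CBC.decrypt(key, iv, ct)  // Some(ByteString("secret message"))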
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/crypto/SymmetricCipher.scala | Scala | mit | 1,598 |
package dotty.tools.scaladoc
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Success,Failure}
import org.scalajs.dom._
import org.scalajs.dom.ext._
import scala.scalajs.js.annotation.JSExportTopLevel
import org.scalajs.dom.ext.Ajax
import scala.scalajs.js
import scala.scalajs.js.JSON
import utils.HTML._
trait Versions extends js.Object:
def versions: js.Dictionary[String]
class DropdownHandler:
val KEY = "versions-json"
val UNDEFINED_VERSIONS = "undefined_versions"
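// The versions JSON is cached in sessionStorage under KEY so it is fetched at most once per
// session; UNDEFINED_VERSIONS is a sentinel stored when no versionsDictionaryUrl is configured
// (or the fetch fails), in which case the version dropdown button stays disabled.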
private def addVersionsList(json: String) =
val ver = JSON.parse(json).asInstanceOf[Versions]
val ddc = document.getElementById("dropdown-content")
for (k, v) <- ver.versions do
var child = a(href := v)(k)
ddc.appendChild(child)
val arrow = span(cls := "ar")()
document.getElementById("dropdown-button").appendChild(arrow)
private def disableButton() =
val btn = document.getElementById("dropdown-button").asInstanceOf[html.Button]
btn.disabled = true
btn.classList.remove("dropdownbtnactive")
private def getURLContent(url: String): Future[String] = Ajax.get(url).map(_.responseText)
window.sessionStorage.getItem(KEY) match
case null => // If no key, returns null
js.typeOf(Globals.versionsDictionaryUrl) match
case "undefined" =>
window.sessionStorage.setItem(KEY, UNDEFINED_VERSIONS)
disableButton()
case _ =>
getURLContent(Globals.versionsDictionaryUrl).onComplete {
case Success(json: String) =>
window.sessionStorage.setItem(KEY, json)
addVersionsList(json)
case Failure(_) =>
window.sessionStorage.setItem(KEY, UNDEFINED_VERSIONS)
disableButton()
}
case value => value match
case UNDEFINED_VERSIONS =>
disableButton()
case json =>
addVersionsList(json)
document.addEventListener("click", (e: Event) => {
document.getElementById("dropdown-content").classList.remove("show")
document.getElementById("dropdown-button").classList.remove("expanded")
})
document.getElementById("version").asInstanceOf[html.Span].addEventListener("click", (e: Event) => e.stopPropagation())
end DropdownHandler
@JSExportTopLevel("dropdownHandler")
def dropdownHandler() =
if document.getElementById("dropdown-content").getElementsByTagName("a").size > 0 &&
window.getSelection.toString.length == 0 then
document.getElementById("dropdown-content").classList.toggle("show")
document.getElementById("dropdown-button").classList.toggle("expanded")
document.getElementById("dropdown-input").asInstanceOf[html.Input].focus()
@JSExportTopLevel("filterFunction")
def filterFunction() =
val input = document.getElementById("dropdown-input").asInstanceOf[html.Input]
val filter = input.value.toUpperCase
val div = document.getElementById("dropdown-content")
val as = div.getElementsByTagName("a")
as.foreach { a =>
val txtValue = a.innerText
val cl = a.asInstanceOf[html.Anchor].classList
if txtValue.toUpperCase.indexOf(filter) > -1 then
cl.remove("filtered")
else
cl.add("filtered")
}
| lampepfl/dotty | scaladoc-js/main/src/versions-dropdown/DropdownHandler.scala | Scala | apache-2.0 | 3,216 |
package com.gwf.datalake.util
import java.io.StringWriter
import java.io.PrintWriter
object ScalaUtil {
def getStackTraceAsStr(t: Throwable): String = {
val sw = new StringWriter
t.printStackTrace(new PrintWriter(sw))
sw.toString()
}
}
| k-ayada/SparkETL | pub/ayada/scala/sparkUtils/cmn/ScalaUtil.scala | Scala | apache-2.0 | 254 |
package mesosphere.mesos
import mesosphere.marathon.Protos.Constraint
import mesosphere.marathon.Protos.Constraint.Operator
import mesosphere.marathon.state.DiskSource
import mesosphere.mesos.ResourceUtil.RichResource
import org.apache.mesos.Protos.Resource
object VolumeConstraints {
class VolumeConstraintsMatcher(diskResource: Resource, constraint: Constraint) {
val field = constraint.getField
val value = constraint.getValue
def isMatch: Boolean = {
if (field == "path") {
checkPath
} else {
false
}
}
val diskSource = DiskSource.fromMesos(diskResource.diskSourceOption)
private def getPath: String =
diskSource.path.getOrElse("")
private def checkPath: Boolean =
constraint.getOperator match {
case Operator.LIKE => getPath.matches(value)
case Operator.UNLIKE => !getPath.matches(value)
case _ => false
}
}
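// Illustrative example: a constraint with field "path", operator LIKE and value ".*ssd.*" matches
// only disk resources whose source path matches that regular expression; UNLIKE negates the match,
// and any other field or operator is rejected.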
def meetsConstraint(diskResource: Resource, constraint: Constraint): Boolean = {
new VolumeConstraintsMatcher(diskResource, constraint).isMatch
}
def meetsAllConstraints(diskResource: Resource, constraints: Set[Constraint]): Boolean = {
constraints.forall(meetsConstraint(diskResource, _))
}
}
| timcharper/marathon | src/main/scala/mesosphere/mesos/VolumeConstraints.scala | Scala | apache-2.0 | 1,261 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import kafka.api.ApiUtils._
import kafka.utils.{SystemTime, Logging}
import kafka.network.{RequestChannel, BoundedByteBufferSend}
import kafka.common.{OffsetMetadata, OffsetAndMetadata, ErrorMapping, TopicAndPartition}
import kafka.network.RequestChannel.Response
import scala.collection._
object OffsetCommitRequest extends Logging {
val CurrentVersion: Short = 2
val DefaultClientId = ""
def readFrom(buffer: ByteBuffer): OffsetCommitRequest = {
// Read values from the envelope
val versionId = buffer.getShort
assert(versionId == 0 || versionId == 1 || versionId == 2,
"Version " + versionId + " is invalid for OffsetCommitRequest. Valid versions are 0, 1 or 2.")
val correlationId = buffer.getInt
val clientId = readShortString(buffer)
// Read the OffsetRequest
val consumerGroupId = readShortString(buffer)
// version 1 and 2 specific fields
val groupGenerationId: Int =
if (versionId >= 1)
buffer.getInt
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_GENERATION_ID
val consumerId: String =
if (versionId >= 1)
readShortString(buffer)
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_CONSUMER_ID
// version 2 specific fields
val retentionMs: Long =
if (versionId >= 2)
buffer.getLong
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME
val topicCount = buffer.getInt
val pairs = (1 to topicCount).flatMap(_ => {
val topic = readShortString(buffer)
val partitionCount = buffer.getInt
(1 to partitionCount).map(_ => {
val partitionId = buffer.getInt
val offset = buffer.getLong
val timestamp = {
if (versionId <= 1)
buffer.getLong
else
org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP
}
val metadata = readShortString(buffer)
(TopicAndPartition(topic, partitionId), OffsetAndMetadata(offset, metadata, timestamp))
})
})
OffsetCommitRequest(consumerGroupId, immutable.Map(pairs:_*), versionId, correlationId, clientId, groupGenerationId, consumerId, retentionMs)
}
}
case class OffsetCommitRequest(groupId: String,
requestInfo: immutable.Map[TopicAndPartition, OffsetAndMetadata],
versionId: Short = OffsetCommitRequest.CurrentVersion,
correlationId: Int = 0,
clientId: String = OffsetCommitRequest.DefaultClientId,
groupGenerationId: Int = org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_GENERATION_ID,
consumerId: String = org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_CONSUMER_ID,
retentionMs: Long = org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME)
extends RequestOrResponse(Some(RequestKeys.OffsetCommitKey)) {
assert(versionId == 0 || versionId == 1 || versionId == 2,
"Version " + versionId + " is invalid for OffsetCommitRequest. Valid versions are 0, 1 or 2.")
lazy val requestInfoGroupedByTopic = requestInfo.groupBy(_._1.topic)
def writeTo(buffer: ByteBuffer) {
// Write envelope
buffer.putShort(versionId)
buffer.putInt(correlationId)
writeShortString(buffer, clientId)
// Write OffsetCommitRequest
writeShortString(buffer, groupId) // consumer group
// version 1 and 2 specific data
if (versionId >= 1) {
buffer.putInt(groupGenerationId)
writeShortString(buffer, consumerId)
}
// version 2 or above specific data
if (versionId >= 2) {
buffer.putLong(retentionMs)
}
buffer.putInt(requestInfoGroupedByTopic.size) // number of topics
requestInfoGroupedByTopic.foreach( t1 => { // topic -> Map[TopicAndPartition, OffsetMetadataAndError]
writeShortString(buffer, t1._1) // topic
buffer.putInt(t1._2.size) // number of partitions for this topic
t1._2.foreach( t2 => {
buffer.putInt(t2._1.partition)
buffer.putLong(t2._2.offset)
// version 0 and 1 specific data
if (versionId <= 1)
buffer.putLong(t2._2.commitTimestamp)
writeShortString(buffer, t2._2.metadata)
})
})
}
override def sizeInBytes =
2 + /* versionId */
4 + /* correlationId */
shortStringLength(clientId) +
shortStringLength(groupId) +
(if (versionId >= 1) 4 /* group generation id */ + shortStringLength(consumerId) else 0) +
(if (versionId >= 2) 8 /* retention time */ else 0) +
4 + /* topic count */
requestInfoGroupedByTopic.foldLeft(0)((count, topicAndOffsets) => {
val (topic, offsets) = topicAndOffsets
count +
shortStringLength(topic) + /* topic */
4 + /* number of partitions */
offsets.foldLeft(0)((innerCount, offsetAndMetadata) => {
innerCount +
4 /* partition */ +
8 /* offset */ +
(if (versionId <= 1) 8 else 0) /* timestamp */ +
shortStringLength(offsetAndMetadata._2.metadata)
})
})
override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
val errorCode = ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
val commitStatus = requestInfo.mapValues(_ => errorCode)
val commitResponse = OffsetCommitResponse(commitStatus, correlationId)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(commitResponse)))
}
override def describe(details: Boolean): String = {
val offsetCommitRequest = new StringBuilder
offsetCommitRequest.append("Name: " + this.getClass.getSimpleName)
offsetCommitRequest.append("; Version: " + versionId)
offsetCommitRequest.append("; CorrelationId: " + correlationId)
offsetCommitRequest.append("; ClientId: " + clientId)
offsetCommitRequest.append("; GroupId: " + groupId)
offsetCommitRequest.append("; GroupGenerationId: " + groupGenerationId)
offsetCommitRequest.append("; ConsumerId: " + consumerId)
offsetCommitRequest.append("; RetentionMs: " + retentionMs)
if(details)
offsetCommitRequest.append("; RequestInfo: " + requestInfo.mkString(","))
offsetCommitRequest.toString()
}
override def toString = {
describe(details = true)
}
}
| WillCh/cs286A | dataMover/kafka/core/src/main/scala/kafka/api/OffsetCommitRequest.scala | Scala | bsd-2-clause | 7,345 |
package org.allenai.plugins.archetypes
import org.allenai.plugins.CoreSettingsPlugin
import org.allenai.plugins.CoreDependencies._
import org.allenai.plugins.DeployPlugin
import spray.revolver.RevolverPlugin.Revolver
import sbt._
import sbt.Keys._
object WebServicePlugin extends AutoPlugin {
override def requires: Plugins = DeployPlugin && CoreSettingsPlugin
override def projectSettings: Seq[Setting[_]] =
Revolver.settings ++ Seq(
libraryDependencies ++= Seq(
akkaActor,
akkaLogging,
sprayCan,
sprayRouting,
sprayCaching,
sprayJson,
typesafeConfig,
allenAiCommon,
allenAiTestkit % "test"
))
}
| non/sbt-plugins | src/main/scala/org/allenai/plugins/archetypes/WebServicePlugin.scala | Scala | apache-2.0 | 694 |
/*
* Copyright 2012-2014 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.ui
import ca.odell.glazedlists.gui.TableFormat
import viper.domain.{RecordPrototype, Record}
/**
* Create table format from record prototype.
* Column 0 is the Record object itself, so we can use it to style rows, etc.
*/
class RecordTableFormat(val prototype: RecordPrototype) extends TableFormat[Record] {
val fields = prototype.fields
def getColumnCount = fields.size + 1
def getColumnName(column: Int) =
if (column == 0) "base" else fields(column - 1).name
def getColumnValue(baseObject: Record, column: Int): AnyRef = {
if (column == 0) {
baseObject
} else {
val field = fields(column - 1)
field.value(baseObject)
}
}
def defaultSort: List[(String, Boolean)] = prototype.defaultSort.map { s =>
val (field, reverse) = s
(field.name, reverse)
}
}
| vyadh/viper | ui/src/main/scala/viper/ui/RecordTableFormat.scala | Scala | apache-2.0 | 1,486 |
package models
import play.api.libs.json._
case class Category(
id: Long,
name: String,
description: String,
lastUpdated: String)
/**
* Companion object for Category class.
*/
object Category {
implicit val categoryFormat = Json.format[Category]
}
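// Illustrative round trip (field values are hypothetical):
//   Json.toJson(Category(1L, "Museums", "Campus museums", "2016-04-01"))
//   // produces a JSON object with the fields id, name, description and lastUpdated, e.g.
//   // {"id":1,"name":"Museums","description":"Campus museums","lastUpdated":"2016-04-01"}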
| kelleyb/RPI-Tours-Backend | app/models/Category.scala | Scala | mit | 267 |
package com.recursivity.commons.bean
import collection.immutable._
import collection.{TraversableLike}
import collection.mutable.{DoubleLinkedList, LinkedList, Builder, MutableList}
import java.lang.reflect.{Field, Constructor, ParameterizedType}
import scalap.{Member, ClassSignature}
/**
* Utility class that creates new instances of arbitrary classes and fills their vals/vars
* from a map of property values.
*/
object BeanUtils {
def instantiate[T](cls: Class[_]): T = {
var cons: Constructor[_] = null
cls.getConstructors.foreach(c =>{
if (cons == null) cons = c
if(c.getParameterTypes.size > cons.getParameterTypes.size) cons = c
})
val list = new MutableList[AnyRef]
cons.getParameterTypes.foreach(cls => {
cls.getName match {
case "long" => {
val l: Long = 0
list += l.asInstanceOf[AnyRef]
}
case "int" => {
val l: Int = 0
list += l.asInstanceOf[AnyRef]
}
case "float" => {
val f = 0.0f
list += f.asInstanceOf[AnyRef]
}
case "double" => {
val f = 0.0d
list += f.asInstanceOf[AnyRef]
}
case "boolean" => {
val b = false
list += b.asInstanceOf[AnyRef]
}
case "short" => {
val l: Short = 0
list += l.asInstanceOf[AnyRef]
}
case "byte" => {
val b = new java.lang.Byte("0")
list += b.asInstanceOf[AnyRef]
}
case "char" => {
val c = new java.lang.Character('c')
list += c
}
case "scala.Option" => list += None
case _ => list += null
}
})
return cons.newInstance(list.toArray: _*).asInstanceOf[T]
}
def setProperty(cls: Class[_], bean: Any, key: String, value: Any) {
try {
val field = cls.getDeclaredField(key)
val fieldCls = getClassForJavaPrimitive(field.getType)
field.setAccessible(true)
if (classOf[ParameterizedType].isAssignableFrom(field.getGenericType.getClass) || fieldCls.equals(classOf[Array[_]])) {
val parameterized = field.getGenericType.asInstanceOf[ParameterizedType]
setGenerified(field, bean, fieldCls, GenericTypeDefinition(parameterized), value)
} else {
val transformer = TransformerRegistry(fieldCls)
field.set(bean, transformer.getOrElse(throw new BeanTransformationException(fieldCls)).toValue(value.toString).getOrElse(null))
}
} catch {
case e: NoSuchFieldException => {
if (cls.getSuperclass != null)
setProperty(cls.getSuperclass, bean, key, value)
}case ie: IllegalArgumentException => {}// do nothing, do not set value with illegal argument
}
}
private def setGenerified(field: Field, bean: Any, fieldCls: Class[_], typeDef: GenericTypeDefinition, value: Any) {
if(!typeDefHasObject(typeDef))
field.set(bean, resolveGenerifiedValue(fieldCls, typeDef, value))
else{
val signature = ClassSignature(bean.asInstanceOf[AnyRef].getClass)
var member: Option[Member] = None
signature.members.foreach(f => {
if(f.name == field.getName)
member = Some(f)
})
field.set(bean, resolveGenerifiedValue(fieldCls, member.getOrElse(throw new
IllegalArgumentException("Could not resolve generic type for: " + member)).returnType, value))
}
}
private def typeDefHasObject(typeDef: GenericTypeDefinition): Boolean = {
var result = false
if(typeDef.clazz == "java.lang.Object")
return true
typeDef.genericTypes.getOrElse(return false).foreach(f => {
if(!result)
result = typeDefHasObject(f)
})
result
}
def resolveGenerifiedValue(cls: Class[_], genericType: GenericTypeDefinition, input: Any): Any = {
if (classOf[TraversableLike[_ <: Any, _ <: Any]].isAssignableFrom(cls)) {
val list = valueList(genericType, input)
return resolveTraversableOrArray(cls, list)
} else if (classOf[java.util.Collection[_ <: Any]].isAssignableFrom(cls)) {
val list = valueList(genericType, input)
return resolveJavaCollectionType(cls, list)
} else if (classOf[Option[_ <: Any]].isAssignableFrom(cls)) {
val c = genericType.genericTypes.get.head.definedClass
if (genericType.genericTypes.get.head.genericTypes.equals(None)) {
val transformer = TransformerRegistry(c)
return transformer.getOrElse(throw new BeanTransformationException(c)).toValue(input.toString)
} else {
val t = genericType.genericTypes.get.head
val targetCls = t.definedClass
return Some(resolveGenerifiedValue(targetCls, t, input))
}
} else {
return null
}
}
def resolveJavaCollectionType(cls: Class[_], list: scala.collection.Seq[_]): Any = {
if(classOf[java.util.Set[_]].isAssignableFrom(cls)){
var set: java.util.Set[Any] = null
try{
set = cls.newInstance.asInstanceOf[java.util.Set[Any]]
}catch{
case e: InstantiationException => set = new java.util.HashSet[Any]
}
list.foreach(b => set.add(b))
return set
}else{
var l: java.util.List[Any] = null
try{
l = cls.newInstance.asInstanceOf[java.util.List[Any]]
}catch{
case e: InstantiationException => l = new java.util.ArrayList[Any]
}
list.foreach(b => l.add(b))
return l
}
}
def valueList(genericType: GenericTypeDefinition, input: Any): MutableList[Any] = {
val c = genericType.genericTypes.get.head.definedClass
val transformer = TransformerRegistry(c)
val list = new MutableList[Any]
if (input.isInstanceOf[List[_]]) {
val l = input.asInstanceOf[List[_]]
l.foreach(f => list += transformer.getOrElse(throw new BeanTransformationException(c)).toValue(f.toString).getOrElse(null))
} else if (input.isInstanceOf[Array[_]]) {
val array = input.asInstanceOf[Array[_]]
array.foreach(f => list += transformer.getOrElse(throw new BeanTransformationException(c)).toValue(f.toString).getOrElse(null))
}
return list
}
// Due to the trickiness of supporting immutable sets/lists, the types are hard-coded here: there is no support
// for extensions of the immutable Scala Sets/Lists, and TreeSet is not supported.
def resolveTraversableOrArray(cls: Class[_], list: scala.collection.Seq[_]): Any = {
if (cls.equals(classOf[List[_]])) {
return list.toList
} else if (cls.equals(classOf[Set[_]]))
return list.toSet
else if (cls.equals(classOf[Array[_]]))
return list.toArray
else if (cls.equals(classOf[ListSet[_]]))
return new ListSet ++ list.toList
else if (cls.equals(classOf[HashSet[_]]))
return new HashSet ++ list.toList
else if (cls.equals(classOf[scala.collection.Seq[_]]) || cls.equals(classOf[Seq[_]])) {
return list.toList
}else {
val listOrSet = cls.newInstance
if (classOf[Builder[Any, Any]].isAssignableFrom(cls)) {
val builder = listOrSet.asInstanceOf[Builder[Any, Any]]
list.foreach(b => builder += b)
return builder
} else if (classOf[LinkedList[_]].isAssignableFrom(cls)) {
var seq = listOrSet.asInstanceOf[LinkedList[_]]
list.foreach(elem => {
seq = seq :+ elem
})
return seq
} else if (classOf[DoubleLinkedList[_]].isAssignableFrom(cls)) {
var seq = listOrSet.asInstanceOf[DoubleLinkedList[_]]
list.foreach(elem => {
seq = seq :+ elem
})
return seq
} else {
null // unsupported collection type: fall back to null rather than silently returning Unit
}
}
}
def instantiate[T](cls: Class[_], properties: Map[String, Any]): T = {
val bean = instantiate[T](cls)
return setProperties[T](bean, properties)
}
def setProperties[T](bean: T, properties: Map[String, Any]): T = {
properties.keys.foreach(key => {
setProperty(bean.asInstanceOf[AnyRef].getClass, bean, key, properties(key))
})
return bean
}
private def getClassForJavaPrimitive(cls: Class[_]): Class[_] = {
var fieldCls: Class[_] = null
cls.getName match {
case "long" => fieldCls = classOf[Long]
case "int" => fieldCls = classOf[java.lang.Integer]
case "float" => fieldCls = classOf[java.lang.Float]
case "double" => fieldCls = classOf[java.lang.Double]
case "boolean" => fieldCls = classOf[Boolean]
case "short" => fieldCls = classOf[java.lang.Short]
case _ => fieldCls = cls
}
return fieldCls
}
}
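// Minimal usage sketch (the Person class and its fields are illustrative assumptions, and it relies on
// TransformerRegistry knowing how to convert the field types involved -- this is not part of the original source):
//
//   case class Person(name: String, age: Int)
//
//   val blank  = BeanUtils.instantiate[Person](classOf[Person])                                    // Person(null, 0)
//   val filled = BeanUtils.instantiate[Person](classOf[Person], Map("name" -> "Ada", "age" -> 36)) // Person("Ada", 36)
//
// instantiate picks the constructor with the most parameters and fills it with type defaults; setProperties then
// uses reflection plus the registered transformers to convert each map value into the target field type.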
|
bowler-framework/recursivity-commons
|
src/main/scala/com/recursivity/commons/bean/BeanUtils.scala
|
Scala
|
bsd-3-clause
| 8,497 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.detailquery
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.datastore.impl.FileFactory.FileType
class SubqueryWithFilterAndSortTestCase extends QueryTest with BeforeAndAfterAll {
val tempDirPath = s"$resourcesPath/temp"
val tempFilePath = s"$resourcesPath/temp/subqueryfilterwithsort.csv"
override def beforeAll {
FileFactory.mkdirs(tempDirPath,FileType.LOCAL)
sql("drop table if exists subqueryfilterwithsort")
sql("drop table if exists subqueryfilterwithsort_hive")
sql("CREATE TABLE subqueryfilterwithsort (name String, id int) STORED BY 'org.apache.carbondata.format'")
sql("CREATE TABLE subqueryfilterwithsort_hive (name String, id int)row format delimited fields terminated by ','")
val data = "name_a,1\nname_b,2\nname_c,3\nname_d,4\nname_e,5\nname_f,6"
writedata(tempFilePath, data)
sql(s"LOAD data local inpath '${tempFilePath}' into table subqueryfilterwithsort options('fileheader'='name,id')")
sql(s"LOAD data local inpath '${tempFilePath}' into table subqueryfilterwithsort_hive")
}
test("When the query has sub-query with sort and has '=' filter") {
try {
checkAnswer(sql("select name,id from (select * from subqueryfilterwithsort order by id)t where name='name_c' "),
sql("select name,id from (select * from subqueryfilterwithsort_hive order by id)t where name='name_c'"))
} catch{
case ex:Exception => ex.printStackTrace()
assert(false)
}
}
test("When the query has sub-query with sort and has 'like' filter") {
try {
checkAnswer(sql("select name,id from (select * from subqueryfilterwithsort order by id)t where name like 'name%' "),
sql("select name,id from (select * from subqueryfilterwithsort_hive order by id)t where name like 'name%'"))
} catch{
case ex:Exception => ex.printStackTrace()
assert(false)
}
}
def writedata(filePath: String, data: String) = {
val dis = FileFactory.getDataOutputStream(filePath, FileFactory.getFileType(filePath))
dis.writeBytes(data.toString())
dis.close()
}
def deleteFile(filePath: String) {
val file = FileFactory.getCarbonFile(filePath, FileFactory.getFileType(filePath))
file.delete()
}
override def afterAll {
sql("drop table if exists subqueryfilterwithsort")
sql("drop table if exists subqueryfilterwithsort_hive")
deleteFile(tempFilePath)
deleteFile(tempDirPath)
}
}
|
ksimar/incubator-carbondata
|
integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SubqueryWithFilterAndSortTestCase.scala
|
Scala
|
apache-2.0
| 3,401 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.tez
import java.io.Serializable
import scala.reflect.ClassTag
import org.apache.spark.broadcast.Broadcast
import java.util.UUID
import org.apache.tez.dag.api.TezConfiguration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import java.net.URL
import java.io.FileOutputStream
import java.io.Closeable
import java.io.InputStream
/**
* Tez specific implementation of Broadcast which uses HDFS as its broadcasting mechanism.
*/
class TezBroadcast[T: ClassTag](@transient var broadcastedValue: T, applicationName: String) extends Broadcast[T](0L) {
private val path: String = applicationName + "/broadcast/" + UUID.randomUUID().toString() + ".ser"
/**
 * Persists the broadcast value to HDFS if it has not been saved there yet.
 */
private[tez] def broadcast() {
val fs = FileSystem.get(new TezConfiguration)
if (!fs.exists(new Path(path))) {
this.saveToHdfs()
}
}
/**
 * Returns the broadcast value, lazily reading and deserializing it from HDFS on first access.
 */
override protected def getValue() = {
if (this.broadcastedValue == null) {
val fs = FileSystem.get(new TezConfiguration)
val valueStream = fs.open(new Path(path))
try {
  this.broadcastedValue = SparkUtils.deserialize(valueStream).asInstanceOf[T]
} finally {
  valueStream.close()
}
}
this.broadcastedValue
}
/**
 * Unpersisting simply removes the serialized value from HDFS.
 */
override protected def doUnpersist(blocking: Boolean) {
this.remove
}
/**
 * Destroying simply removes the serialized value from HDFS.
 */
override protected def doDestroy(blocking: Boolean) {
this.remove
}
/**
 * Deletes the serialized broadcast file from HDFS.
 */
private def remove() {
val fs = FileSystem.get(new TezConfiguration)
fs.delete(new Path(path))
}
/**
 * Serializes the current value and writes it to HDFS at `path`.
 */
private def saveToHdfs() {
val fs = FileSystem.get(new TezConfiguration)
SparkUtils.serializeToFs(value, fs, new Path(path))
}
}
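// Usage sketch (only callable from within the org.apache.spark.tez package, since broadcast() is package-private;
// "appName" and the Seq value are illustrative assumptions, not part of the original source):
//
//   val bc = new TezBroadcast(Seq(1, 2, 3), appName)
//   bc.broadcast()   // serializes the value to "<appName>/broadcast/<uuid>.ser" on HDFS if it is not already there
//   bc.value         // on a remote task this lazily re-reads and deserializes the value from HDFS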
|
sequenceiq/spark-native-yarn
|
src/main/scala/org/apache/spark/tez/TezBroadcast.scala
|
Scala
|
apache-2.0
| 2,455 |
package play.api.cache.redis.impl
import java.util.Optional
import scala.concurrent.duration.Duration
import play.api.cache.redis._
import play.cache.redis._
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.Specification
class AsyncJavaRedisSpec(implicit ee: ExecutionEnv) extends Specification with ReducedMockito {
import Implicits._
import JavaCompatibility._
import RedisCacheImplicits._
import org.mockito.ArgumentMatchers._
"Java Redis Cache" should {
"get and miss" in new MockedJavaRedis {
async.get[String](anyString)(anyClassTag) returns None
cache.get[String](key).asScala must beEqualTo(Optional.empty).await
}
"get and hit" in new MockedJavaRedis {
async.get[String](beEq(key))(anyClassTag) returns Some(value)
async.get[String](beEq(classTagKey))(anyClassTag) returns Some(classTag)
cache.get[String](key).asScala must beEqualTo(Optional.of(value)).await
}
"get null" in new MockedJavaRedis {
async.get[String](beEq(classTagKey))(anyClassTag) returns Some("null")
cache.get[String](key).asScala must beEqualTo(Optional.empty).await
there was one(async).get[String](classTagKey)
}
"set" in new MockedJavaRedis {
async.set(anyString, anyString, any[Duration]) returns execDone
cache.set(key, value).asScala must beDone.await
there was one(async).set(key, value, Duration.Inf)
there was one(async).set(classTagKey, classTag, Duration.Inf)
}
"set with expiration" in new MockedJavaRedis {
async.set(anyString, anyString, any[Duration]) returns execDone
cache.set(key, value, expiration.toSeconds.toInt).asScala must beDone.await
there was one(async).set(key, value, expiration)
there was one(async).set(classTagKey, classTag, expiration)
}
"set null" in new MockedJavaRedis {
async.set(anyString, any, any[Duration]) returns execDone
cache.set(key, null: AnyRef).asScala must beDone.await
there was one(async).set(key, null, Duration.Inf)
there was one(async).set(classTagKey, "null", Duration.Inf)
}
"get or else (hit)" in new MockedJavaRedis with OrElse {
async.get[String](beEq(key))(anyClassTag) returns Some(value)
async.get[String](beEq(classTagKey))(anyClassTag) returns Some(classTag)
cache.getOrElse(key, doElse(value)).asScala must beEqualTo(value).await
cache.getOrElseUpdate(key, doFuture(value).asJava).asScala must beEqualTo(value).await
orElse mustEqual 0
there was two(async).get[String](key)
there was two(async).get[String](classTagKey)
}
"get or else (miss)" in new MockedJavaRedis with OrElse {
async.get[String](beEq(classTagKey))(anyClassTag) returns None
async.set(anyString, anyString, any[Duration]) returns execDone
cache.getOrElse(key, doElse(value)).asScala must beEqualTo(value).await
cache.getOrElseUpdate(key, doFuture(value).asJava).asScala must beEqualTo(value).await
orElse mustEqual 2
there was two(async).get[String](classTagKey)
there was two(async).set(key, value, Duration.Inf)
there was two(async).set(classTagKey, classTag, Duration.Inf)
}
"get or else with expiration (hit)" in new MockedJavaRedis with OrElse {
async.get[String](beEq(key))(anyClassTag) returns Some(value)
async.get[String](beEq(classTagKey))(anyClassTag) returns Some(classTag)
cache.getOrElse(key, doElse(value), expiration.toSeconds.toInt).asScala must beEqualTo(value).await
cache.getOrElseUpdate(key, doFuture(value).asJava, expiration.toSeconds.toInt).asScala must beEqualTo(value).await
orElse mustEqual 0
there was two(async).get[String](key)
}
"get or else with expiration (miss)" in new MockedJavaRedis with OrElse {
async.get[String](beEq(classTagKey))(anyClassTag) returns None
async.set(anyString, anyString, any[Duration]) returns execDone
cache.getOrElse(key, doElse(value), expiration.toSeconds.toInt).asScala must beEqualTo(value).await
cache.getOrElseUpdate(key, doFuture(value).asJava, expiration.toSeconds.toInt).asScala must beEqualTo(value).await
orElse mustEqual 2
there was two(async).get[String](classTagKey)
there was two(async).set(key, value, expiration)
there was two(async).set(classTagKey, classTag, expiration)
}
"get optional (none)" in new MockedJavaRedis {
async.get[String](anyString)(anyClassTag) returns None
cache.getOptional[String](key).asScala must beEqualTo(Optional.ofNullable(null)).await
}
"get optional (some)" in new MockedJavaRedis {
async.get[String](anyString)(anyClassTag) returns Some("value")
async.get[String](beEq(classTagKey))(anyClassTag) returns Some(classTag)
cache.getOptional[String](key).asScala must beEqualTo(Optional.ofNullable("value")).await
}
"remove" in new MockedJavaRedis {
async.remove(anyString) returns execDone
cache.remove(key).asScala must beDone.await
there was one(async).remove(key)
there was one(async).remove(classTagKey)
}
"remove all" in new MockedJavaRedis {
async.invalidate() returns execDone
cache.removeAll().asScala must beDone.await
there was one(async).invalidate()
}
"get and set 'byte'" in new MockedJavaRedis {
val byte = JavaTypes.byteValue
// set a value
// note: one might expect the stored class tag to be "byte", but the value is boxed, so "java.lang.Byte" is used instead
async.set(anyString, beEq(byte), any[Duration]) returns execDone
async.set(anyString, beEq("byte"), any[Duration]) returns execDone
async.set(anyString, beEq("java.lang.Byte"), any[Duration]) returns execDone
cache.set(key, byte).asScala must beDone.await
// hit on GET
async.get[Byte](beEq(key))(anyClassTag) returns Some(byte)
async.get[String](beEq(classTagKey))(anyClassTag) returns Some("java.lang.Byte")
cache.get[Byte](key).asScala must beEqualTo(Optional.ofNullable(byte)).await
}
"get and set 'byte[]'" in new MockedJavaRedis {
val bytes = JavaTypes.bytesValue
// set a value
async.set(anyString, beEq(bytes), any[Duration]) returns execDone
async.set(anyString, beEq("byte[]"), any[Duration]) returns execDone
cache.set(key, bytes).asScala must beDone.await
// hit on GET
async.get[Array[Byte]](beEq(key))(anyClassTag) returns Some(bytes)
async.get[String](beEq(classTagKey))(anyClassTag) returns Some("byte[]")
cache.get[Array[Byte]](key).asScala must beEqualTo(Optional.ofNullable(bytes)).await
}
"get all" in new MockedJavaRedis {
async.getAll[String](beEq(Iterable(key, key, key)))(anyClassTag) returns Seq(Some(value), None, None)
cache.getAll(classOf[String], key, key, key).asScala.map(_.asScala) must beEqualTo(Seq(Optional.of(value), Optional.empty, Optional.empty)).await
}
"get all (keys in a collection)" in new MockedJavaRedis {
async.getAll[String](beEq(Iterable(key, key, key)))(anyClassTag) returns Seq(Some(value), None, None)
cache.getAll(classOf[String], JavaList(key, key, key)).asScala.map(_.asScala) must beEqualTo(Seq(Optional.of(value), Optional.empty, Optional.empty)).await
}
"set if not exists (exists)" in new MockedJavaRedis {
async.setIfNotExists(beEq(key), beEq(value), any[Duration]) returns false
async.setIfNotExists(beEq(classTagKey), beEq(classTag), any[Duration]) returns false
cache.setIfNotExists(key, value).asScala.map(Boolean.unbox) must beFalse.await
there was one(async).setIfNotExists(key, value, null)
there was one(async).setIfNotExists(classTagKey, classTag, null)
}
"set if not exists (not exists)" in new MockedJavaRedis {
async.setIfNotExists(beEq(key), beEq(value), any[Duration]) returns true
async.setIfNotExists(beEq(classTagKey), beEq(classTag), any[Duration]) returns true
cache.setIfNotExists(key, value).asScala.map(Boolean.unbox) must beTrue.await
there was one(async).setIfNotExists(key, value, null)
there was one(async).setIfNotExists(classTagKey, classTag, null)
}
"set if not exists (exists) with expiration" in new MockedJavaRedis {
async.setIfNotExists(beEq(key), beEq(value), any[Duration]) returns false
async.setIfNotExists(beEq(classTagKey), beEq(classTag), any[Duration]) returns false
cache.setIfNotExists(key, value, expirationInt).asScala.map(Boolean.unbox) must beFalse.await
there was one(async).setIfNotExists(key, value, expiration)
there was one(async).setIfNotExists(classTagKey, classTag, expiration)
}
"set if not exists (not exists) with expiration" in new MockedJavaRedis {
async.setIfNotExists(beEq(key), beEq(value), any[Duration]) returns true
async.setIfNotExists(beEq(classTagKey), beEq(classTag), any[Duration]) returns true
cache.setIfNotExists(key, value, expirationInt).asScala.map(Boolean.unbox) must beTrue.await
there was one(async).setIfNotExists(key, value, expiration)
there was one(async).setIfNotExists(classTagKey, classTag, expiration)
}
"set all" in new MockedJavaRedis {
async.setAll(anyVarArgs) returns Done
cache.setAll(new KeyValue(key, value), new KeyValue(other, value)).asScala must beDone.await
there was one(async).setAll((key, value), (classTagKey, classTag), (other, value), (classTagOther, classTag))
}
"set all if not exists (exists)" in new MockedJavaRedis {
async.setAllIfNotExist(anyVarArgs) returns false
cache.setAllIfNotExist(new KeyValue(key, value), new KeyValue(other, value)).asScala.map(Boolean.unbox) must beFalse.await
there was one(async).setAllIfNotExist((key, value), (classTagKey, classTag), (other, value), (classTagOther, classTag))
}
"set all if not exists (not exists)" in new MockedJavaRedis {
async.setAllIfNotExist(anyVarArgs) returns true
cache.setAllIfNotExist(new KeyValue(key, value), new KeyValue(other, value)).asScala.map(Boolean.unbox) must beTrue.await
there was one(async).setAllIfNotExist((key, value), (classTagKey, classTag), (other, value), (classTagOther, classTag))
}
"append" in new MockedJavaRedis {
async.append(anyString, anyString, any[Duration]) returns Done
async.setIfNotExists(anyString, anyString, any[Duration]) returns false
cache.append(key, value).asScala must beDone.await
there was one(async).append(key, value, null)
there was one(async).setIfNotExists(classTagKey, classTag, null)
}
"append with expiration" in new MockedJavaRedis {
async.append(anyString, anyString, any[Duration]) returns Done
async.setIfNotExists(anyString, anyString, any[Duration]) returns false
cache.append(key, value, expirationInt).asScala must beDone.await
there was one(async).append(key, value, expiration)
there was one(async).setIfNotExists(classTagKey, classTag, expiration)
}
"expire" in new MockedJavaRedis {
async.expire(anyString, any[Duration]) returns Done
cache.expire(key, expirationInt).asScala must beDone.await
there was one(async).expire(key, expiration)
there was one(async).expire(classTagKey, expiration)
}
"expires in (defined)" in new MockedJavaRedis {
async.expiresIn(anyString) returns Some(expiration)
cache.expiresIn(key).asScala must beEqualTo(Optional.of(expirationLong)).await
there was one(async).expiresIn(key)
there was no(async).expiresIn(classTagKey)
}
"expires in (undefined)" in new MockedJavaRedis {
async.expiresIn(anyString) returns None
cache.expiresIn(key).asScala must beEqualTo(Optional.empty).await
there was one(async).expiresIn(key)
there was no(async).expiresIn(classTagKey)
}
"matching" in new MockedJavaRedis {
async.matching(anyString) returns Seq(key)
cache.matching("pattern").asScala.map(_.asScala) must beEqualTo(Seq(key)).await
there was one(async).matching("pattern")
}
"remove multiple" in new MockedJavaRedis {
async.removeAll(anyVarArgs) returns Done
cache.remove(key, key, key, key).asScala must beDone.await
there was one(async).removeAll(key, classTagKey, key, classTagKey, key, classTagKey, key, classTagKey)
}
"remove all" in new MockedJavaRedis {
async.removeAll(anyVarArgs) returns Done
cache.removeAllKeys(key, key, key, key).asScala must beDone.await
there was one(async).removeAll(key, classTagKey, key, classTagKey, key, classTagKey, key, classTagKey)
}
"remove matching" in new MockedJavaRedis {
async.removeMatching(anyString) returns Done
cache.removeMatching("pattern").asScala must beDone.await
there was one(async).removeMatching("pattern")
there was one(async).removeMatching("classTag::pattern")
}
"exists" in new MockedJavaRedis {
async.exists(beEq(key)) returns true
cache.exists(key).asScala.map(Boolean.unbox) must beTrue.await
there was one(async).exists(key)
there was no(async).exists(classTagKey)
}
"increment" in new MockedJavaRedis {
async.increment(beEq(key), anyLong) returns 10L
cache.increment(key).asScala.map(Long.unbox) must beEqualTo(10L).await
cache.increment(key, 2L).asScala.map(Long.unbox) must beEqualTo(10L).await
there was one(async).increment(key, by = 1L)
there was one(async).increment(key, by = 2L)
}
"decrement" in new MockedJavaRedis {
async.decrement(beEq(key), anyLong) returns 10L
cache.decrement(key).asScala.map(Long.unbox) must beEqualTo(10L).await
cache.decrement(key, 2L).asScala.map(Long.unbox) must beEqualTo(10L).await
there was one(async).decrement(key, by = 1L)
there was one(async).decrement(key, by = 2L)
}
"create list" in new MockedJavaRedis {
private val list = mock[RedisList[String, Future]]
async.list(beEq(key))(anyClassTag[String]) returns list
cache.list(key, classOf[String]) must beAnInstanceOf[AsyncRedisList[String]]
there was one(async).list[String](key)
}
"create set" in new MockedJavaRedis {
private val set = mock[RedisSet[String, Future]]
async.set(beEq(key))(anyClassTag[String]) returns set
cache.set(key, classOf[String]) must beAnInstanceOf[AsyncRedisSet[String]]
there was one(async).set[String](key)
}
"create map" in new MockedJavaRedis {
private val map = mock[RedisMap[String, Future]]
async.map(beEq(key))(anyClassTag[String]) returns map
cache.map(key, classOf[String]) must beAnInstanceOf[AsyncRedisMap[String]]
there was one(async).map[String](key)
}
}
}
|
KarelCemus/play-redis
|
src/test/scala/play/api/cache/redis/impl/AsyncJavaRedisSpec.scala
|
Scala
|
mpl-2.0
| 14,771 |
package com.krux.hyperion.expression
case class ParameterFields(
id: String,
description: Option[String] = None
)(implicit val pv: ParameterValues)
|
hoangelos/hyperion
|
core/src/main/scala/com/krux/hyperion/expression/ParameterFields.scala
|
Scala
|
apache-2.0
| 153 |
/**
* Copyright (C) 2014-2015 Really Inc. <http://really.io>
*/
package io.really.gorilla
import scala.collection.mutable.Map
import akka.actor._
import _root_.io.really.RequestContext
import _root_.io.really.rql.RQL.Query
import _root_.io.really.Result
import _root_.io.really.model.FieldKey
import _root_.io.really.protocol.SubscriptionFailure
import _root_.io.really.{ R, ReallyGlobals }
import _root_.io.really.Request.{ SubscribeOnObjects, UnsubscribeFromObjects }
import io.really.Result.SubscribeResult
import io.really.protocol.SubscriptionOpResult
/**
 * SubscriptionManager is one actor per node, responsible for managing the subscriptions on objects, rooms and
 * queries
 * @param globals application-wide globals, used here to reach the gorilla event center
 */
class SubscriptionManager(globals: ReallyGlobals) extends Actor with ActorLogging {
type SubscriberIdentifier = ActorPath
import SubscriptionManager._
private[gorilla] var rSubscriptions: Map[(SubscriberIdentifier, R), InternalRSubscription] = Map.empty
private[gorilla] var roomSubscriptions: Map[SubscriberIdentifier, InternalRSubscription] = Map.empty
def failedToRegisterNewSubscription(originalSender: ActorRef, r: R, newSubscriber: ActorRef, reason: String) = {
newSubscriber ! SubscriptionFailure(r, 500, reason)
sender() ! SubscriptionFailure(r, 500, reason)
log.error(reason)
}
def receive = commonHandler orElse objectSubscriptionsHandler orElse roomSubscriptionsHandler
def commonHandler: Receive = {
case Terminated(actor) =>
//TODO Handle death of subscribers
log.info("Actor terminated: " + actor)
}
/**
 * Handles the messages of Objects subscriptions
 * case `SubscribeOnObjects` is expected to come externally as a request to subscribe on an object
 * case `UnsubscribeFromObjects` is expected to come externally as a request to unsubscribe from an object
 * case `SubscribeOnR` is expected to come internally from the Subscribe request aggregator
 * case `UnsubscribeFromR` is expected to come internally from the Unsubscribe request aggregator
 */
def objectSubscriptionsHandler: Receive = {
case request: SubscribeOnObjects =>
request.body.subscriptions.length match {
case 1 =>
val subscriptionOp = request.body.subscriptions.head
self ! SubscribeOnR(RSubscription(
request.ctx,
subscriptionOp.r,
subscriptionOp.fields,
subscriptionOp.rev,
sender(),
request.pushChannel
))
case len if len > 1 =>
val delegate = sender()
context.actorOf(Props(new SubscribeAggregator(request, delegate, self, globals)))
}
case UnsubscribeFromObjects(ctx, body, pushChannel) =>
body.subscriptions.foreach {
r =>
???
}
???
case SubscribeOnR(subData) =>
val replyTo = sender()
rSubscriptions.get((subData.pushChannel.path, subData.r)).map {
rSub =>
rSub.objectSubscriber ! UpdateSubscriptionFields(subData.fields)
}.getOrElse {
globals.gorillaEventCenter ! NewSubscription(replyTo, subData)
}
case ObjectSubscribed(subData, replyTo, objectSubscriber) =>
rSubscriptions += (subData.pushChannel.path, subData.r) -> InternalRSubscription(objectSubscriber, subData.r)
context.watch(objectSubscriber) //TODO handle death
context.watch(subData.pushChannel) //TODO handle death
if (replyTo == self) {
subData.requestDelegate ! SubscribeResult(Set(SubscriptionOpResult(subData.r, subData.fields)))
} else {
replyTo ! SubscriptionDone(subData.r)
}
case UnsubscribeFromR(subData) => //TODO Ack the delegate
rSubscriptions.get((subData.pushChannel.path, subData.r)).map {
rSub =>
rSub.objectSubscriber ! Unsubscribe
rSubscriptions -= ((subData.pushChannel.path, subData.r))
}
}
def roomSubscriptionsHandler: Receive = {
case SubscribeOnRoom(subData) => ??? //TODO Handle Room subscriptions
case UnsubscribeFromRoom(subData) =>
roomSubscriptions.get(subData.pushChannel.path).map {
roomSub =>
roomSub.objectSubscriber ! Unsubscribe
roomSubscriptions -= subData.pushChannel.path
}
}
}
object SubscriptionManager {
case class InternalRSubscription(objectSubscriber: ActorRef, r: R)
case class SubscribeOnR(rSubscription: RSubscription)
case class SubscribeOnQuery(requester: ActorRef, ctx: RequestContext, query: Query, passOnResults: Result.ReadResult)
case class SubscribeOnRoom(rSubscription: RoomSubscription)
case class UnsubscribeFromR(rSubscription: RSubscription)
case class UnsubscribeFromRoom(roomSubscription: RoomSubscription)
case class UpdateSubscriptionFields(fields: Set[FieldKey])
case object Unsubscribe
case class ObjectSubscribed(subData: RSubscription, replyTo: ActorRef, objectSubscriber: ActorRef)
case class SubscriptionDone(r: R)
}
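// Message-flow sketch (the actor reference and the rSubscription value are assumptions based on the handlers above):
//
//   subscriptionManager ! SubscriptionManager.SubscribeOnR(rSubscription)
//   // -> creates (or updates) an object subscriber via the gorilla event center and replies with SubscriptionDone(r)
//   subscriptionManager ! SubscriptionManager.UnsubscribeFromR(rSubscription)
//   // -> tells the object subscriber to unsubscribe and drops it from the bookkeeping map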
|
reallylabs/really
|
modules/really-core/src/main/scala/io/really/gorilla/SubscriptionManager.scala
|
Scala
|
apache-2.0
| 4,959 |
package zzb.srvdemo.test
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, BeforeAndAfterEach, WordSpec}
import org.scalatest.MustMatchers
import com.typesafe.config.ConfigFactory
import zzb.srvdemo.schema._
import zzb.srvdemo.entites._
import zzb.db.{DBPools, DBAccess}
import zzb.srvdemo.DBOperate
import spray.json._
/**
* Created with IntelliJ IDEA.
* User: Simon Xiao
* Date: 13-8-21
* Time: 1:48 PM
* Copyright baoxian.com 2012~2020
*/
class DBOperatorTest extends WordSpec with MustMatchers
with BeforeAndAfterAll with DBAccess with DefaultJsonProtocol{
val config = ConfigFactory.load("demo")
"db config file " must {
"has db config block" in {
config.hasPath("db") must equal(true)
config.hasPath("db.userdb") must equal(true)
config.hasPath("db.infodb") must equal(true)
}
}
"db operate" must {
var firstid = 0L
var user:User=null
"can add user" in {
DBOperate.addUser(new User("[email protected]", "p1"))
val users = DBOperate.listUsers()
users.head.email must equal("[email protected]")
firstid = users.head.id
user= users.head
}
"can json user" in {
import User._
val json =user.toJson
json.asInstanceOf[JsObject].getFields("email").head must equal(JsString("[email protected]"))
val user_ = json.convertTo[User]
user_.email must equal("[email protected]")
}
"can del user" in {
DBOperate.delUser(firstid)
val users = DBOperate.listUsers()
users must equal(Nil)
}
"can add company" in {
DBOperate.addCompany(new Company("保网", "水荫路"))
val comps = DBOperate.listCompany()
comps.head.name must equal("保网")
firstid = comps.head.id
}
"can del company" in {
DBOperate.delCompany(firstid)
val comps = DBOperate.listCompany()
comps must equal(Nil)
}
}
override def beforeAll = {
DBPools.openDB("userdb", config.getConfig("db.userdb"))
DBPools.openDB("infodb", config.getConfig("db.infodb"))
DBOperate.reCreateDb()
}
override def afterAll() = {
DBPools.closeAllDB()
}
}
|
stepover/zzb
|
examples/srvbox-demoService/src/test/scala/zzb/srvdemo/test/DBOperatorTest.scala
|
Scala
|
mit
| 2,132 |
class IndexedText(val text: Vector[Int],
val indexToWord: Vector[String],
val wordToIndex: Map[String, Int],
val wordIndexToOccurrences: Vector[Vector[Int]]) {
def numWords = indexToWord.length
def uniqueWords = indexToWord
def wordIndex(word: String) = wordToIndex(word)
def wordFromIndex(index: Int) = indexToWord(index)
def wordOccurrences(wordIndex: Int) = wordIndexToOccurrences(wordIndex)
println("length (#word): " + text.length)
println("# unique words: " + uniqueWords.length)
println("most common words: " + wordIndexToOccurrences
.sortBy(-_.length)
.take(300)
.map(inds => wordFromIndex(text(inds.head)) + " (%d)".format(inds.length)))
}
object TextIndexer {
def index(text: String): IndexedText = {
val cleanedText = cleanUpText(text)
// Now we have all the words of the cleaned text.
val wordsIterator = """\s""".r split cleanedText
var wordToIndex = Map.empty[String, Int]
var indexToWord = Vector.empty[String]
var textWordsIndexes = Vector.empty[Int]
var wordIndexToOccurrences: Vector[Vector[Int]] = Vector.empty[Vector[Int]]
var cursor: Int = 0
for (word <- wordsIterator) {
val indexOpt: Option[Int] = wordToIndex.get(word)
val index: Int = if (indexOpt isEmpty) {
val index = indexToWord.length
wordToIndex = wordToIndex updated(word, index)
indexToWord = indexToWord :+ word
wordIndexToOccurrences = wordIndexToOccurrences :+ Vector.empty[Int]
index
} else {
indexOpt get
}
textWordsIndexes = textWordsIndexes :+ index
wordIndexToOccurrences = wordIndexToOccurrences updated (index, wordIndexToOccurrences(index) :+ cursor)
cursor = cursor + 1
}
new IndexedText(textWordsIndexes, indexToWord, wordToIndex, wordIndexToOccurrences)
}
def cleanUpText(text: String): String = {
var cleanedText = text.toLowerCase
.replaceAll("i\\'m", "i is")
.replaceAll("\\bam\\b", "is")
.replaceAll("\\bare\\b", "is")
.replaceAll("\\bwere\\b", "is")
.replaceAll("\\bwas\\b", "is")
.replaceAll("it\'s", "it is")
.replaceAll("\\'s", " has")
.replaceAll("n\'t", " ")
.replaceAll("\\bnot\\b", "")
// Let's get rid of apostrophes.
.replaceAll("\'", "")
cleanedText = """[^a-z]+""".r replaceAllIn(cleanedText, " ")
// 2 or more sequential spaces will collapse to one.
cleanedText = """\s{2,}""".r replaceAllIn(cleanedText, " ")
cleanedText
}
}
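// Usage sketch (the sample sentence is illustrative only):
//
//   val indexed = TextIndexer.index("The cat sat on the mat")
//   indexed.text.length                                 // 6 tokens after cleaning
//   indexed.uniqueWords.length                          // 5 distinct words ("the" appears twice)
//   indexed.wordOccurrences(indexed.wordIndex("the"))   // Vector(0, 4)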
|
Leksyk/ml
|
similar-words/src/main/scala/IndexedText.scala
|
Scala
|
gpl-2.0
| 2,543 |
package org.loudkicks.console
import org.loudkicks._
import org.loudkicks.service.{PostSubscriber, Walls}
class FollowCommandSpec extends UnitSpec {
"FollowCommand" when {
"parsing a valid command line" should {
var bobFollowing: Set[User] = Set.empty
val walls = new Walls with NoPostsShouldBeReceived {
def wall(user: User) = fail("No wall should be accessed")
def follower(user: User, following: User) = {
user should be(Bob)
bobFollowing = bobFollowing + following
following
}
}
"have one user follow another" in {
FollowCommand(Bob, following = Alice, walls).execute should be (Subscriber(Bob, following = Alice))
FollowCommand(Bob, following = Charlie, walls).execute should be (Subscriber(Bob, following = Charlie))
bobFollowing should contain allOf(Alice, Charlie)
}
}
}
trait NoPostsShouldBeReceived extends PostSubscriber {
def posted(post: Post) = fail("No posts should be received")
}
}
|
timothygordon32/loudkicks
|
src/test/scala/org/loudkicks/console/FollowCommandSpec.scala
|
Scala
|
apache-2.0
| 1,036 |
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import support._
import support.CanTraverseValues.ValuesVisitor
import breeze.collection.mutable.SparseArray
import operators._
import breeze.math._
import breeze.storage.Zero
import breeze.util.ArrayUtil
import scala.{specialized=>spec}
import scala.collection.mutable
import scala.reflect.ClassTag
/**
* A vector backed by binary search (with [[breeze.collection.mutable.SparseArray]]).
* There is a parallel array of ints (in 0 until length) and values, sorted by index value.
* To quickly access all stored values use the following loop:
*
* {{{
* var offset = 0
* while( offset < v.activeSize) {
* val index: Int = v.indexAt(offset)
* val value: E = v.valueAt(offset)
*
* offset += 1
* }
* }}}
*
* @author dlwh
*/
@SerialVersionUID(1)
class SparseVector[@spec(Double, Int, Float, Long) V](val array: SparseArray[V])
(implicit zero: Zero[V])
extends StorageVector[V]
with VectorLike[V, SparseVector[V]] with Serializable {
/** This auxiliary constructor assumes that the index array is already sorted. */
def this(index: Array[Int], data: Array[V], activeSize: Int, length: Int)(implicit value: Zero[V]) = this(new SparseArray(index, data, activeSize, length, value.zero))
/** This auxiliary constructor assumes that the index array is already sorted. */
def this(index: Array[Int], data: Array[V], length: Int)(implicit value: Zero[V]) = this(index, data, index.length, length)
// Don't delete
SparseVector.init()
def data: Array[V] = array.data
def index: Array[Int] = array.index
def activeSize = array.activeSize
def used = activeSize
def length = array.length
def repr: SparseVector[V] = this
def contains(i: Int) = array.contains(i)
def apply(i: Int): V = {
if(i < 0 || i >= size) throw new IndexOutOfBoundsException(i + " not in [0,"+size+")")
array(i)
}
def update(i: Int, v: V): Unit = {
if(i < 0 || i >= size) throw new IndexOutOfBoundsException(i + " not in [0,"+size+")")
array(i) = v
}
def activeIterator: Iterator[(Int, V)] = activeKeysIterator zip activeValuesIterator
def activeValuesIterator: Iterator[V] = data.iterator.take(activeSize)
def activeKeysIterator: Iterator[Int] = index.iterator.take(activeSize)
// TODO: allow this to vary
/** This is always assumed to be equal to 0, for now. */
def default: V = zero.zero
override def equals(p1: Any) = p1 match {
case x: Vector[_] =>
this.length == x.length &&
(valuesIterator sameElements x.valuesIterator)
case _ => false
}
def isActive(rawIndex: Int) = array.isActive(rawIndex)
override def toString = {
activeIterator.mkString("SparseVector(",", ", ")")
}
def copy: SparseVector[V] = {
new SparseVector[V](ArrayUtil.copyOf(index, index.length), ArrayUtil.copyOf(data, index.length), activeSize, size)
}
def reserve(nnz: Int): Unit = {
array.reserve(nnz)
}
def compact(): Unit = {
//ToDo 3: will require changes if non-zero defaults are implemented
array.compact()
}
/**
* Sets the underlying sparse array to use this data
* @param index must be a sorted list of indices
* @param data values corresponding to the index
* @param activeSize number of active elements. The first activeSize will be used.
*/
def use(index: Array[Int], data: Array[V], activeSize: Int): Unit = {
require(activeSize <= size, "Can't have more elements in the array than length!")
require(activeSize >= 0, "activeSize must be non-negative")
require(data.length >= activeSize, "activeSize must be no greater than array length...")
array.use(index, data, activeSize)
}
/**
* same as data(i). Gives the value at the underlying offset.
* @param i index into the data array
* @return
*/
def valueAt(i: Int): V = data(i)
/**
* Gives the logical index from the physical index.
* @param i
* @return
*/
def indexAt(i: Int): Int = index(i)
/**
* Only gives true if isActive would return true for all i. (May be false anyway)
* @return
*/
def allVisitableIndicesActive: Boolean = true
def asCSCMatrix()(implicit man: ClassTag[V]): CSCMatrix[V] = {
// zero SV
if (index.length == 0)
CSCMatrix.zeros[V](1, length)
else {
var ii = 0
val nIndex = Array.tabulate[Int](length + 1)( (cp: Int) =>
if (cp < length && cp == index(ii)) {ii += 1; ii - 1}
else ii )
new CSCMatrix[V](data, 1, length, nIndex, activeSize, Array.fill[Int](data.length)(0))
}
}
}
object SparseVector extends SparseVectorOps
with DenseVector_SparseVector_Ops
with SparseVector_DenseMatrixOps
with SparseVector_DenseVector_Ops {
def zeros[@spec(Double, Int, Float, Long) V: ClassTag:Zero](size: Int) = new SparseVector(Array.empty, Array.empty[V], 0, size)
def apply[@spec(Double, Int, Float, Long) V:Zero](values: Array[V]) = new SparseVector(Array.range(0,values.length), values, values.length, values.length)
def apply[V:ClassTag:Zero](values: V*):SparseVector[V] = apply(values.toArray)
def fill[@spec(Double, Int, Float, Long) V:ClassTag:Zero](size: Int)(v: =>V):SparseVector[V] = apply(Array.fill(size)(v))
def tabulate[@spec(Double, Int, Float, Long) V:ClassTag:Zero](size: Int)(f: Int=>V):SparseVector[V]= apply(Array.tabulate(size)(f))
def apply[V:ClassTag:Zero](length: Int)(values: (Int, V)*): SparseVector[V] = {
val r = zeros[V](length)
for( (i, v) <- values) {
r(i) = v
}
r
}
def vertcat[V:Zero:ClassTag](vectors: SparseVector[V]*): SparseVector[V] = {
val resultArray = vectors.map(_.array).foldLeft(new SparseArray[V](0))(_ concatenate _)
new SparseVector(resultArray)
}
def horzcat[V:Zero:ClassTag](vectors: SparseVector[V]*): CSCMatrix[V] ={
if(!vectors.forall(_.size==vectors(0).size))
throw new IllegalArgumentException("vector lengths must be equal, but got: " + vectors.map(_.length).mkString(", "))
val rows = vectors(0).length
val cols = vectors.length
val data = new Array[V](vectors.map(_.data.length).sum)
val rowIndices = new Array[Int](data.length)
val colPtrs = new Array[Int](vectors.length + 1)
val used = data.length
var vec = 0
var off = 0
while(vec < vectors.length) {
colPtrs(vec) = off
System.arraycopy(vectors(vec).data, 0, data, off, vectors(vec).activeSize)
System.arraycopy(vectors(vec).index, 0, rowIndices, off, vectors(vec).activeSize)
off += vectors(vec).activeSize
vec += 1
}
colPtrs(vec) = off
new CSCMatrix(data, rows, cols, colPtrs, used, rowIndices)
}
// implicits
class CanCopySparseVector[@spec(Double, Int, Float, Long) V:ClassTag:Zero] extends CanCopy[SparseVector[V]] {
def apply(v1: SparseVector[V]) = {
v1.copy
}
}
implicit def canCopySparse[@spec(Double, Int, Float, Long) V: ClassTag: Zero] = new CanCopySparseVector[V]
implicit def canMapValues[V, V2: ClassTag: Zero]:CanMapValues[SparseVector[V], V, V2, SparseVector[V2]] = {
new CanMapValues[SparseVector[V], V, V2, SparseVector[V2]] {
/**Maps all key-value pairs from the given collection. */
override def apply(from: SparseVector[V], fn: (V) => V2): SparseVector[V2] = {
SparseVector.tabulate(from.length)(i => fn(from(i)))
}
}
}
implicit def canMapActiveValues[V, V2: ClassTag: Zero]:CanMapActiveValues[SparseVector[V], V, V2, SparseVector[V2]] = {
new CanMapActiveValues[SparseVector[V], V, V2, SparseVector[V2]] {
/**Maps all active key-value pairs from the given collection. */
override def apply(from: SparseVector[V], fn: (V) => V2): SparseVector[V2] = {
val out = new Array[V2](from.activeSize)
var i = 0
while(i < from.activeSize) {
out(i) = fn(from.data(i))
i += 1
}
new SparseVector(from.index.take(from.activeSize), out, from.activeSize, from.length)
}
}
}
implicit def scalarOf[T]: ScalarOf[SparseVector[T], T] = ScalarOf.dummy
implicit def canIterateValues[V]: CanTraverseValues[SparseVector[V], V] = {
new CanTraverseValues[SparseVector[V],V] {
def isTraversableAgain(from: SparseVector[V]): Boolean = true
/** Iterates all key-value pairs from the given collection. */
def traverse(from: SparseVector[V], fn: ValuesVisitor[V]): Unit = {
fn.zeros(from.size - from.activeSize, from.default)
fn.visitArray(from.data, 0, from.activeSize, 1)
}
}
}
implicit def canTraverseKeyValuePairs[V]:CanTraverseKeyValuePairs[SparseVector[V], Int, V] = {
new CanTraverseKeyValuePairs[SparseVector[V], Int, V] {
def isTraversableAgain(from: SparseVector[V]): Boolean = true
/** Iterates all key-value pairs from the given collection. */
def traverse(from: SparseVector[V], fn: CanTraverseKeyValuePairs.KeyValuePairsVisitor[Int, V]): Unit = {
import from._
fn.visitArray(index, data, 0, activeSize, 1)
if(activeSize != size) {
fn.zeros(size - activeSize, Iterator.range(0, size).filterNot(index contains _), from.default)
}
}
}
}
implicit def canCreateZeros[V:ClassTag:Zero]: CanCreateZeros[SparseVector[V], Int] = {
new CanCreateZeros[SparseVector[V], Int] {
def apply(d: Int): SparseVector[V] = {
zeros[V](d)
}
}
}
implicit def canCreateZerosLike[V:ClassTag:Zero]: CanCreateZerosLike[SparseVector[V], SparseVector[V]] = {
new CanCreateZerosLike[SparseVector[V], SparseVector[V]] {
def apply(d: SparseVector[V]): SparseVector[V] = {
zeros[V](d.length)
}
}
}
implicit def canTransformValues[V:Zero:ClassTag]:CanTransformValues[SparseVector[V], V] = {
new CanTransformValues[SparseVector[V], V] {
val z = implicitly[Zero[V]]
/**Transforms all key-value pairs from the given collection. */
def transform(from: SparseVector[V], fn: (V) => V): Unit = {
val newData = mutable.ArrayBuilder.make[V]()
val newIndex = mutable.ArrayBuilder.make[Int]()
var used = 0
var i = 0
while(i < from.length) {
val vv = fn(from(i))
if(vv != z.zero) {
newData += vv
newIndex += i
used += 1
}
i += 1
}
from.array.use(newIndex.result(), newData.result(), used)
}
/**Transforms all active key-value pairs from the given collection. */
def transformActive(from: SparseVector[V], fn: (V) => V): Unit = {
var i = 0
while(i < from.activeSize) {
from.data(i) = fn(from.data(i))
i += 1
}
}
}
}
implicit def canMapPairs[V, V2: ClassTag: Zero]:CanMapKeyValuePairs[SparseVector[V], Int, V, V2, SparseVector[V2]] = {
new CanMapKeyValuePairs[SparseVector[V], Int, V, V2, SparseVector[V2]] {
/**Maps all key-value pairs from the given collection. */
def map(from: SparseVector[V], fn: (Int, V) => V2): SparseVector[V2] = {
SparseVector.tabulate(from.length)(i => fn(i, from(i)))
}
/**Maps all active key-value pairs from the given collection. */
def mapActive(from: SparseVector[V], fn: (Int, V) => V2): SparseVector[V2] = {
val out = new Array[V2](from.used)
var i = 0
while(i < from.used) {
out(i) = fn(from.index(i), from.data(i))
i += 1
}
new SparseVector(from.index.take(from.used), out, from.used, from.length)
}
}
}
// implicit def canTranspose[V:ClassTag:Zero]: CanTranspose[SparseVector[V], CSCMatrix[V]] = {
// new CanTranspose[SparseVector[V], CSCMatrix[V]] {
// def apply(from: SparseVector[V]): CSCMatrix[V] = {
// val transposedMtx: CSCMatrix[V] = CSCMatrix.zeros[V](1, from.length)
// var i = 0
// while (i < from.activeSize) {
// val c = from.index(i)
// transposedMtx(0, c) = from.data(i)
// i += 1
// }
// transposedMtx
// }
// }
// }
implicit def canTransposeComplex: CanTranspose[SparseVector[Complex], CSCMatrix[Complex]] = {
new CanTranspose[SparseVector[Complex], CSCMatrix[Complex]] {
def apply(from: SparseVector[Complex]) = {
val transposedMtx: CSCMatrix[Complex] = CSCMatrix.zeros[Complex](1, from.length)
var i = 0
while (i < from.activeSize) {
val c = from.index(i)
transposedMtx(0, c) = from.data(i).conjugate
i += 1
}
transposedMtx
}
}
}
implicit def canDim[E]: dim.Impl[SparseVector[E],Int] = new dim.Impl[SparseVector[E],Int] {
def apply(v: SparseVector[E]): Int = v.size
}
implicit def canTabulate[E:ClassTag:Zero]: CanTabulate[Int, SparseVector[E], E] = new CanTabulate[Int,SparseVector[E],E] {
def apply(d: Int, f: (Int) => E): SparseVector[E] = tabulate[E](d)(f)
}
implicit def space[E: Field : ClassTag : Zero]: MutableFiniteCoordinateField[SparseVector[E], Int, E] = {
MutableFiniteCoordinateField.make[SparseVector[E], Int, E]
}
@noinline
private def init() = {}
}
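// Construction sketch (values are illustrative):
//
//   val v = SparseVector.zeros[Double](5)   // length 5, no stored entries
//   v(1) = 2.0
//   v(3) = 4.0
//   v(0)           // 0.0, the implicit Zero default
//   v.activeSize   // 2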
|
sheide/breeze
|
math/src/main/scala/breeze/linalg/SparseVector.scala
|
Scala
|
apache-2.0
| 13,836 |
package com.scalableQuality.quick.surface.commandLineOptions
import com.scalableQuality.quick.core.fileComponentDescriptions.OrderedRowDescription
import com.scalableQuality.quick.core.fileProcessingPhase.{
MatchRows,
RowsProcessingPhase,
CheckAndValidateAndMatchRows
}
import com.scalableQuality.quick.mantle.parsing.RawRow
import QuickState.{RowsProcessingPhaseConstructor, validateAndMatchRows}
// quick -d desc.xml -i loadCardHolder -l label1,Label2 file1 file2
case class QuickState(
descriptionFile: String = "",
descriptionId: Option[String] = None,
leftFileLabel: Option[String] = None,
rightFileLabel: Option[String] = None,
leftFile: String = "",
rightFile: String = "",
rowsProcessingPhase: RowsProcessingPhaseConstructor = validateAndMatchRows,
ignoreUnknownRows: Boolean = false
) {
def addLabel(label: String): QuickState = leftFileLabel match {
case None =>
this.copy(leftFileLabel = Some(label))
case _ =>
this.copy(rightFileLabel = Some(label))
}
def addFile(file: String): QuickState = leftFile match {
case "" =>
this.copy(leftFile = file)
case _ =>
this.copy(rightFile = file)
}
}
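// Accumulation sketch (values are illustrative): the first file/label fills the "left" slot, the second the "right".
//
//   QuickState().addFile("file1").addLabel("label1").addFile("file2").addLabel("Label2")
//   // => leftFile = "file1", rightFile = "file2", leftFileLabel = Some("label1"), rightFileLabel = Some("Label2")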
object QuickState {
type RowsProcessingPhaseConstructor = (OrderedRowDescription,
List[RawRow],
List[RawRow],
Option[String],
Option[String]) => RowsProcessingPhase
def validateAndMatchRows: RowsProcessingPhaseConstructor =
CheckAndValidateAndMatchRows(_, _, _, _, _)
def matchRows: RowsProcessingPhaseConstructor = MatchRows(_, _, _, _, _)
}
|
MouslihAbdelhakim/Quick
|
src/main/scala/com/scalableQuality/quick/surface/commandLineOptions/QuickState.scala
|
Scala
|
apache-2.0
| 1,708 |
package com.bitmotif.part_1
object Exercise_2_1 {
def fib(n: Int): Int = {
@annotation.tailrec
def loop(end: Int, loopCount: Int, fibN_Minus1: Int, fibN_Minus2: Int): Int =
if (end == loopCount) fibN_Minus1 + fibN_Minus2 else loop(end, loopCount + 1, fibN_Minus1 + fibN_Minus2, fibN_Minus1)
if(n < 1) {
throw new IllegalArgumentException("The argument must be 1 or greater.")
}
else if (n == 1) {
0
}
else if (n == 2) {
1
}
else {
loop(n, 3, 1, 0)
}
}
}
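// Sanity check (the sequence is 1-indexed, so fib(1) == 0):
//
//   (1 to 8).map(Exercise_2_1.fib)   // Vector(0, 1, 1, 2, 3, 5, 8, 13)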
|
pjberry/functional-programming-in-scala
|
src/com/bitmotif/part_1/Exercise_2_1.scala
|
Scala
|
mit
| 529 |
package com.benkolera.Rt.Parser
import scalaz._
import syntax.applicative._
object NewTicket {
val ticketIdRe = """# Ticket (\d+) created.""".r
def parseId( responseStr: String ):Parser[Int] = {
parseResponse( responseStr ).flatMap( lines =>
lines match {
case ticketIdRe(id)::xs => id.toInt.point[Parser]
case _ => parserFail(MissingField("Created Ticket Id"))
}
)
}
}
|
benkolera/scala-rt
|
src/main/scala/Rt/Parser/NewTicket.scala
|
Scala
|
mit
| 428 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import util.Arrays.asList
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.UpdateMetadataRequest
import org.apache.kafka.common.requests.UpdateMetadataRequest.{Broker, EndPoint}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.junit.Test
import org.junit.Assert._
import scala.collection.JavaConverters._
class MetadataCacheTest {
@Test
def getTopicMetadataNonExistingTopics() {
val topic = "topic"
val cache = new MetadataCache(1)
val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))
assertTrue(topicMetadata.isEmpty)
}
@Test
def getTopicMetadata() {
val topic0 = "topic-0"
val topic1 = "topic-1"
val cache = new MetadataCache(1)
val zkVersion = 3
val controllerId = 2
val controllerEpoch = 1
def endPoints(brokerId: Int): Seq[EndPoint] = {
val host = s"foo-$brokerId"
Seq(
new EndPoint(host, 9092, SecurityProtocol.PLAINTEXT, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)),
new EndPoint(host, 9093, SecurityProtocol.SSL, ListenerName.forSecurityProtocol(SecurityProtocol.SSL))
)
}
val brokers = (0 to 4).map { brokerId =>
new Broker(brokerId, endPoints(brokerId).asJava, "rack1")
}.toSet
val partitionStates = Map(
new TopicPartition(topic0, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, 0, 0, asList(0, 1, 3), zkVersion, asList(0, 1, 3), asList()),
new TopicPartition(topic0, 1) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, 1, 1, asList(1, 0), zkVersion, asList(1, 2, 0, 4), asList()),
new TopicPartition(topic1, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, 2, 2, asList(2, 1), zkVersion, asList(2, 1, 3), asList()))
val version = ApiKeys.UPDATE_METADATA.latestVersion
val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
partitionStates.asJava, brokers.asJava).build()
cache.updateCache(15, updateMetadataRequest)
for (securityProtocol <- Seq(SecurityProtocol.PLAINTEXT, SecurityProtocol.SSL)) {
val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
def checkTopicMetadata(topic: String): Unit = {
val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName)
assertEquals(1, topicMetadatas.size)
val topicMetadata = topicMetadatas.head
assertEquals(Errors.NONE, topicMetadata.error)
assertEquals(topic, topicMetadata.topic)
val topicPartitionStates = partitionStates.filter { case (tp, _) => tp.topic == topic }
val partitionMetadatas = topicMetadata.partitionMetadata.asScala.sortBy(_.partition)
assertEquals(s"Unexpected partition count for topic $topic", topicPartitionStates.size, partitionMetadatas.size)
partitionMetadatas.zipWithIndex.foreach { case (partitionMetadata, partitionId) =>
assertEquals(Errors.NONE, partitionMetadata.error)
assertEquals(partitionId, partitionMetadata.partition)
val leader = partitionMetadata.leader
val partitionState = topicPartitionStates(new TopicPartition(topic, partitionId))
assertEquals(partitionState.basePartitionState.leader, leader.id)
assertEquals(partitionState.basePartitionState.isr, partitionMetadata.isr.asScala.map(_.id).asJava)
assertEquals(partitionState.basePartitionState.replicas, partitionMetadata.replicas.asScala.map(_.id).asJava)
val endPoint = endPoints(partitionMetadata.leader.id).find(_.listenerName == listenerName).get
assertEquals(endPoint.host, leader.host)
assertEquals(endPoint.port, leader.port)
}
}
checkTopicMetadata(topic0)
checkTopicMetadata(topic1)
}
}
@Test
def getTopicMetadataPartitionLeaderNotAvailable() {
val topic = "topic"
val cache = new MetadataCache(1)
val zkVersion = 3
val controllerId = 2
val controllerEpoch = 1
val securityProtocol = SecurityProtocol.PLAINTEXT
val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
val brokers = Set(new Broker(0, Seq(new EndPoint("foo", 9092, securityProtocol, listenerName)).asJava, null))
val leader = 1
val leaderEpoch = 1
val partitionStates = Map(
new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, asList(0), zkVersion, asList(0), asList()))
val version = ApiKeys.UPDATE_METADATA.latestVersion
val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
partitionStates.asJava, brokers.asJava).build()
cache.updateCache(15, updateMetadataRequest)
val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName)
assertEquals(1, topicMetadatas.size)
val topicMetadata = topicMetadatas.head
assertEquals(Errors.NONE, topicMetadata.error)
val partitionMetadatas = topicMetadata.partitionMetadata
assertEquals(1, partitionMetadatas.size)
val partitionMetadata = partitionMetadatas.get(0)
assertEquals(0, partitionMetadata.partition)
assertEquals(Errors.LEADER_NOT_AVAILABLE, partitionMetadata.error)
assertTrue(partitionMetadata.isr.isEmpty)
assertEquals(1, partitionMetadata.replicas.size)
assertEquals(0, partitionMetadata.replicas.get(0).id)
}
@Test
def getTopicMetadataReplicaNotAvailable() {
val topic = "topic"
val cache = new MetadataCache(1)
val zkVersion = 3
val controllerId = 2
val controllerEpoch = 1
val securityProtocol = SecurityProtocol.PLAINTEXT
val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
val brokers = Set(new Broker(0, Seq(new EndPoint("foo", 9092, securityProtocol, listenerName)).asJava, null))
// replica 1 is not available
val leader = 0
val leaderEpoch = 0
val replicas = asList[Integer](0, 1)
val isr = asList[Integer](0)
val partitionStates = Map(
new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, zkVersion, replicas, asList()))
val version = ApiKeys.UPDATE_METADATA.latestVersion
val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
partitionStates.asJava, brokers.asJava).build()
cache.updateCache(15, updateMetadataRequest)
// Validate errorUnavailableEndpoints = false
val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false)
assertEquals(1, topicMetadatas.size)
val topicMetadata = topicMetadatas.head
assertEquals(Errors.NONE, topicMetadata.error)
val partitionMetadatas = topicMetadata.partitionMetadata
assertEquals(1, partitionMetadatas.size)
val partitionMetadata = partitionMetadatas.get(0)
assertEquals(0, partitionMetadata.partition)
assertEquals(Errors.NONE, partitionMetadata.error)
assertEquals(Set(0, 1), partitionMetadata.replicas.asScala.map(_.id).toSet)
assertEquals(Set(0), partitionMetadata.isr.asScala.map(_.id).toSet)
// Validate errorUnavailableEndpoints = true
val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true)
assertEquals(1, topicMetadatasWithError.size)
val topicMetadataWithError = topicMetadatasWithError.head
assertEquals(Errors.NONE, topicMetadataWithError.error)
val partitionMetadatasWithError = topicMetadataWithError.partitionMetadata
assertEquals(1, partitionMetadatasWithError.size)
val partitionMetadataWithError = partitionMetadatasWithError.get(0)
assertEquals(0, partitionMetadataWithError.partition)
assertEquals(Errors.REPLICA_NOT_AVAILABLE, partitionMetadataWithError.error)
assertEquals(Set(0), partitionMetadataWithError.replicas.asScala.map(_.id).toSet)
assertEquals(Set(0), partitionMetadataWithError.isr.asScala.map(_.id).toSet)
}
@Test
def getTopicMetadataIsrNotAvailable() {
val topic = "topic"
val cache = new MetadataCache(1)
val zkVersion = 3
val controllerId = 2
val controllerEpoch = 1
val securityProtocol = SecurityProtocol.PLAINTEXT
val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
val brokers = Set(new Broker(0, Seq(new EndPoint("foo", 9092, securityProtocol, listenerName)).asJava, "rack1"))
// replica 1 is not available
val leader = 0
val leaderEpoch = 0
val replicas = asList[Integer](0)
val isr = asList[Integer](0, 1)
val partitionStates = Map(
new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, zkVersion, replicas, asList()))
val version = ApiKeys.UPDATE_METADATA.latestVersion
val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
partitionStates.asJava, brokers.asJava).build()
cache.updateCache(15, updateMetadataRequest)
// Validate errorUnavailableEndpoints = false
val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false)
assertEquals(1, topicMetadatas.size)
val topicMetadata = topicMetadatas.head
assertEquals(Errors.NONE, topicMetadata.error)
val partitionMetadatas = topicMetadata.partitionMetadata
assertEquals(1, partitionMetadatas.size)
val partitionMetadata = partitionMetadatas.get(0)
assertEquals(0, partitionMetadata.partition)
assertEquals(Errors.NONE, partitionMetadata.error)
assertEquals(Set(0), partitionMetadata.replicas.asScala.map(_.id).toSet)
assertEquals(Set(0, 1), partitionMetadata.isr.asScala.map(_.id).toSet)
// Validate errorUnavailableEndpoints = true
val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true)
assertEquals(1, topicMetadatasWithError.size)
val topicMetadataWithError = topicMetadatasWithError.head
assertEquals(Errors.NONE, topicMetadataWithError.error)
val partitionMetadatasWithError = topicMetadataWithError.partitionMetadata
assertEquals(1, partitionMetadatasWithError.size)
val partitionMetadataWithError = partitionMetadatasWithError.get(0)
assertEquals(0, partitionMetadataWithError.partition)
assertEquals(Errors.REPLICA_NOT_AVAILABLE, partitionMetadataWithError.error)
assertEquals(Set(0), partitionMetadataWithError.replicas.asScala.map(_.id).toSet)
assertEquals(Set(0), partitionMetadataWithError.isr.asScala.map(_.id).toSet)
}
@Test
def getTopicMetadataWithNonSupportedSecurityProtocol() {
val topic = "topic"
val cache = new MetadataCache(1)
val securityProtocol = SecurityProtocol.PLAINTEXT
val brokers = Set(new Broker(0,
Seq(new EndPoint("foo", 9092, securityProtocol, ListenerName.forSecurityProtocol(securityProtocol))).asJava, ""))
val controllerEpoch = 1
val leader = 0
val leaderEpoch = 0
val replicas = asList[Integer](0)
val isr = asList[Integer](0, 1)
val partitionStates = Map(
new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, 3, replicas, asList()))
val version = ApiKeys.UPDATE_METADATA.latestVersion
val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, partitionStates.asJava,
brokers.asJava).build()
cache.updateCache(15, updateMetadataRequest)
val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.SSL))
assertEquals(1, topicMetadata.size)
assertEquals(1, topicMetadata.head.partitionMetadata.size)
assertEquals(-1, topicMetadata.head.partitionMetadata.get(0).leaderId)
}
@Test
def getAliveBrokersShouldNotBeMutatedByUpdateCache() {
val topic = "topic"
val cache = new MetadataCache(1)
def updateCache(brokerIds: Set[Int]) {
val brokers = brokerIds.map { brokerId =>
val securityProtocol = SecurityProtocol.PLAINTEXT
new Broker(brokerId, Seq(
new EndPoint("foo", 9092, securityProtocol, ListenerName.forSecurityProtocol(securityProtocol))).asJava, "")
}
val controllerEpoch = 1
val leader = 0
val leaderEpoch = 0
val replicas = asList[Integer](0)
val isr = asList[Integer](0, 1)
val partitionStates = Map(
new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, 3, replicas, asList()))
val version = ApiKeys.UPDATE_METADATA.latestVersion
val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, partitionStates.asJava,
brokers.asJava).build()
cache.updateCache(15, updateMetadataRequest)
}
val initialBrokerIds = (0 to 2).toSet
updateCache(initialBrokerIds)
val aliveBrokersFromCache = cache.getAliveBrokers
// This should not change `aliveBrokersFromCache`
updateCache((0 to 3).toSet)
assertEquals(initialBrokerIds, aliveBrokersFromCache.map(_.id).toSet)
}
}
|
sebadiaz/kafka
|
core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala
|
Scala
|
apache-2.0
| 14,231 |
package com.nextgendata.framework.maps
/**
* Created by Craig on 2016-04-26.
*
* This is the generic trait (or interface) for all ref & map lookup objects. To make the Mapper
* objects more natural and flexible it extends the built-in Scala immutable Map interface and
* adds additional functions required to provide a convenient ref & map API.
*
  * This is the base class for all mappers, which allows generic stackable traits to be mixed in
  * or to decorate mapper implementations in a reusable and composable way.
*/
trait Mapper[K, V] extends Map[K, V] with Serializable {
/**
* Mappers must provide getDefault implementation which returns a default value record.
*
    * This is useful when the source value being mapped is not provided and the application
    * must fall back to default mapping values.
*
* @return
*/
def getDefault: V
/**
* Mappers must provide a getInvalid implementation which returns an invalid value record.
*
    * This is useful when the source value being mapped does not find a matching value
    * in the lookup and the application needs to treat this as an invalid mapping.
*
* @return
*/
def getInvalid: V
/**
    * As the key is a generic type K, this function assists with generic code so that
    * stackable traits or decorators can be mixed in to match provided source keys in the
    * lookups against an "empty" value and then, for example, return default values instead.
*
* @return
*/
def getEmptyKey: K
}
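// A minimal sketch of a Mapper implementation (not part of the original module): an in-memory
// lookup backed by an immutable Map, assuming Scala 2.12-style collection signatures. The
// country codes and sentinel values below are invented purely for illustration.
object CountryCodeMapperSketch extends Mapper[String, String] {
  private val underlying = Map("CA" -> "Canada", "US" -> "United States")
  // Mapper-specific members
  def getDefault: String = "Unknown (defaulted)"
  def getInvalid: String = "Unknown (invalid)"
  def getEmptyKey: String = ""
  // Map[K, V] delegation
  def get(key: String): Option[String] = underlying.get(key)
  def iterator: Iterator[(String, String)] = underlying.iterator
  def +[V1 >: String](kv: (String, V1)): Map[String, V1] = underlying + kv
  def -(key: String): Map[String, String] = underlying - key
}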
|
craigjar/nextgendata
|
src/main/scala/com/nextgendata/framework/maps/Mapper.scala
|
Scala
|
apache-2.0
| 1,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc.connection
import java.sql.{Connection, Driver}
import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.jdbc.JdbcConnectionProvider
private[jdbc] class BasicConnectionProvider extends JdbcConnectionProvider with Logging {
/**
   * Additional properties for the data connection (data source properties take precedence).
*/
def getAdditionalProperties(options: JDBCOptions): Properties = new Properties()
override val name: String = "basic"
override def canHandle(driver: Driver, options: Map[String, String]): Boolean = {
val jdbcOptions = new JDBCOptions(options)
jdbcOptions.keytab == null || jdbcOptions.principal == null
}
override def getConnection(driver: Driver, options: Map[String, String]): Connection = {
val jdbcOptions = new JDBCOptions(options)
val properties = getAdditionalProperties(jdbcOptions)
jdbcOptions.asConnectionProperties.asScala.foreach { case(k, v) =>
properties.put(k, v)
}
logDebug(s"JDBC connection initiated with URL: ${jdbcOptions.url} and properties: $properties")
driver.connect(jdbcOptions.url, properties)
}
override def modifiesSecurityContext(
driver: Driver,
options: Map[String, String]
): Boolean = {
    // BasicConnectionProvider is the default, non-secure connection provider, so just return false
false
}
}
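// A hypothetical sketch (not part of Spark) of how a provider could extend BasicConnectionProvider
// to contribute extra driver properties; the "loginTimeout" property and provider name below are
// invented. Data source properties still take precedence, because getConnection copies them over
// these additional properties afterwards.
private[jdbc] class LoginTimeoutConnectionProvider extends BasicConnectionProvider {
  override val name: String = "loginTimeout"
  override def getAdditionalProperties(options: JDBCOptions): Properties = {
    val properties = new Properties()
    properties.setProperty("loginTimeout", "10")
    properties
  }
}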
|
vinodkc/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/BasicConnectionProvider.scala
|
Scala
|
apache-2.0
| 2,332 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.regex.java8
import java.time.Instant
import kantan.regex.{GroupDecoder, MatchDecoder}
import kantan.regex.java8.arbitrary._
import kantan.regex.laws.discipline.{DisciplineSuite, GroupDecoderTests, MatchDecoderTests, SerializableTests}
class InstantDecoderTests extends DisciplineSuite {
checkAll("GroupDecoder[Instant]", GroupDecoderTests[Instant].decoder[Int, Int])
checkAll("GroupDecoder[Instant]", SerializableTests[GroupDecoder[Instant]].serializable)
checkAll("MatchDecoder[Instant]", MatchDecoderTests[Instant].decoder[Int, Int])
checkAll("MatchDecoder[Instant]", SerializableTests[MatchDecoder[Instant]].serializable)
}
|
nrinaudo/kantan.regex
|
java8/src/test/scala/kantan/regex/java8/InstantDecoderTests.scala
|
Scala
|
apache-2.0
| 1,255 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
class UnsafeTraversableMethods
extends Inspection(
text = "Use of unsafe Traversable methods.",
defaultLevel = Levels.Error,
description = "Checks for use of unsafe methods on Traversable.",
explanation =
"The following methods on Traversable are considered to be unsafe (head, tail, init, last, reduce, reduceLeft, reduceRight, max, maxBy, min, minBy)."
) {
private val unsafeMethods = Set(
"head",
"tail",
"init",
"last",
"reduce",
"reduceLeft",
"reduceRight",
"max",
"maxBy",
"min",
"minBy"
)
def inspector(context: InspectionContext): Inspector =
new Inspector(context) {
override def postTyperTraverser =
new context.Traverser {
import context.global._
override def inspect(tree: Tree): Unit = {
tree match {
case Select(left, TermName(method)) =>
if (isTraversable(left) && unsafeMethods.contains(method))
context.warn(tree.pos, self, tree.toString.take(500))
case _ => continue(tree)
}
}
}
}
}
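// Illustration only (not part of the plugin): the commented line below is the kind of call this
// inspection reports, because head throws on an empty collection; headOption is the usual safe
// alternative.
object UnsafeTraversableMethodsExample {
  val xs: List[Int] = List.empty
  // val first: Int = xs.head          // would be flagged: unsafe on an empty Traversable
  val firstOption: Option[Int] = xs.headOption
}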
|
sksamuel/scalac-scapegoat-plugin
|
src/main/scala/com/sksamuel/scapegoat/inspections/collections/UnsafeTraversableMethods.scala
|
Scala
|
apache-2.0
| 1,279 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.nio.ByteBuffer
import org.apache.spark.TaskState.TaskState
import org.apache.spark.resource.ResourceInformation
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler.ExecutorLossReason
import org.apache.spark.util.SerializableBuffer
private[spark] sealed trait CoarseGrainedClusterMessage extends Serializable
private[spark] object CoarseGrainedClusterMessages {
case object RetrieveSparkAppConfig extends CoarseGrainedClusterMessage
case class SparkAppConfig(
sparkProperties: Seq[(String, String)],
ioEncryptionKey: Option[Array[Byte]],
hadoopDelegationCreds: Option[Array[Byte]])
extends CoarseGrainedClusterMessage
case object RetrieveLastAllocatedExecutorId extends CoarseGrainedClusterMessage
// Driver to executors
case class LaunchTask(data: SerializableBuffer) extends CoarseGrainedClusterMessage
case class KillTask(taskId: Long, executor: String, interruptThread: Boolean, reason: String)
extends CoarseGrainedClusterMessage
case class KillExecutorsOnHost(host: String)
extends CoarseGrainedClusterMessage
sealed trait RegisterExecutorResponse
case object RegisteredExecutor extends CoarseGrainedClusterMessage with RegisterExecutorResponse
case class RegisterExecutorFailed(message: String) extends CoarseGrainedClusterMessage
with RegisterExecutorResponse
case class UpdateDelegationTokens(tokens: Array[Byte])
extends CoarseGrainedClusterMessage
// Executors to driver
case class RegisterExecutor(
executorId: String,
executorRef: RpcEndpointRef,
hostname: String,
cores: Int,
logUrls: Map[String, String],
attributes: Map[String, String],
resources: Map[String, ResourceInformation])
extends CoarseGrainedClusterMessage
case class StatusUpdate(
executorId: String,
taskId: Long,
state: TaskState,
data: SerializableBuffer,
resources: Map[String, ResourceInformation] = Map.empty)
extends CoarseGrainedClusterMessage
object StatusUpdate {
/** Alternate factory method that takes a ByteBuffer directly for the data field */
def apply(executorId: String, taskId: Long, state: TaskState, data: ByteBuffer,
resources: Map[String, ResourceInformation]): StatusUpdate = {
StatusUpdate(executorId, taskId, state, new SerializableBuffer(data), resources)
}
}
// Internal messages in driver
case object ReviveOffers extends CoarseGrainedClusterMessage
case object StopDriver extends CoarseGrainedClusterMessage
case object StopExecutor extends CoarseGrainedClusterMessage
case object StopExecutors extends CoarseGrainedClusterMessage
case class RemoveExecutor(executorId: String, reason: ExecutorLossReason)
extends CoarseGrainedClusterMessage
case class RemoveWorker(workerId: String, host: String, message: String)
extends CoarseGrainedClusterMessage
case class SetupDriver(driver: RpcEndpointRef) extends CoarseGrainedClusterMessage
// Exchanged between the driver and the AM in Yarn client mode
case class AddWebUIFilter(
filterName: String, filterParams: Map[String, String], proxyBase: String)
extends CoarseGrainedClusterMessage
// Messages exchanged between the driver and the cluster manager for executor allocation
// In Yarn mode, these are exchanged between the driver and the AM
case class RegisterClusterManager(am: RpcEndpointRef) extends CoarseGrainedClusterMessage
// Used by YARN's client mode AM to retrieve the current set of delegation tokens.
object RetrieveDelegationTokens extends CoarseGrainedClusterMessage
// Request executors by specifying the new total number of executors desired
// This includes executors already pending or running
case class RequestExecutors(
requestedTotal: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: Map[String, Int],
nodeBlacklist: Set[String])
extends CoarseGrainedClusterMessage
// Check if an executor was force-killed but for a reason unrelated to the running tasks.
// This could be the case if the executor is preempted, for instance.
case class GetExecutorLossReason(executorId: String) extends CoarseGrainedClusterMessage
case class KillExecutors(executorIds: Seq[String]) extends CoarseGrainedClusterMessage
// Used internally by executors to shut themselves down.
case object Shutdown extends CoarseGrainedClusterMessage
}
|
pgandhi999/spark
|
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
|
Scala
|
apache-2.0
| 5,283 |
package main
class Region(
val id: Int,
val superRegion: SuperRegion,
playerNameInit: String,
armiesInit: Int
) {
def this(id: Int, superRegion: SuperRegion) = {
this(id, superRegion, "", 0)
}
private var myNeighbors = Set.empty[Region]
  var playerName: String = playerNameInit
  var armies: Int = armiesInit
def addNeighbor(region: Region): Unit = {
myNeighbors += region
}
def neighbors = myNeighbors
def ownedByPlayer(playerName: String): Boolean =
playerName == this.playerName
}
|
ramn/warlight-starterbot-scala
|
src/main/scala/main/Region.scala
|
Scala
|
apache-2.0
| 505 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.io.{IOException, ObjectOutputStream}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.existentials
import scala.reflect.ClassTag
import org.apache.spark._
import org.apache.spark.util.Utils
/**
* Class that captures a coalesced RDD by essentially keeping track of parent partitions
* @param index of this coalesced partition
* @param rdd which it belongs to
* @param parentsIndices list of indices in the parent that have been coalesced into this partition
* @param preferredLocation the preferred location for this partition
*/
private[spark] case class CoalescedRDDPartition(
index: Int,
@transient rdd: RDD[_],
parentsIndices: Array[Int],
@transient preferredLocation: Option[String] = None) extends Partition {
var parents: Seq[Partition] = parentsIndices.map(rdd.partitions(_))
@throws(classOf[IOException])
private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
// Update the reference to parent partition at the time of task serialization
parents = parentsIndices.map(rdd.partitions(_))
oos.defaultWriteObject()
}
/**
* Computes the fraction of the parents' partitions containing preferredLocation within
* their getPreferredLocs.
* @return locality of this coalesced partition between 0 and 1
*/
def localFraction: Double = {
val loc = parents.count { p =>
val parentPreferredLocations = rdd.context.getPreferredLocs(rdd, p.index).map(_.host)
preferredLocation.exists(parentPreferredLocations.contains)
}
if (parents.size == 0) 0.0 else (loc.toDouble / parents.size.toDouble)
}
}
/**
* Represents a coalesced RDD that has fewer partitions than its parent RDD
* This class uses the PartitionCoalescer class to find a good partitioning of the parent RDD
* so that each new partition has roughly the same number of parent partitions and that
 * the preferred location of each new partition overlaps with as many preferred locations of its
 * parent partitions as possible.
* @param prev RDD to be coalesced
* @param maxPartitions number of desired partitions in the coalesced RDD (must be positive)
* @param balanceSlack used to trade-off balance and locality. 1.0 is all locality, 0 is all balance
*/
private[spark] class CoalescedRDD[T: ClassTag](
@transient var prev: RDD[T],
maxPartitions: Int,
balanceSlack: Double = 0.10)
extends RDD[T](prev.context, Nil) { // Nil since we implement getDependencies
require(maxPartitions > 0 || maxPartitions == prev.partitions.length,
s"Number of partitions ($maxPartitions) must be positive.")
override def getPartitions: Array[Partition] = {
val pc = new PartitionCoalescer(maxPartitions, prev, balanceSlack)
pc.run().zipWithIndex.map {
case (pg, i) =>
val ids = pg.arr.map(_.index).toArray
new CoalescedRDDPartition(i, prev, ids, pg.prefLoc)
}
}
override def compute(partition: Partition, context: TaskContext): Iterator[T] = {
partition.asInstanceOf[CoalescedRDDPartition].parents.iterator.flatMap { parentPartition =>
firstParent[T].iterator(parentPartition, context)
}
}
override def getDependencies: Seq[Dependency[_]] = {
Seq(new NarrowDependency(prev) {
def getParents(id: Int): Seq[Int] =
partitions(id).asInstanceOf[CoalescedRDDPartition].parentsIndices
})
}
override def clearDependencies() {
super.clearDependencies()
prev = null
}
/**
* Returns the preferred machine for the partition. If split is of type CoalescedRDDPartition,
* then the preferred machine will be one which most parent splits prefer too.
* @param partition
* @return the machine most preferred by split
*/
override def getPreferredLocations(partition: Partition): Seq[String] = {
partition.asInstanceOf[CoalescedRDDPartition].preferredLocation.toSeq
}
}
/**
* Coalesce the partitions of a parent RDD (`prev`) into fewer partitions, so that each partition of
* this RDD computes one or more of the parent ones. It will produce exactly `maxPartitions` if the
* parent had more than maxPartitions, or fewer if the parent had fewer.
*
* This transformation is useful when an RDD with many partitions gets filtered into a smaller one,
* or to avoid having a large number of small tasks when processing a directory with many files.
*
* If there is no locality information (no preferredLocations) in the parent, then the coalescing
 * is very simple: group parent partitions that are adjacent in the array into chunks.
* If there is locality information, it proceeds to pack them with the following four goals:
*
* (1) Balance the groups so they roughly have the same number of parent partitions
* (2) Achieve locality per partition, i.e. find one machine which most parent partitions prefer
* (3) Be efficient, i.e. O(n) algorithm for n parent partitions (problem is likely NP-hard)
* (4) Balance preferred machines, i.e. avoid as much as possible picking the same preferred machine
*
* Furthermore, it is assumed that the parent RDD may have many partitions, e.g. 100 000.
* We assume the final number of desired partitions is small, e.g. less than 1000.
*
* The algorithm tries to assign unique preferred machines to each partition. If the number of
* desired partitions is greater than the number of preferred machines (can happen), it needs to
* start picking duplicate preferred machines. This is determined using coupon collector estimation
* (2n log(n)). The load balancing is done using power-of-two randomized bins-balls with one twist:
* it tries to also achieve locality. This is done by allowing a slack (balanceSlack) between two
* bins. If two bins are within the slack in terms of balance, the algorithm will assign partitions
* according to locality. (contact alig for questions)
*
*/
private class PartitionCoalescer(maxPartitions: Int, prev: RDD[_], balanceSlack: Double) {
def compare(o1: PartitionGroup, o2: PartitionGroup): Boolean = o1.size < o2.size
def compare(o1: Option[PartitionGroup], o2: Option[PartitionGroup]): Boolean =
if (o1 == None) false else if (o2 == None) true else compare(o1.get, o2.get)
val rnd = new scala.util.Random(7919) // keep this class deterministic
// each element of groupArr represents one coalesced partition
val groupArr = ArrayBuffer[PartitionGroup]()
// hash used to check whether some machine is already in groupArr
val groupHash = mutable.Map[String, ArrayBuffer[PartitionGroup]]()
// hash used for the first maxPartitions (to avoid duplicates)
val initialHash = mutable.Set[Partition]()
// determines the tradeoff between load-balancing the partitions sizes and their locality
// e.g. balanceSlack=0.10 means that it allows up to 10% imbalance in favor of locality
val slack = (balanceSlack * prev.partitions.length).toInt
  var noLocality = true  // true if no preferredLocations exist for the parent RDD
// gets the *current* preferred locations from the DAGScheduler (as opposed to the static ones)
def currPrefLocs(part: Partition): Seq[String] = {
prev.context.getPreferredLocs(prev, part.index).map(tl => tl.host)
}
// this class just keeps iterating and rotating infinitely over the partitions of the RDD
// next() returns the next preferred machine that a partition is replicated on
// the rotator first goes through the first replica copy of each partition, then second, third
  // the iterator's return type is a tuple: (replicaString, partition)
class LocationIterator(prev: RDD[_]) extends Iterator[(String, Partition)] {
var it: Iterator[(String, Partition)] = resetIterator()
override val isEmpty = !it.hasNext
// initializes/resets to start iterating from the beginning
def resetIterator(): Iterator[(String, Partition)] = {
val iterators = (0 to 2).map( x =>
prev.partitions.iterator.flatMap(p => {
if (currPrefLocs(p).size > x) Some((currPrefLocs(p)(x), p)) else None
} )
)
iterators.reduceLeft((x, y) => x ++ y)
}
// hasNext() is false iff there are no preferredLocations for any of the partitions of the RDD
override def hasNext: Boolean = { !isEmpty }
// return the next preferredLocation of some partition of the RDD
override def next(): (String, Partition) = {
if (it.hasNext) {
it.next()
} else {
it = resetIterator() // ran out of preferred locations, reset and rotate to the beginning
it.next()
}
}
}
/**
* Sorts and gets the least element of the list associated with key in groupHash
* The returned PartitionGroup is the least loaded of all groups that represent the machine "key"
* @param key string representing a partitioned group on preferred machine key
* @return Option of PartitionGroup that has least elements for key
*/
def getLeastGroupHash(key: String): Option[PartitionGroup] = {
groupHash.get(key).map(_.sortWith(compare).head)
}
def addPartToPGroup(part: Partition, pgroup: PartitionGroup): Boolean = {
if (!initialHash.contains(part)) {
      pgroup.arr += part           // assign this element to this group
initialHash += part // needed to avoid assigning partitions to multiple buckets
true
} else { false }
}
/**
* Initializes targetLen partition groups and assigns a preferredLocation
* This uses coupon collector to estimate how many preferredLocations it must rotate through
* until it has seen most of the preferred locations (2 * n log(n))
* @param targetLen
*/
def setupGroups(targetLen: Int) {
val rotIt = new LocationIterator(prev)
// deal with empty case, just create targetLen partition groups with no preferred location
if (!rotIt.hasNext) {
(1 to targetLen).foreach(x => groupArr += PartitionGroup())
return
}
noLocality = false
// number of iterations needed to be certain that we've seen most preferred locations
val expectedCoupons2 = 2 * (math.log(targetLen)*targetLen + targetLen + 0.5).toInt
var numCreated = 0
var tries = 0
// rotate through until either targetLen unique/distinct preferred locations have been created
// OR we've rotated expectedCoupons2, in which case we have likely seen all preferred locations,
// i.e. likely targetLen >> number of preferred locations (more buckets than there are machines)
while (numCreated < targetLen && tries < expectedCoupons2) {
tries += 1
val (nxt_replica, nxt_part) = rotIt.next()
if (!groupHash.contains(nxt_replica)) {
val pgroup = PartitionGroup(nxt_replica)
groupArr += pgroup
addPartToPGroup(nxt_part, pgroup)
groupHash.put(nxt_replica, ArrayBuffer(pgroup)) // list in case we have multiple
numCreated += 1
}
}
while (numCreated < targetLen) { // if we don't have enough partition groups, create duplicates
var (nxt_replica, nxt_part) = rotIt.next()
val pgroup = PartitionGroup(nxt_replica)
groupArr += pgroup
groupHash.getOrElseUpdate(nxt_replica, ArrayBuffer()) += pgroup
var tries = 0
while (!addPartToPGroup(nxt_part, pgroup) && tries < targetLen) { // ensure at least one part
nxt_part = rotIt.next()._2
tries += 1
}
numCreated += 1
}
}
/**
* Takes a parent RDD partition and decides which of the partition groups to put it in
* Takes locality into account, but also uses power of 2 choices to load balance
   * It strikes a balance between the two using the balanceSlack variable
* @param p partition (ball to be thrown)
* @return partition group (bin to be put in)
*/
def pickBin(p: Partition): PartitionGroup = {
val pref = currPrefLocs(p).map(getLeastGroupHash(_)).sortWith(compare) // least loaded pref locs
val prefPart = if (pref == Nil) None else pref.head
val r1 = rnd.nextInt(groupArr.size)
val r2 = rnd.nextInt(groupArr.size)
val minPowerOfTwo = if (groupArr(r1).size < groupArr(r2).size) groupArr(r1) else groupArr(r2)
if (prefPart.isEmpty) {
// if no preferred locations, just use basic power of two
return minPowerOfTwo
}
val prefPartActual = prefPart.get
if (minPowerOfTwo.size + slack <= prefPartActual.size) { // more imbalance than the slack allows
minPowerOfTwo // prefer balance over locality
} else {
prefPartActual // prefer locality over balance
}
}
def throwBalls() {
if (noLocality) { // no preferredLocations in parent RDD, no randomization needed
if (maxPartitions > groupArr.size) { // just return prev.partitions
for ((p, i) <- prev.partitions.zipWithIndex) {
groupArr(i).arr += p
}
} else { // no locality available, then simply split partitions based on positions in array
for (i <- 0 until maxPartitions) {
val rangeStart = ((i.toLong * prev.partitions.length) / maxPartitions).toInt
val rangeEnd = (((i.toLong + 1) * prev.partitions.length) / maxPartitions).toInt
(rangeStart until rangeEnd).foreach{ j => groupArr(i).arr += prev.partitions(j) }
}
}
} else {
for (p <- prev.partitions if (!initialHash.contains(p))) { // throw every partition into group
pickBin(p).arr += p
}
}
}
def getPartitions: Array[PartitionGroup] = groupArr.filter( pg => pg.size > 0).toArray
/**
* Runs the packing algorithm and returns an array of PartitionGroups that if possible are
* load balanced and grouped by locality
* @return array of partition groups
*/
def run(): Array[PartitionGroup] = {
setupGroups(math.min(prev.partitions.length, maxPartitions)) // setup the groups (bins)
throwBalls() // assign partitions (balls) to each group (bins)
getPartitions
}
}
private case class PartitionGroup(prefLoc: Option[String] = None) {
var arr = mutable.ArrayBuffer[Partition]()
def size: Int = arr.size
}
private object PartitionGroup {
def apply(prefLoc: String): PartitionGroup = {
require(prefLoc != "", "Preferred location must not be empty")
PartitionGroup(Some(prefLoc))
}
}
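// Illustration only (not part of Spark): the "power of two choices" load-balancing step that
// pickBin relies on, reduced to plain bins of Ints. Sampling two random bins and taking the less
// loaded one keeps the bins close to balanced without scanning every bin.
private object PowerOfTwoChoicesSketch {
  def pickBin(binSizes: Array[Int], rnd: scala.util.Random): Int = {
    val r1 = rnd.nextInt(binSizes.length)
    val r2 = rnd.nextInt(binSizes.length)
    if (binSizes(r1) <= binSizes(r2)) r1 else r2 // prefer the less loaded of the two candidates
  }
}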
|
ArvinDevel/onlineAggregationOnSparkV2
|
core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
|
Scala
|
apache-2.0
| 15,001 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.api
import scala.AnyVal
import java.lang.String
/** A semantic, human consumable description.
*
* In contrast to `Show`, which is more representation focused, `Label`
* focuses on semantics and aims to be suitable for display to an end-user
* of the system.
*/
trait Label[A] {
def label(a: A): String
}
object Label {
def apply[A](implicit A: Label[A]): Label[A] = A
def label[A](f: A => String): Label[A] =
new Label[A] { def label(a: A) = f(a) }
object Syntax {
implicit final class EnrichedA[A](val self: A) extends AnyVal {
def label(implicit A: Label[A]): String = A.label(self)
}
}
}
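// Hypothetical usage sketch (not part of quasar); the Temperature type and wording are invented.
// It shows defining a Label instance and rendering a value through the Syntax enrichment.
object LabelExample {
  import Label.Syntax._
  final case class Temperature(celsius: scala.Int)
  implicit val temperatureLabel: Label[Temperature] =
    Label.label(t => t.celsius.toString + " degrees Celsius")
  val rendered: String = Temperature(21).label // "21 degrees Celsius"
}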
|
slamdata/quasar
|
api/src/main/scala/quasar/api/Label.scala
|
Scala
|
apache-2.0
| 1,256 |
package net.liftmodules.staticsitemap
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.FunSpec
import net.liftweb.sitemap.Loc.LocParam
import net.liftweb.common.Full
import net.liftweb.sitemap.NormalLocPath
import path._
import path.PathBuilder._
// TODO: Test url param escaping
class TestSubRoutes extends FunSpec
with ShouldMatchers
with RouteConverterBehaviors {
describe("A ParameterlessSubRoute") {
val ParameterlessSiteMap = new StaticSiteMap {
val default = new ParameterlessSubRoute("/bogus")
val singleString = new ParameterlessSubRoute("custom" -> "/custom")
val singleList = new ParameterlessSubRoute(^ / "better" -> "/better")
val multiple = new ParameterlessSubRoute(^ / "multiple" / "parts" -> "/multiple/parts")
}
it should behave like aDefaultUnitSubRoute(ParameterlessSiteMap.default)
it should behave like allUnitSubRoutes(ParameterlessSiteMap.singleString)
it should behave like allUnitSubRoutes(ParameterlessSiteMap.singleList)
it should behave like allUnitSubRoutes(ParameterlessSiteMap.multiple)
it should behave like aRelativeRouteBuilder(
(mapping: (PathParts, String)) => {
val sm = new StaticSiteMap {
val subroute = new ParameterlessSubRoute(mapping._1 -> mapping._2)
}
sm.subroute
})
}
describe("Relative Snails") {
val SlashSiteMap = new StaticSiteMap {
val part = @/("a")
val customPart = @/("b" -> "/c")
val list = @/(List("a", "b"))
val customList = @/(^ / "x" / "y" -> "/z")
}
it should behave like aDefaultUnitSubRoute(SlashSiteMap.part)
it should behave like allUnitSubRoutes(SlashSiteMap.customPart)
it should behave like aDefaultUnitSubRoute(SlashSiteMap.list)
it should behave like allUnitSubRoutes(SlashSiteMap.customList)
it("should produce a url from a single string with the url as the default template path") {
SlashSiteMap.part.url should be("/a")
SlashSiteMap.part.templateParts should be(^ / "a")
}
it("should accept a mapping from a single string to a custom template") {
SlashSiteMap.customPart.url should be("/b")
SlashSiteMap.customPart.templateParts should be(^ / "c")
}
it("should accept a mapping from a list of string with the url as the default template path") {
SlashSiteMap.list.url should be("/a/b")
SlashSiteMap.list.templateParts should be(^ / "a" / "b")
}
it("should accept a mapping from a list of string to a custom template") {
SlashSiteMap.customList.url should be("/x/y")
SlashSiteMap.customList.templateParts should be(^ / "z")
}
it("should prevent matching on a path containing a slash") {
evaluating {
new StaticSiteMap {
val invalid = @/("/a/b/c")
}
} should produce[PathPartConstructionException]
}
it("(when given a custom template path) should prevent matching on a path containing a slash") {
evaluating {
new StaticSiteMap {
val invalid = @/("/a/b/c" -> "/custom")
}
} should produce[PathPartConstructionException]
}
it("should prevent matching on a list of paths if any contains a slash") {
evaluating {
new StaticSiteMap {
val invalid = @/(^ / "a" / "b/c")
}
} should produce[PathPartConstructionException]
}
it("(when given a custom template path) should prevent matching on a list of paths if any contains a slash") {
evaluating {
new StaticSiteMap {
val invalid = @/(^ / "a" / "b/c" -> "/custom")
}
} should produce[PathPartConstructionException]
}
}
describe("A SubRoute") {
val ParamSiteMap = new StaticSiteMap {
val nilPrefix = new String_@/(^ -> "/root") {
def url(id: String) = ^** / id
def paramForUrl = {
case ^** / id => Full(id)
case what => {
val thing = what
val shouldbe = ^**
Full(thing.toString)
}
}
}
val singlePrefix = new String_@/("prefix" -> "/root") {
def url(id: String) = ^** / id
def paramForUrl = {
case ^** / id => Full(id)
}
}
val doublePrefix = new String_@/(^** / "first" / "second" -> "/root") {
def url(id: String) = ^** / id
def paramForUrl = {
case ^** / param => Full(param)
}
}
}
ParamSiteMap.toSiteMap
it should behave like aStringParamSubRoute(ParamSiteMap.nilPrefix)
it should behave like aStringParamSubRoute(ParamSiteMap.singlePrefix)
it should behave like aStringParamSubRoute(ParamSiteMap.doublePrefix)
it("should also be able to construct a url with a parameter and some constant value") {
val ConstAndParamSiteMap = new StaticSiteMap {
val sub = new String_@/ {
def url(id: String) = ^** / id / "constant"
def paramForUrl = {
case ^** / id / "constant" => Full(id)
}
}
}
ConstAndParamSiteMap.sub.url("x") should be("/x/constant")
ConstAndParamSiteMap.sub.paramForUrl(^ / "x" / "constant") should be(Full("x"))
}
it("should also be able to construct a url with multiple constants") {
val ConstAndParamSiteMap = new StaticSiteMap {
val sub = new String_@/ {
def url(y: String) = ^** / "x" / y / "z"
def paramForUrl = {
case ^** / "x" / y / "z" => Full(y)
}
}
}
ConstAndParamSiteMap.sub.url("y") should be("/x/y/z")
ConstAndParamSiteMap.sub.paramForUrl(^ / "x" / "y" / "z") should be(Full("y"))
}
}
describe("Fixed Urls") {
case object ParamA extends LocParam[Any]
case object ParamB extends LocParam[Any]
val SimpleRoutes = new StaticSiteMap {
val plain = :/("/page")
val tuple = :/("/customPage" -> "/customTemplate")
val restricted = :/("/restricted", ParamA)
val container = new @/("wrong", ParamA) {
val absolute = :/("/absolute", ParamB)
}
}
it should behave like aDefaultUnitSubRoute(SimpleRoutes.plain)
it should behave like allUnitSubRoutes(SimpleRoutes.tuple)
it("should allow constructing root urls with custom template paths") {
val SampleRoutes = new StaticSiteMap {
val customPath = :/("/page1" -> "/customTemplate")
}
SampleRoutes.customPath.url should be("/page1")
SampleRoutes.customPath.toRoute should have('path(List(NormalLocPath("customTemplate"))))
}
it("should accept LocParams") {
SimpleRoutes.restricted.locParams should be(List(ParamA))
}
it("^ should be /index.html") {
val sitemap = new StaticSiteMap {
val Home = :/(^)
}
// There is a natural
sitemap.Home.templateParts.parts should equal (^)
sitemap.Home.toRoute.templatePath.parts should equal (^)
sitemap.Home.toRoute.toMenu.loc.calcDefaultHref should be ("/")
}
it("should prevent constructing root urls without a leading /") {
evaluating {
new StaticSiteMap {
val noSlash = :/("wrong" -> "wrong")
}
} should produce[PathPartSplitException]
}
it("should prevent constructing template paths without a leading /") {
evaluating {
new StaticSiteMap {
val noSlash = :/("/okay" -> "wrong")
}
} should produce[PathPartSplitException]
}
it("should prevent constructing root urls with empty path parts (ie. //)") {
evaluating {
new StaticSiteMap {
val noSlash = :/("/not/a/good//url" -> "/okay")
}
} should produce[PathPartConstructionException]
}
it("should prevent constructing template paths with empty path parts (ie. //)") {
evaluating {
new StaticSiteMap {
val noSlash = :/("/a/fine/url" -> "/not/a/good//template/path")
}
} should produce[PathPartConstructionException]
}
it("should not contain the parent container's prefix") {
SimpleRoutes.container.absolute.url should be("/absolute")
}
it("should not contain the parent container's loc param") {
SimpleRoutes.container.absolute.locParams should be(List(ParamB))
}
}
}
|
jeffmay/lift-staticsitemap
|
src/test/scala/net/liftmodules/staticsitemap/TestSubRoutes.scala
|
Scala
|
apache-2.0
| 8,283 |
package chess
package opening
final class FullOpening(
val eco: String,
val name: String,
val fen: String
) {
def ecoName = s"$eco $name"
override def toString = ecoName
def atPly(ply: Int) = FullOpening.AtPly(this, ply)
}
object FullOpening {
case class AtPly(opening: FullOpening, ply: Int)
}
|
niklasf/scalachess
|
src/main/scala/opening/FullOpening.scala
|
Scala
|
mit
| 321 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import scala.math._
import java.io.File
import kafka.message._
import kafka.utils._
/**
* A segment of the log. Each segment has two components: a log and an index. The log is a FileMessageSet containing
* the actual messages. The index is an OffsetIndex that maps from logical offsets to physical file positions. Each
* segment has a base offset which is an offset <= the least offset of any message in this segment and > any offset in
* any previous segment.
*
* A segment with a base offset of [base_offset] would be stored in two files, a [base_offset].index and a [base_offset].log file.
*/
@nonthreadsafe
class LogSegment(val messageSet: FileMessageSet,
val index: OffsetIndex,
val start: Long,
val indexIntervalBytes: Int,
time: Time) extends Range with Logging {
var firstAppendTime: Option[Long] =
if (messageSet.sizeInBytes > 0)
Some(time.milliseconds)
else
None
/* the number of bytes since we last added an entry in the offset index */
var bytesSinceLastIndexEntry = 0
@volatile var deleted = false
def this(dir: File, startOffset: Long, indexIntervalBytes: Int, maxIndexSize: Int) =
this(new FileMessageSet(file = Log.logFilename(dir, startOffset)),
new OffsetIndex(file = Log.indexFilename(dir, startOffset), baseOffset = startOffset, maxIndexSize = maxIndexSize),
startOffset,
indexIntervalBytes,
SystemTime)
/* Return the size in bytes of this log segment */
def size: Long = messageSet.sizeInBytes()
def updateFirstAppendTime() {
if (firstAppendTime.isEmpty)
firstAppendTime = Some(time.milliseconds)
}
/**
* Append the given messages starting with the given offset. Add
* an entry to the index if needed.
*
* It is assumed this method is being called from within a lock
*/
def append(offset: Long, messages: ByteBufferMessageSet) {
if (messages.sizeInBytes > 0) {
trace("Inserting %d bytes at offset %d at position %d".format(messages.sizeInBytes, offset, messageSet.sizeInBytes()))
// append an entry to the index (if needed)
if(bytesSinceLastIndexEntry > indexIntervalBytes) {
index.append(offset, messageSet.sizeInBytes())
this.bytesSinceLastIndexEntry = 0
}
// append the messages
messageSet.append(messages)
updateFirstAppendTime()
this.bytesSinceLastIndexEntry += messages.sizeInBytes
}
}
/**
* Find the physical file position for the least offset >= the given offset. If no offset is found
   * that meets this criterion before the end of the log, return null.
*/
private def translateOffset(offset: Long): OffsetPosition = {
val mapping = index.lookup(offset)
messageSet.searchFor(offset, mapping.position)
}
/**
* Read a message set from this segment beginning with the first offset
* greater than or equal to the startOffset. The message set will include
* no more than maxSize bytes and will end before maxOffset if a maxOffset is specified.
*/
def read(startOffset: Long, maxSize: Int, maxOffset: Option[Long]): MessageSet = {
if(maxSize < 0)
throw new IllegalArgumentException("Invalid max size for log read (%d)".format(maxSize))
if(maxSize == 0)
return MessageSet.Empty
val logSize = messageSet.sizeInBytes // this may change, need to save a consistent copy
val startPosition = translateOffset(startOffset)
// if the start position is already off the end of the log, return MessageSet.Empty
if(startPosition == null)
return MessageSet.Empty
// calculate the length of the message set to read based on whether or not they gave us a maxOffset
val length =
maxOffset match {
case None =>
// no max offset, just use the max size they gave unmolested
maxSize
case Some(offset) => {
// there is a max offset, translate it to a file position and use that to calculate the max read size
if(offset < startOffset)
throw new IllegalArgumentException("Attempt to read with a maximum offset (%d) less than the start offset (%d).".format(offset, startOffset))
val mapping = translateOffset(offset)
val endPosition =
if(mapping == null)
logSize // the max offset is off the end of the log, use the end of the file
else
mapping.position
min(endPosition - startPosition.position, maxSize)
}
}
messageSet.read(startPosition.position, length)
}
override def toString() = "LogSegment(start=" + start + ", size=" + size + ")"
/**
* Truncate off all index and log entries with offsets greater than or equal to the current offset.
*/
def truncateTo(offset: Long) {
val mapping = translateOffset(offset)
if(mapping == null)
return
index.truncateTo(offset)
// after truncation, reset and allocate more space for the (new currently active) index
index.resize(index.maxIndexSize)
messageSet.truncateTo(mapping.position)
if (messageSet.sizeInBytes == 0)
firstAppendTime = None
}
/**
   * Calculate the offset that would be used for the next message to be appended to this segment.
* Note that this is expensive.
*/
def nextOffset(): Long = {
val ms = read(index.lastOffset, messageSet.sizeInBytes, None)
ms.lastOption match {
case None => start
case Some(last) => last.nextOffset
}
}
/**
* Flush this log segment to disk
*/
def flush() {
messageSet.flush()
index.flush()
}
/**
* Close this log segment
*/
def close() {
Utils.swallow(index.close)
Utils.swallow(messageSet.close)
}
}
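// Hypothetical sketch (not part of this file): how a base offset maps to the two on-disk file
// names described in the class comment, assuming Kafka's 20-digit zero-padded naming convention.
object SegmentFileNamesSketch {
  def logFileName(baseOffset: Long): String = "%020d.log".format(baseOffset)
  def indexFileName(baseOffset: Long): String = "%020d.index".format(baseOffset)
  // logFileName(12345678L) == "00000000000012345678.log"
}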
|
kavink92/kafka-0.8.0-beta1-src
|
core/src/main/scala/kafka/log/LogSegment.scala
|
Scala
|
apache-2.0
| 6,623 |
import collection.mutable._
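// Project Euler #32: sum every product whose multiplicand, multiplier and product together use
// each of the digits 1 through 9 exactly once. The nested loop below enumerates all permutations
// of 1-9 by removing one element of `avail` at a time; tryPoss then checks each way of splitting
// a permutation into multiplicand, multiplier and product, and the `answers` Set deduplicates
// products that can be formed in more than one way.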
object Euler32 {
var answers = Set[Int]()
def main(args: Array[String]) {
for {
i1 <- 0 to 8
i2 <- 0 to 7
i3 <- 0 to 6
i4 <- 0 to 5
i5 <- 0 to 4
i6 <- 0 to 3
i7 <- 0 to 2
i8 <- 0 to 1
i9 <- 0 to 0
} {
val avail = ListBuffer[Int](1, 2, 3, 4, 5, 6, 7, 8, 9)
val v1 = avail.remove(i1)
val v2 = avail.remove(i2)
val v3 = avail.remove(i3)
val v4 = avail.remove(i4)
val v5 = avail.remove(i5)
val v6 = avail.remove(i6)
val v7 = avail.remove(i7)
val v8 = avail.remove(i8)
val v9 = avail.remove(i9)
tryPoss(v1, v2, v3, v4, v5, v6, v7, v8, v9)
}
println("Total is: " + (0 /: answers)(_ + _))
}
def tryPoss(i1: Int, i2: Int, i3: Int, i4: Int, i5: Int,
i6: Int, i7: Int, i8: Int, i9: Int) {
check(num(i1), num(i2), num(i3, i4, i5, i6, i7, i8, i9))
check(num(i1), num(i2, i3), num(i4, i5, i6, i7, i8, i9))
check(num(i1), num(i2, i3, i4), num(i5, i6, i7, i8, i9))
check(num(i1), num(i2, i3, i4, i5), num(i6, i7, i8, i9))
check(num(i1, i2), num(i3), num(i4, i5, i6, i7, i8, i9))
check(num(i1, i2), num(i3, i4), num(i5, i6, i7, i8, i9))
check(num(i1, i2), num(i3, i4, i5), num(i6, i7, i8, i9))
check(num(i1, i2), num(i3, i4, i5, i6), num(i7, i8, i9))
check(num(i1, i2, i3), num(i4), num(i5, i6, i7, i8, i9))
check(num(i1, i2, i3), num(i4, i5), num(i6, i7, i8, i9))
check(num(i1, i2, i3), num(i4, i5, i6), num(i7, i8, i9))
check(num(i1, i2, i3), num(i4, i5, i6, i7), num(i8, i9))
check(num(i1, i2, i3, i4), num(i5), num(i6, i7, i8, i9))
check(num(i1, i2, i3, i4), num(i5, i6), num(i7, i8, i9))
check(num(i1, i2, i3, i4), num(i5, i6, i7), num(i8, i9))
check(num(i1, i2, i3, i4), num(i5, i6, i7, i8), num(i9))
}
def check(x: Int, y: Int, z: Int) {
if (z == x * y) {
println("Got " + x + " * " + y + " = " + z)
answers += z
}
}
def num(xs: Int*) = (0 /: xs)(10 * _ + _)
}
|
pdbartlett/misc-stuff
|
euler/scala/Euler32.scala
|
Scala
|
apache-2.0
| 1,937 |
package sttp.client3.impl.zio
import sttp.ws.{WebSocket, WebSocketClosed, WebSocketFrame}
import zio.stream.{Stream, ZStream}
import zio.{Ref, ZIO}
object ZioWebSockets {
def compilePipe[R](
ws: WebSocket[ZIO[R, Throwable, *]],
pipe: ZStream[R, Throwable, WebSocketFrame.Data[_]] => ZStream[R, Throwable, WebSocketFrame]
): ZIO[R, Throwable, Unit] =
Ref.make(true).flatMap { open =>
val onClose = Stream.fromZIO(open.set(false).map(_ => None: Option[WebSocketFrame.Data[_]]))
pipe(
Stream
.repeatZIO(ws.receive())
.flatMap {
case WebSocketFrame.Close(_, _) => onClose
case WebSocketFrame.Ping(payload) =>
Stream.fromZIO(ws.send(WebSocketFrame.Pong(payload))).flatMap(_ => Stream.empty)
case WebSocketFrame.Pong(_) => Stream.empty
case in: WebSocketFrame.Data[_] => Stream(Some(in))
}
.catchSome { case _: WebSocketClosed => onClose }
.collectWhileSome
)
.mapZIO(ws.send(_))
.foreach(_ => ZIO.unit)
.ensuring(ws.close().catchAll(_ => ZIO.unit))
}
type PipeR[R, A, B] = ZStream[R, Throwable, A] => ZStream[R, Throwable, B]
def fromTextPipe[R]: (String => WebSocketFrame) => PipeR[R, WebSocketFrame, WebSocketFrame] =
f => fromTextPipeF(_.map(f))
def fromTextPipeF[R]: PipeR[R, String, WebSocketFrame] => PipeR[R, WebSocketFrame, WebSocketFrame] =
p => p.compose(combinedTextFrames)
def combinedTextFrames[R]: PipeR[R, WebSocketFrame, String] = { input =>
input
.collect { case tf: WebSocketFrame.Text => tf }
.mapConcat { tf =>
if (tf.finalFragment) {
Seq(tf.copy(finalFragment = false), tf.copy(payload = ""))
} else {
Seq(tf)
}
}
.split(_.finalFragment)
.map(_.map(_.payload).mkString)
}
}
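// Hypothetical usage sketch (not part of sttp): a pipe that echoes every incoming data frame back
// as a text frame, in the shape expected by compilePipe above. WebSocketFrame.text is assumed to
// be the helper that builds a final text frame.
object EchoPipeSketch {
  import ZioWebSockets.PipeR
  val echo: PipeR[Any, WebSocketFrame.Data[_], WebSocketFrame] =
    in => in.map(frame => WebSocketFrame.text(frame.toString))
}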
|
softwaremill/sttp
|
effects/zio/src/main/scala/sttp/client3/impl/zio/ZioWebSockets.scala
|
Scala
|
apache-2.0
| 1,882 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.roaringbitmap.RoaringBitmap
import org.apache.spark.SparkEnv
import org.apache.spark.internal.config
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.Utils
/**
* Result returned by a ShuffleMapTask to a scheduler. Includes the block manager address that the
* task ran on as well as the sizes of outputs for each reducer, for passing on to the reduce tasks.
*/
private[spark] sealed trait MapStatus {
/** Location where this task was run. */
def location: BlockManagerId
/**
* Estimated size for the reduce block, in bytes.
*
* If a block is non-empty, then this method MUST return a non-zero size. This invariant is
* necessary for correctness, since block fetchers are allowed to skip zero-size blocks.
*/
def getSizeForBlock(reduceId: Int): Long
}
private[spark] object MapStatus {
def apply(loc: BlockManagerId, uncompressedSizes: Array[Long]): MapStatus = {
if (uncompressedSizes.length > 2000) {
HighlyCompressedMapStatus(loc, uncompressedSizes)
} else {
new CompressedMapStatus(loc, uncompressedSizes)
}
}
private[this] val LOG_BASE = 1.1
/**
* Compress a size in bytes to 8 bits for efficient reporting of map output sizes.
* We do this by encoding the log base 1.1 of the size as an integer, which can support
* sizes up to 35 GB with at most 10% error.
*/
def compressSize(size: Long): Byte = {
if (size == 0) {
0
} else if (size <= 1L) {
1
} else {
math.min(255, math.ceil(math.log(size) / math.log(LOG_BASE)).toInt).toByte
}
}
/**
* Decompress an 8-bit encoded block size, using the reverse operation of compressSize.
*/
def decompressSize(compressedSize: Byte): Long = {
if (compressedSize == 0) {
0
} else {
math.pow(LOG_BASE, compressedSize & 0xFF).toLong
}
}
}
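// Illustration only (not part of Spark): round-tripping a block size through the one-byte
// log-base-1.1 encoding above. The decompressed value overestimates the original, but by at most
// about 10%.
private[spark] object CompressedSizeSketch {
  val original: Long = 1000000L
  val encoded: Byte = MapStatus.compressSize(original)
  val approximate: Long = MapStatus.decompressSize(encoded) // >= original, within ~10%
}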
/**
* A [[MapStatus]] implementation that tracks the size of each block. Size for each block is
* represented using a single byte.
*
* @param loc location where the task is being executed.
* @param compressedSizes size of the blocks, indexed by reduce partition id.
*/
private[spark] class CompressedMapStatus(
private[this] var loc: BlockManagerId,
private[this] var compressedSizes: Array[Byte])
extends MapStatus with Externalizable {
protected def this() = this(null, null.asInstanceOf[Array[Byte]]) // For deserialization only
def this(loc: BlockManagerId, uncompressedSizes: Array[Long]) {
this(loc, uncompressedSizes.map(MapStatus.compressSize))
}
override def location: BlockManagerId = loc
override def getSizeForBlock(reduceId: Int): Long = {
MapStatus.decompressSize(compressedSizes(reduceId))
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
loc.writeExternal(out)
out.writeInt(compressedSizes.length)
out.write(compressedSizes)
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
loc = BlockManagerId(in)
val len = in.readInt()
compressedSizes = new Array[Byte](len)
in.readFully(compressedSizes)
}
}
/**
* A [[MapStatus]] implementation that stores the accurate size of huge blocks, which are larger
* than spark.shuffle.accurateBlockThreshold. It stores the average size of other non-empty blocks,
* plus a bitmap for tracking which blocks are empty.
*
* @param loc location where the task is being executed
* @param numNonEmptyBlocks the number of non-empty blocks
* @param emptyBlocks a bitmap tracking which blocks are empty
* @param avgSize average size of the non-empty and non-huge blocks
* @param hugeBlockSizes sizes of huge blocks by their reduceId.
*/
private[spark] class HighlyCompressedMapStatus private (
private[this] var loc: BlockManagerId,
private[this] var numNonEmptyBlocks: Int,
private[this] var emptyBlocks: RoaringBitmap,
private[this] var avgSize: Long,
private var hugeBlockSizes: Map[Int, Byte])
extends MapStatus with Externalizable {
// loc could be null when the default constructor is called during deserialization
require(loc == null || avgSize > 0 || hugeBlockSizes.size > 0 || numNonEmptyBlocks == 0,
"Average size can only be zero for map stages that produced no output")
protected def this() = this(null, -1, null, -1, null) // For deserialization only
override def location: BlockManagerId = loc
override def getSizeForBlock(reduceId: Int): Long = {
assert(hugeBlockSizes != null)
if (emptyBlocks.contains(reduceId)) {
0
} else {
hugeBlockSizes.get(reduceId) match {
case Some(size) => MapStatus.decompressSize(size)
case None => avgSize
}
}
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
loc.writeExternal(out)
emptyBlocks.writeExternal(out)
out.writeLong(avgSize)
out.writeInt(hugeBlockSizes.size)
hugeBlockSizes.foreach { kv =>
out.writeInt(kv._1)
out.writeByte(kv._2)
}
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
loc = BlockManagerId(in)
emptyBlocks = new RoaringBitmap()
emptyBlocks.readExternal(in)
avgSize = in.readLong()
val count = in.readInt()
val hugeBlockSizesArray = mutable.ArrayBuffer[Tuple2[Int, Byte]]()
(0 until count).foreach { _ =>
val block = in.readInt()
val size = in.readByte()
hugeBlockSizesArray += Tuple2(block, size)
}
hugeBlockSizes = hugeBlockSizesArray.toMap
}
}
private[spark] object HighlyCompressedMapStatus {
def apply(loc: BlockManagerId, uncompressedSizes: Array[Long]): HighlyCompressedMapStatus = {
// We must keep track of which blocks are empty so that we don't report a zero-sized
// block as being non-empty (or vice-versa) when using the average block size.
var i = 0
var numNonEmptyBlocks: Int = 0
var numSmallBlocks: Int = 0
var totalSmallBlockSize: Long = 0
// From a compression standpoint, it shouldn't matter whether we track empty or non-empty
// blocks. From a performance standpoint, we benefit from tracking empty blocks because
// we expect that there will be far fewer of them, so we will perform fewer bitmap insertions.
val emptyBlocks = new RoaringBitmap()
val totalNumBlocks = uncompressedSizes.length
val threshold = Option(SparkEnv.get)
.map(_.conf.get(config.SHUFFLE_ACCURATE_BLOCK_THRESHOLD))
.getOrElse(config.SHUFFLE_ACCURATE_BLOCK_THRESHOLD.defaultValue.get)
val hugeBlockSizesArray = ArrayBuffer[Tuple2[Int, Byte]]()
while (i < totalNumBlocks) {
val size = uncompressedSizes(i)
if (size > 0) {
numNonEmptyBlocks += 1
// Huge blocks are not included in the calculation for average size, thus size for smaller
// blocks is more accurate.
if (size < threshold) {
totalSmallBlockSize += size
numSmallBlocks += 1
} else {
hugeBlockSizesArray += Tuple2(i, MapStatus.compressSize(uncompressedSizes(i)))
}
} else {
emptyBlocks.add(i)
}
i += 1
}
val avgSize = if (numSmallBlocks > 0) {
totalSmallBlockSize / numSmallBlocks
} else {
0
}
emptyBlocks.trim()
emptyBlocks.runOptimize()
new HighlyCompressedMapStatus(loc, numNonEmptyBlocks, emptyBlocks, avgSize,
hugeBlockSizesArray.toMap)
}
}
|
esi-mineset/spark
|
core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala
|
Scala
|
apache-2.0
| 8,390 |
/*
* Copyright (C) 2016-2020 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.persistence.cassandra.query
import java.nio.ByteBuffer
import java.time.{ LocalDateTime, ZoneOffset }
import java.util.UUID
import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.persistence.cassandra.BucketSize
import akka.persistence.cassandra.EventsByTagSettings
import akka.persistence.cassandra.PluginSettings
import akka.persistence.cassandra.formatOffset
import akka.persistence.cassandra.journal._
import akka.serialization.Serialization
import akka.serialization.Serializers
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.uuid.Uuids
private[akka] trait TestTagWriter {
def system: ActorSystem
def cluster: CqlSession
val serialization: Serialization
val settings: PluginSettings
final def journalSettings: JournalSettings = settings.journalSettings
final def eventsByTagSettings: EventsByTagSettings = settings.eventsByTagSettings
lazy val (preparedWriteTagMessage, preparedWriteTagMessageWithMeta) = {
val writeStatements: CassandraJournalStatements = new CassandraJournalStatements(settings)
(cluster.prepare(writeStatements.writeTags(false)), cluster.prepare(writeStatements.writeTags(true)))
}
def clearAllEvents(): Unit = {
cluster.execute(s"truncate ${journalSettings.keyspace}.${eventsByTagSettings.tagTable.name}")
}
def writeTaggedEvent(
time: LocalDateTime,
pr: PersistentRepr,
tags: Set[String],
tagPidSequenceNr: Long,
bucketSize: BucketSize): Unit = {
val timestamp = time.toInstant(ZoneOffset.UTC).toEpochMilli
write(pr, tags, tagPidSequenceNr, uuid(timestamp), bucketSize)
}
def writeTaggedEvent(
persistent: PersistentRepr,
tags: Set[String],
tagPidSequenceNr: Long,
bucketSize: BucketSize): Unit = {
val nowUuid = Uuids.timeBased()
write(persistent, tags, tagPidSequenceNr, nowUuid, bucketSize)
}
def writeTaggedEvent(
persistent: PersistentRepr,
tags: Set[String],
tagPidSequenceNr: Long,
uuid: UUID,
bucketSize: BucketSize): Unit =
write(persistent, tags, tagPidSequenceNr, uuid, bucketSize)
private def write(
pr: PersistentRepr,
tags: Set[String],
tagPidSequenceNr: Long,
uuid: UUID,
bucketSize: BucketSize): Unit = {
val event = pr.payload.asInstanceOf[AnyRef]
val serializer = serialization.findSerializerFor(event)
val serialized = ByteBuffer.wrap(serialization.serialize(event).get)
val serManifest = Serializers.manifestFor(serializer, pr)
val timeBucket = TimeBucket(Uuids.unixTimestamp(uuid), bucketSize)
tags.foreach(tag => {
val bs = preparedWriteTagMessage
.bind()
.setString("tag_name", tag)
.setLong("timebucket", timeBucket.key)
.setUuid("timestamp", uuid)
.setLong("tag_pid_sequence_nr", tagPidSequenceNr)
.setByteBuffer("event", serialized)
.setString("event_manifest", pr.manifest)
.setString("persistence_id", pr.persistenceId)
.setInt("ser_id", serializer.identifier)
.setString("ser_manifest", serManifest)
.setString("writer_uuid", "ManualWrite")
.setLong("sequence_nr", pr.sequenceNr)
cluster.execute(bs)
})
system.log.debug(
"Written event: {} Uuid: {} Timebucket: {} TagPidSeqNr: {}",
pr.payload,
formatOffset(uuid),
timeBucket,
tagPidSequenceNr)
}
}
|
chbatey/akka-persistence-cassandra
|
core/src/test/scala/akka/persistence/cassandra/query/TestTagWriter.scala
|
Scala
|
apache-2.0
| 3,526 |
/*
* Copyright (c) 2012 Roberto Tyley
*
* This file is part of 'BFG Repo-Cleaner' - a tool for removing large
* or troublesome blobs from Git repositories.
*
* BFG Repo-Cleaner is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BFG Repo-Cleaner is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see http://www.gnu.org/licenses/ .
*/
package com.madgag.git.bfg
import com.madgag.git.bfg.model.{FileName, Tree}
import org.eclipse.jgit.lib.FileMode
import org.eclipse.jgit.lib.FileMode._
import org.eclipse.jgit.lib.ObjectId.zeroId
import org.specs2.mutable._
class TreeEntrySpec extends Specification {
def a(mode: FileMode, name: String) = Tree.Entry(FileName(name), mode, zeroId)
"Tree entry ordering" should {
"match ordering used by Git" in {
a(TREE, "agit-test-utils") should be < (a(TREE, "agit"))
}
}
}
|
rbramwell/bfg-repo-cleaner
|
bfg-library/src/test/scala/com/madgag/git/bfg/TreeEntrySpec.scala
|
Scala
|
gpl-3.0
| 1,319 |
/*
* Copyright 2010 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.fusesource.fabric.webui.util
import java.io._
import java.util.concurrent.{ThreadFactory, LinkedBlockingQueue, TimeUnit, ThreadPoolExecutor}
import java.util.Properties
import java.util.regex.{Matcher, Pattern}
import java.lang.reflect.{Method, Field}
import collection.mutable.ListBuffer
import java.security.AccessControlException
import java.beans.{PropertyEditor, PropertyEditorManager}
object IOSupport {
def read_bytes(in: InputStream) = {
val out = new ByteArrayOutputStream()
copy(in, out)
out.toByteArray
}
/**
* Returns the number of bytes copied.
*/
def copy(in: InputStream, out: OutputStream): Long = {
var bytesCopied: Long = 0
val buffer = new Array[Byte](8192)
var bytes = in.read(buffer)
while (bytes >= 0) {
out.write(buffer, 0, bytes)
bytesCopied += bytes
bytes = in.read(buffer)
}
bytesCopied
}
def using[R, C <: Closeable](closable: C)(proc: C => R) = {
try {
proc(closable)
} finally {
try {
closable.close
} catch {
        case _: Throwable => // ignore failures while closing
}
}
}
def write_text(out: OutputStream, value: String, charset: String = "UTF-8"): Unit = {
write_bytes(out, value.getBytes(charset))
}
def write_bytes(out: OutputStream, data: Array[Byte]): Unit = {
copy(new ByteArrayInputStream(data), out)
}
}
object FileSupport {
import IOSupport._
implicit def to_rich_file(file: File): RichFile = new RichFile(file)
val file_separator = System.getProperty("file.separator")
def fix_file_separator(command: String) = command.replaceAll( """/|\\\\""", Matcher.quoteReplacement(file_separator))
case class RichFile(self: File) {
def /(path: String) = new File(self, path)
def copy_to(target: File) = {
using(new FileOutputStream(target)) {
os =>
using(new FileInputStream(self)) {
is =>
IOSupport.copy(is, os)
}
}
}
def recursive_list: List[File] = {
if (self.isDirectory) {
self :: self.listFiles.toList.flatten(_.recursive_list)
} else {
self :: Nil
}
}
def recursive_delete: Unit = {
if (self.exists) {
if (self.isDirectory) {
self.listFiles.foreach(_.recursive_delete)
}
self.delete
}
}
def recursive_copy_to(target: File): Unit = {
if (self.isDirectory) {
target.mkdirs
self.listFiles.foreach(file => file.recursive_copy_to(target / file.getName))
} else {
self.copy_to(target)
}
}
def read_bytes: Array[Byte] = {
using(new FileInputStream(self)) {
in =>
IOSupport.read_bytes(in)
}
}
def read_text(charset: String = "UTF-8"): String = new String(this.read_bytes, charset)
def write_bytes(data: Array[Byte]): Unit = {
using(new FileOutputStream(self)) {
out =>
IOSupport.write_bytes(out, data)
}
}
def write_text(data: String, charset: String = "UTF-8"): Unit = {
using(new FileOutputStream(self)) {
out =>
IOSupport.write_text(out, data, charset)
}
}
}
}
|
janstey/fuse
|
fmc/fmc-rest/src/main/scala/org/fusesource/fabric/webui/util/Support.scala
|
Scala
|
apache-2.0
| 3,811 |
package net.rrm.ehour.persistence.mail.dao
import net.rrm.ehour.domain.MailLog
import net.rrm.ehour.persistence.dao.{AbstractGenericDaoHibernateImpl, GenericDao}
import org.springframework.stereotype.Repository
trait MailLogDao extends GenericDao[Integer, MailLog] {
def find(event: String): List[MailLog]
def find(mailTo: String, event: String): List[MailLog]
}
@Repository
class MailLogDaoHibernateImpl extends AbstractGenericDaoHibernateImpl[Integer, MailLog](classOf[MailLog]) with MailLogDao {
import scala.collection.JavaConversions._
override def find(event: String):List[MailLog] = findByNamedQuery("MailLog.findOnEvent", "event", event).toList
override def find(mailTo: String, event: String):List[MailLog] = findByNamedQuery("MailLog.findOnEventAndMailTo", List("event", "mailTo"), List(event, mailTo)).toList
}
|
momogentoo/ehour
|
eHour-persistence/src/main/scala/net/rrm/ehour/persistence/mail/dao/MailLogDao.scala
|
Scala
|
gpl-2.0
| 835 |
package model
import skinny.DBSettings
import skinny.test._
import org.scalatest.fixture.FlatSpec
import org.scalatest._
import scalikejdbc._
import scalikejdbc.scalatest._
import org.joda.time._
class SchoolContentSpec extends FlatSpec with Matchers with DBSettings with AutoRollback {
}
|
yoshitakes/skinny-task-example
|
src/test/scala/model/SchoolContentSpec.scala
|
Scala
|
mit
| 291 |
package pl.touk.nussknacker.engine.util.config
import java.io.File
import java.net.URI
trait URIExtensions {
implicit class ExtendedURI(uri: URI) {
def withFileSchemeDefault: URI = if (uri.isAbsolute) uri else new File(uri.getSchemeSpecificPart).toURI
def withTrailingSlash: URI = if (uri.getPath.endsWith("/")) uri else URI.create(uri.toString + "/")
}
}
|
TouK/nussknacker
|
utils/utils/src/main/scala/pl/touk/nussknacker/engine/util/config/URIExtensions.scala
|
Scala
|
apache-2.0
| 369 |
import com.ketalo.EmberJsKeys
import sbt._
object ApplicationBuild extends Build with EmberJsKeys {
val appName = "play-emberjs-sample"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq.empty
val main = play.Project(appName, appVersion, appDependencies).settings(
emberJsVersion := "1.0.0-rc.4"
)
}
|
ACenterAInc/play-emberjs
|
samples-pre-1.0/sample-1.0.0-rc.4/project/Build.scala
|
Scala
|
mit
| 339 |
/**
* CPSTextInterpreter - parses and interprets the CPSText DSL.
* Copyright (C) 2011 Max Leuthaeuser
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package de.qualitune.transformator
import de.qualitune.ast.role.{Role, EquivalenceConstraint, ImplicationConstraint, ProhibitionConstraint}
import de.qualitune.ast.ASTElement
/**
* @author Max Leuthaeuser
* @since 18.01.12
*/
class RoleTransformator extends ASTElementTransformator {
private def buildActMethod(name: String) = {
"def act() { while (true) { receive { case " + name + " => {behavior(); exit()} } } }\n"
}
override def apply[E <: ASTElement, T <: AnyRef](s: ExecutableString, elem: E, data: T) = {
elem match {
case r: Role => data match {
case d: List[Role] => {
s + ("case object token_" + r.name + "\n")
val si = if (r.singleton) ", true" else ""
s + "class " + r.name + "(core: Component) extends ComponentRole(core" + si + ") with DaemonActor {\n"
// act method to start the behaviour method when the context the role belongs to gets activated
s + buildActMethod("token_" + r.name)
// variables:
r.variables.foreach(new VariableTransformator()(s, _, null))
// behavior:
new CallableTransformator()(s, r.behavior, null)
// methods:
r.operations.foreach(new CallableTransformator()(s, _, null))
s + "\n}\n"
}
}
case ec: EquivalenceConstraint => s // TODO handle EquivalenceConstraint interpretation
case ic: ImplicationConstraint => s // TODO handle ImplicationConstraint interpretation
case pc: ProhibitionConstraint => s // TODO handle ProhibitionConstraint interpretation
      case _ => throw new IllegalArgumentException("Unknown Role or RoleConstraint type!")
}
}
}
|
max-leuthaeuser/CPSTextInterpreter
|
src/main/scala/de/qualitune/transformator/RoleTransformator.scala
|
Scala
|
gpl-3.0
| 2,492 |
package scarla.domain
import scala.collection.immutable.Vector
object State {
final case class Transition(s: State, aid: Int, r: Double, ns: State)
}
case class State(val values: Vector[Double],
val availableActions: Vector[Int],
val isTerminal: Boolean = false) extends Serializable
|
tspooner/scaRLa
|
src/main/scala/scarla/domain/State.scala
|
Scala
|
mit
| 323 |
package lean1
/**
* Created by oot on 2014/11/18.
 * Method overloading.
*/
class EvenMoreScientficCalculator(brand:String) extends Calculator(brand){
//def log(m:Int): Double = log(m,math.exp(1))
}
|
HYToop/Scala
|
Base1/src/lean1/EvenMoreScientficCalculator.scala
|
Scala
|
unlicense
| 197 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.libs.iteratee
import org.specs2.mutable._
import java.io.{ ByteArrayInputStream, File, FileOutputStream, OutputStream }
import java.util.concurrent.{ CountDownLatch, TimeUnit }
import java.util.concurrent.atomic.AtomicInteger
import play.api.libs.iteratee.Execution.Implicits.{ defaultExecutionContext => dec }
import scala.concurrent.{ Promise, Future, Await }
import scala.concurrent.duration.Duration
object EnumeratorsSpec extends Specification
with IterateeSpecification with ExecutionSpecification {
"Enumerator's interleave" should {
"mix it with another enumerator into one" in {
mustExecute(8) { foldEC =>
val e1 = Enumerator(List(1), List(3), List(5), List(7))
val e2 = Enumerator(List(2), List(4), List(6), List(8))
val e = e1 interleave e2
val kk = e |>>> Iteratee.fold(List.empty[Int])((r, e: List[Int]) => r ++ e)(foldEC)
val result = Await.result(kk, Duration.Inf)
println("interleaved enumerators result is: " + result)
result.diff(Seq(1, 2, 3, 4, 5, 6, 7, 8)) must equalTo(Seq())
}
}
"yield when both enumerators EOF" in {
mustExecute(8) { foldEC =>
val e1 = Enumerator(List(1), List(3), List(5), List(7)) >>> Enumerator.enumInput(Input.EOF)
val e2 = Enumerator(List(2), List(4), List(6), List(8)) >>> Enumerator.enumInput(Input.EOF)
val e = e1 interleave e2
val kk = e |>>> Iteratee.fold(List.empty[Int])((r, e: List[Int]) => r ++ e)(foldEC)
val result = Await.result(kk, Duration.Inf)
result.diff(Seq(1, 2, 3, 4, 5, 6, 7, 8)) must equalTo(Seq())
}
}
"yield when iteratee is done!" in {
mustExecute(7) { foldEC =>
val e1 = Enumerator(List(1), List(3), List(5), List(7))
val e2 = Enumerator(List(2), List(4), List(6), List(8))
val e = e1 interleave e2
val kk = e |>>> Enumeratee.take(7) &>> Iteratee.fold(List.empty[Int])((r, e: List[Int]) => r ++ e)(foldEC)
val result = Await.result(kk, Duration.Inf)
result.length must equalTo(7)
}
}
"not necessarily go alternatively between two enumerators" in {
mustExecute(1, 2) { (onDoneEC, unfoldEC) =>
val firstDone = Promise[Unit]
        val e1 = Enumerator(1, 2, 3, 4).onDoneEnumerating(firstDone.success(()))(onDoneEC)
val e2 = Enumerator.unfoldM[Boolean, Int](true) { first => if (first) firstDone.future.map(_ => Some((false, 5))) else Future.successful(None) }(unfoldEC)
val result = Await.result((e1 interleave e2) |>>> Iteratee.getChunks[Int], Duration.Inf)
result must_== Seq(1, 2, 3, 4, 5)
}
}
}
"Enumerator.enumerate " should {
"generate an Enumerator from a singleton Iterator" in {
mustExecute(1) { foldEC =>
val iterator = scala.collection.Iterator.single[Int](3)
val futureOfResult = Enumerator.enumerate(iterator) |>>>
Enumeratee.take(1) &>>
Iteratee.fold(List.empty[Int])((r, e: Int) => e :: r)(foldEC)
val result = Await.result(futureOfResult, Duration.Inf)
result(0) must equalTo(3)
result.length must equalTo(1)
}
}
"take as much element as in the iterator in the right order" in {
mustExecute(50) { foldEC =>
val iterator = scala.collection.Iterator.range(0, 50)
val futureOfResult = Enumerator.enumerate(iterator) |>>>
Enumeratee.take(100) &>>
Iteratee.fold(Seq.empty[Int])((r, e: Int) => r :+ e)(foldEC)
val result = Await.result(futureOfResult, Duration.Inf)
result.length must equalTo(50)
result(0) must equalTo(0)
result(49) must equalTo(49)
}
}
"work with Seq too" in {
mustExecute(6) { foldEC =>
val seq = List(1, 2, 3, 7, 42, 666)
val futureOfResult = Enumerator.enumerate(seq) |>>>
Enumeratee.take(100) &>>
Iteratee.fold(Seq.empty[Int])((r, e: Int) => r :+ e)(foldEC)
val result = Await.result(futureOfResult, Duration.Inf)
result.length must equalTo(6)
result(0) must equalTo(1)
result(4) must equalTo(42)
}
}
}
/*"Enumerator's PatchPanel" should {
"allow to patch in different Enumerators" in {
import play.api.libs.concurrent.Promise
val pp = Promise[Concurrent.PatchPanel[Int]]()
val e = Concurrent.patchPanel[Int](p => pp.redeem(p))
val i1 = Iteratee.fold[Int,Int](0){(s,i) => println(i);s+i}
val sum = e |>> i1
val p = pp.future.await.get
p.patchIn(Enumerator(1,2,3,4))
p.patchIn(Enumerator.eof)
sum.flatMap(_.run).value1.get must equalTo(10)
}
}*/
"Enumerator.apply" should {
"enumerate zero args" in {
mustEnumerateTo()(Enumerator())
}
"enumerate 1 arg" in {
mustEnumerateTo(1)(Enumerator(1))
}
"enumerate more than 1 arg" in {
mustEnumerateTo(1, 2)(Enumerator(1, 2))
mustEnumerateTo(1, 2, 3)(Enumerator(1, 2, 3))
}
}
"Enumerator" should {
"call onDoneEnumerating callback" in {
mustExecute(1) { onDoneEC =>
val count = new java.util.concurrent.atomic.AtomicInteger()
mustEnumerateTo(1, 2, 3)(Enumerator(1, 2, 3).onDoneEnumerating(count.incrementAndGet())(onDoneEC))
count.get() must equalTo(1)
}
}
"call onDoneEnumerating callback when an error is encountered" in {
mustExecute(1) { onDoneEC =>
val count = new java.util.concurrent.atomic.AtomicInteger()
mustPropagateFailure(
Enumerator(1, 2, 3).onDoneEnumerating(count.incrementAndGet())(onDoneEC)
)
count.get() must_== 1
}
}
"transform input elements with map" in {
mustExecute(3) { mapEC =>
mustEnumerateTo(2, 4, 6)(Enumerator(1, 2, 3).map(_ * 2)(mapEC))
}
}
"transform input with map" in {
mustExecute(3) { mapEC =>
mustEnumerateTo(2, 4, 6)(Enumerator(1, 2, 3).mapInput(_.map(_ * 2))(mapEC))
}
}
"be transformed to another Enumerator using flatMap" in {
mustExecute(3, 30) { (flatMapEC, foldEC) =>
val e = Enumerator(10, 20, 30).flatMap(i => Enumerator((i until i + 10): _*))(flatMapEC)
val it = Iteratee.fold[Int, Int](0)((sum, x) => sum + x)(foldEC)
Await.result(e |>>> it, Duration.Inf) must equalTo((10 until 40).sum)
}
}
}
"Enumerator.generateM" should {
"generate a stream of values until the expression is None" in {
mustExecute(12, 11) { (generateEC, foldEC) =>
val a = (0 to 10).toList
val it = a.iterator
val enumerator = Enumerator.generateM(Future(if (it.hasNext) Some(it.next()) else None))(generateEC)
Await.result(enumerator |>>> Iteratee.fold[Int, String]("")(_ + _)(foldEC), Duration.Inf) must equalTo("012345678910")
}
}
"Can be composed with another enumerator (doesn't send EOF)" in {
mustExecute(12, 12) { (generateEC, foldEC) =>
val a = (0 to 10).toList
val it = a.iterator
val enumerator = Enumerator.generateM(Future(if (it.hasNext) Some(it.next()) else None))(generateEC) >>> Enumerator(12)
Await.result(enumerator |>>> Iteratee.fold[Int, String]("")(_ + _)(foldEC), Duration.Inf) must equalTo("01234567891012")
}
}
}
"Enumerator.callback1" should {
"Call onError on iteratee's error state" in {
val it = Error[String]("foo", Input.Empty)
val errorCount = new AtomicInteger(0)
val enum = Enumerator.fromCallback1[String](
b => Future.successful(None),
() => (),
(msg, input) =>
errorCount.incrementAndGet()
)
val result = enum |>>> it
Await.ready(result, Duration(30, TimeUnit.SECONDS))
errorCount.get() must equalTo(1)
}
"Call onError on future failure" in {
val it1 = Iteratee.fold1[String, String](Future.successful(""))((_, _) => Future.failed(new RuntimeException()))
val it2 = Iteratee.fold1[String, String](Future.failed(new RuntimeException()))((_, _) => Future.failed(new RuntimeException()))
val errorCount = new AtomicInteger(0)
val enum = Enumerator.fromCallback1[String](
b => Future.successful(Some("")),
() => (),
(msg, input) =>
errorCount.incrementAndGet()
)
val result1 = enum |>>> it1
val result2 = enum |>>> it2
Await.ready(result1.zip(result2), Duration(2, TimeUnit.SECONDS))
errorCount.get() must equalTo(2)
}
"generate a stream of values until the expression is None" in {
mustExecute(5) { callbackEC =>
val it = (1 to 3).iterator // FIXME: Probably not thread-safe
val completeCount = new AtomicInteger(0)
val completeDone = new CountDownLatch(1)
val errorCount = new AtomicInteger(0)
val enumerator = Enumerator.fromCallback1(
b => Future(if (it.hasNext) Some((b, it.next())) else None),
() => {
completeCount.incrementAndGet()
completeDone.countDown()
},
(_: String, _: Input[(Boolean, Int)]) => errorCount.incrementAndGet())(callbackEC)
mustEnumerateTo((true, 1), (false, 2), (false, 3))(enumerator)
completeDone.await(30, TimeUnit.SECONDS) must beTrue
completeCount.get() must equalTo(1)
errorCount.get() must equalTo(0)
}
}
}
"Enumerator.fromStream" should {
"read bytes from a stream" in {
mustExecute(3) { fromStreamEC =>
val s = "hello"
val enumerator = Enumerator.fromStream(new ByteArrayInputStream(s.getBytes))(fromStreamEC).map(new String(_))
mustEnumerateTo(s)(enumerator)
}
}
"close the stream" in {
class CloseableByteArrayInputStream(bytes: Array[Byte]) extends ByteArrayInputStream(bytes) {
@volatile var closed = false
override def close() = {
closed = true
}
}
"when done normally" in {
val stream = new CloseableByteArrayInputStream(Array.empty)
mustExecute(2) { fromStreamEC =>
Await.result(Enumerator.fromStream(stream)(fromStreamEC)(Iteratee.ignore), Duration.Inf)
stream.closed must beTrue
}
}
"when completed abnormally" in {
val stream = new CloseableByteArrayInputStream("hello".getBytes)
mustExecute(2) { fromStreamEC =>
mustPropagateFailure(Enumerator.fromStream(stream)(fromStreamEC))
stream.closed must beTrue
}
}
}
}
"Enumerator.fromFile" should {
"read bytes from a file" in {
mustExecute(3) { fromFileEC =>
val f = File.createTempFile("EnumeratorSpec", "fromFile")
try {
val s = "hello"
val out = new FileOutputStream(f)
out.write(s.getBytes)
out.close()
val enumerator = Enumerator.fromFile(f)(fromFileEC).map(new String(_))
mustEnumerateTo(s)(enumerator)
} finally {
f.delete()
}
}
}
}
"Enumerator.unfoldM" should {
"Can be composed with another enumerator (doesn't send EOF)" in {
mustExecute(12, 12) { (foldEC, unfoldEC) =>
val enumerator = Enumerator.unfoldM[Int, Int](0)(s => Future(if (s > 10) None else Some((s + 1, s + 1))))(unfoldEC) >>> Enumerator(12)
Await.result(enumerator |>>> Iteratee.fold[Int, String]("")(_ + _)(foldEC), Duration.Inf) must equalTo("123456789101112")
}
}
}
"Enumerator.unfold" should {
"unfolds a value into input for an enumerator" in {
mustExecute(5) { unfoldEC =>
val enumerator = Enumerator.unfold[Int, Int](0)(s => if (s > 3) None else Some((s + 1, s)))(unfoldEC)
mustEnumerateTo(0, 1, 2, 3)(enumerator)
}
}
}
"Enumerator.repeat" should {
"supply input from a by-name arg" in {
mustExecute(3) { repeatEC =>
val count = new AtomicInteger(0)
val fut = Enumerator.repeat(count.incrementAndGet())(repeatEC) |>>> (Enumeratee.take(3) &>> Iteratee.getChunks[Int])
Await.result(fut, Duration.Inf) must equalTo(List(1, 2, 3))
}
}
}
"Enumerator.repeatM" should {
"supply input from a by-name arg" in {
mustExecute(3) { repeatEC =>
val count = new AtomicInteger(0)
val fut = Enumerator.repeatM(Future.successful(count.incrementAndGet()))(repeatEC) |>>> (Enumeratee.take(3) &>> Iteratee.getChunks[Int])
Await.result(fut, Duration.Inf) must equalTo(List(1, 2, 3))
}
}
}
"Enumerator.outputStream" should {
"produce the same value written in the OutputStream" in {
mustExecute(1, 2) { (outputEC, foldEC) =>
val a = "FOO"
val b = "bar"
val enumerator = Enumerator.outputStream { outputStream =>
outputStream.write(a.toArray.map(_.toByte))
outputStream.write(b.toArray.map(_.toByte))
outputStream.close()
}(outputEC)
val promise = (enumerator |>>> Iteratee.fold[Array[Byte], Array[Byte]](Array[Byte]())(_ ++ _)(foldEC))
Await.result(promise, Duration.Inf).map(_.toChar).foldLeft("")(_ + _) must equalTo(a + b)
}
}
"not block" in {
mustExecute(1) { outputEC =>
var os: OutputStream = null
val osReady = new CountDownLatch(1)
val enumerator = Enumerator.outputStream { o => os = o; osReady.countDown() }(outputEC)
val promiseIteratee = Promise[Iteratee[Array[Byte], Array[Byte]]]
val future = enumerator |>>> Iteratee.flatten(promiseIteratee.future)
osReady.await(30, TimeUnit.SECONDS) must beTrue
// os should now be set
os.write("hello".getBytes)
os.write(" ".getBytes)
os.write("world".getBytes)
os.close()
promiseIteratee.success(Iteratee.consume[Array[Byte]]())
Await.result(future, Duration("10s")) must_== "hello world".getBytes
}
}
}
}
|
jyotikamboj/container
|
pf-framework/src/iteratees/src/test/scala/play/api/libs/iteratee/EnumeratorsSpec.scala
|
Scala
|
mit
| 13,972 |
// #Sireum
/*
MIT License
Copyright (c) 2020 brainhub
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package org.sireum.crypto
import org.sireum._
import org.sireum.U8._
import org.sireum.U64._
// Adapted from https://github.com/brainhub/SHA3IUF
object SHA3 {
val spongeWords: Z = 25
val rounds: Z = 24
// @formatter:off
val rndc: ISZ[U64] = ISZ(
u64"0x0000000000000001", u64"0x0000000000008082",
u64"0x800000000000808a", u64"0x8000000080008000",
u64"0x000000000000808b", u64"0x0000000080000001",
u64"0x8000000080008081", u64"0x8000000000008009",
u64"0x000000000000008a", u64"0x0000000000000088",
u64"0x0000000080008009", u64"0x000000008000000a",
u64"0x000000008000808b", u64"0x800000000000008b",
u64"0x8000000000008089", u64"0x8000000000008003",
u64"0x8000000000008002", u64"0x8000000000000080",
u64"0x000000000000800a", u64"0x800000008000000a",
u64"0x8000000080008081", u64"0x8000000000008080",
u64"0x0000000080000001", u64"0x8000000080008008"
)
val rotc: ISZ[U64] = ISZ(
u64"1", u64"3", u64"6", u64"10", u64"15", u64"21", u64"28", u64"36",
u64"45", u64"55", u64"2", u64"14", u64"27", u64"41", u64"56", u64"8",
u64"25", u64"43", u64"62", u64"18", u64"39", u64"61", u64"20", u64"44"
)
val piln: ISZ[Z] = ISZ(
10, 7, 11, 17, 18, 3, 5, 16,
8, 21, 24, 4, 15, 23, 19, 13,
12, 2, 20, 14, 22, 9, 6, 1
)
// @formatter:on
@pure def rotl(x: U64, y: U64): U64 = {
return (x << y) | (x >> (u64"64" - y))
}
def keccakf(s: MSZ[U64]): Unit = {
var t = u64"0"
val bc = MSZ.create(5, u64"0")
for (round <- z"0" until rounds) {
/* Theta */
for (i <- 0 until 5) {
bc(i) = s(i) |^ s(i + 5) |^ s(i + 10) |^ s(i + 15) |^ s(i + 20)
}
for (i <- 0 until 5) {
t = bc((i + 4) % 5) |^ rotl(bc((i + 1) % 5), u64"1")
for (j <- 0 until 25 by 5) {
s(j + i) = s(j + i) |^ t
}
}
/* Rho Pi */
t = s(1)
for (i <- 0 until 24) {
val j = piln(i)
bc(0) = s(j)
s(j) = rotl(t, rotc(i))
t = bc(0)
}
/* Chi */
for (j <- 0 until 25 by 5) {
for (i <- 0 until 5) {
bc(i) = s(j + i)
}
for (i <- 0 until 5) {
s(j + i) = s(j + i) |^ ((~bc((i + 1) % 5)) & bc((i + 2) % 5))
}
}
/* Iota */
s(0) = s(0) |^ rndc(round)
}
}
@pure def init256: SHA3 = {
return SHA3(8)
}
@pure def init384: SHA3 = {
return SHA3(12)
}
@pure def init512: SHA3 = {
return SHA3(16)
}
@pure def sum256(data: ISZ[U8]): ISZ[U8] = {
val sha3 = init256
sha3.update(data)
val r = sha3.finalise()
return r
}
@pure def sum384(data: ISZ[U8]): ISZ[U8] = {
val sha3 = init384
sha3.update(data)
val r = sha3.finalise()
return r
}
@pure def sum512(data: ISZ[U8]): ISZ[U8] = {
val sha3 = init512
sha3.update(data)
val r = sha3.finalise()
return r
}
}
import SHA3._
@record class SHA3(val capacityWords: Z) {
var saved: U64 = u64"0"
var byteIndex: U64 = u64"0"
var wordIndex: Z = 0
var s: MSZ[U64] = MS.create[Z, U64](25, u64"0")
def update(buf: ISZ[U8]): Unit = {
assert(byteIndex < u64"8")
assert(wordIndex < 25)
var oldTail = (8 - conversions.U64.toZ(byteIndex)) % 8
var len = buf.size
var index = 0
if (len < oldTail) {
while (len > 0) {
saved = saved | (conversions.U8.toU64(buf(index)) << (byteIndex * u64"8"))
byteIndex = byteIndex + u64"1"
index = index + 1
len = len - 1
}
assert(byteIndex < u64"8")
return
}
if (oldTail > 0) {
len = len - oldTail
while (oldTail > 0) {
saved = saved | (conversions.U8.toU64(buf(index)) << (byteIndex * u64"8"))
byteIndex = byteIndex + u64"1"
index = index + 1
oldTail = oldTail - 1
}
s(wordIndex) = s(wordIndex) |^ saved
assert(byteIndex == u64"8")
byteIndex = u64"0"
saved = u64"0"
wordIndex = wordIndex + 1
if (wordIndex == spongeWords - capacityWords) {
keccakf(s)
wordIndex = 0
}
}
assert(byteIndex == u64"0")
val words = len / 8
for (_ <- z"0" until words) {
val t = conversions.U8.toU64(buf(index)) |
(conversions.U8.toU64(buf(index + 1)) << u64"8") |
(conversions.U8.toU64(buf(index + 2)) << u64"16") |
(conversions.U8.toU64(buf(index + 3)) << u64"24") |
(conversions.U8.toU64(buf(index + 4)) << u64"32") |
(conversions.U8.toU64(buf(index + 5)) << u64"40") |
(conversions.U8.toU64(buf(index + 6)) << u64"48") |
(conversions.U8.toU64(buf(index + 7)) << u64"56")
s(wordIndex) = s(wordIndex) |^ t
wordIndex = wordIndex + 1
if (wordIndex == spongeWords - capacityWords) {
keccakf(s)
wordIndex = 0
}
index = index + 8
}
var tail = len - words * 8
assert(byteIndex == u64"0" && tail < 8)
while (tail > 0) {
saved = saved | (conversions.U8.toU64(buf(index)) << (byteIndex * u64"8"))
byteIndex = byteIndex + u64"1"
index = index + 1
tail = tail - 1
}
assert(byteIndex < u64"8")
}
def finalise(): ISZ[U8] = {
s(wordIndex) = s(wordIndex) |^ (saved |^ ((u64"0x02" | (u64"1" << u64"2")) << (byteIndex * u64"8")))
s(spongeWords - capacityWords - 1) = s(spongeWords - capacityWords - 1) |^ u64"0x8000000000000000"
keccakf(s)
val sb = MSZ.create(capacityWords * 4, u8"0")
for (i <- z"0" until (capacityWords / 2)) {
val t = s(i)
sb(i * 8) = conversions.U64.toU8(t & u64"0xFF")
sb(i * 8 + 1) = conversions.U64.toU8((t >> u64"8") & u64"0xFF")
sb(i * 8 + 2) = conversions.U64.toU8((t >> u64"16") & u64"0xFF")
sb(i * 8 + 3) = conversions.U64.toU8((t >> u64"24") & u64"0xFF")
sb(i * 8 + 4) = conversions.U64.toU8((t >> u64"32") & u64"0xFF")
sb(i * 8 + 5) = conversions.U64.toU8((t >> u64"40") & u64"0xFF")
sb(i * 8 + 6) = conversions.U64.toU8((t >> u64"48") & u64"0xFF")
sb(i * 8 + 7) = conversions.U64.toU8((t >> u64"56") & u64"0xFF")
}
return sb.toIS
}
}
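// Editor's sketch (not part of the original file): assuming `data: ISZ[U8]` already holds the
// message bytes, the convenience entry points compute a digest in a single call:
//   val d256: ISZ[U8] = SHA3.sum256(data)  // 32-byte SHA3-256 digest
//   val d512: ISZ[U8] = SHA3.sum512(data)  // 64-byte SHA3-512 digest
// Incremental hashing is also possible via init256 / update / finalise.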
|
sireum/v3-logika-runtime
|
library/shared/src/main/scala/org/sireum/crypto/SHA3.scala
|
Scala
|
bsd-2-clause
| 7,217 |
package pl.newicom.dddd.test.dummy
import akka.actor.{ActorPath, Props}
import pl.newicom.dddd.actor.PassivationConfig
import pl.newicom.dddd.aggregate._
import pl.newicom.dddd.messaging.correlation.EntityIdResolution
import pl.newicom.dddd.messaging.event.EventMessage
import pl.newicom.dddd.process.{Saga, SagaActorFactory, SagaConfig}
import pl.newicom.dddd.test.dummy.DummyAggregateRoot.ValueChanged
import pl.newicom.dddd.test.dummy.DummySaga.DummyCommand
object DummySaga {
implicit def defaultSagaIdResolution[A]: EntityIdResolution[A] = new EntityIdResolution[A]
implicit object DummySagaActorFactory extends SagaActorFactory[DummySaga] {
override def props(pc: PassivationConfig): Props = {
Props(new DummySaga(pc, None))
}
}
class DummySagaConfig(bpsName: String) extends SagaConfig[DummySaga](bpsName) {
def correlationIdResolver = {
case ValueChanged(pId, _, _) => pId
case _ => throw new scala.RuntimeException("unknown event")
}
}
case class DummyCommand(processId: EntityId, value: Int) extends Command {
override def aggregateId: String = processId
}
}
/**
* <code>DummySaga</code> keeps a <code>counter</code> that is bumped whenever
 * a <code>ValueChanged</code> event is received containing <code>value</code> equal to <code>counter + 1</code>
*/
class DummySaga(override val pc: PassivationConfig, dummyOffice: Option[ActorPath]) extends Saga {
override def persistenceId: String = s"DummySaga-$id"
var counter: Int = 0
def applyEvent = {
case e =>
val de = e.asInstanceOf[ValueChanged]
counter = de.value.asInstanceOf[Int]
context.system.eventStream.publish(e)
log.debug(s"Applied event message: ${eventMessage}")
if (dummyOffice.isDefined) {
deliverCommand(dummyOffice.get, DummyCommand(de.id, counter))
}
}
// see alternative implementation below
def receiveEvent: Receive = {
case em @ EventMessage(_, ValueChanged(_, value: Int, _)) if counter + 1 == value =>
raise(em)
log.debug(s"Processed event: $em")
}
// alternative implementation
/*
def receiveEvent: Receive = {
case em: EventMessage => em.event match {
case ValueChanged(_, value: Int) if currentValue + 1 == value =>
raise(em)
log.debug(s"Processed event: $em")
case _ => handleUnexpectedEvent(em)
}
}
*/
}
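// Editor's sketch (not part of the original file; persistence ids and values are made up):
// the saga only advances on strictly sequential values. With counter == 0, an EventMessage
// carrying ValueChanged("p-1", 1, ...) matches counter + 1, so it is raised and counter
// becomes 1; a subsequent ValueChanged("p-1", 3, ...) is ignored because 3 != counter + 1.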
|
ahjohannessen/akka-ddd
|
akka-ddd-test/src/test/scala/pl/newicom/dddd/test/dummy/DummySaga.scala
|
Scala
|
mit
| 2,410 |
package org.cmt
object Common {
val aCommonValue: Int = 123
}
|
lightbend-training/course-management-tools
|
course-templates/scala3-cmt-template-common/common/src/main/scala/org/cmt/Common.scala
|
Scala
|
apache-2.0
| 64 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.akka
import scala.concurrent.duration.Duration
import akka.actor.{ Actor, Terminated }
import com.typesafe.scalalogging.LazyLogging
abstract class BaseActor extends Actor with LazyLogging {
implicit def system = context.system
def scheduler = system.scheduler
implicit def dispatcher = system.dispatcher
// FIXME is ReceiveTimeout set up by default?
override def preStart(): Unit = context.setReceiveTimeout(Duration.Undefined)
override def preRestart(reason: Throwable, message: Option[Any]): Unit =
logger.error(s"Actor $this crashed on message $message", reason)
override def unhandled(message: Any): Unit =
message match {
case Terminated(dead) => super.unhandled(message)
case unknown => throw new IllegalArgumentException(s"Actor $this doesn't support message $unknown")
}
}
|
wiacekm/gatling
|
gatling-core/src/main/scala/io/gatling/core/akka/BaseActor.scala
|
Scala
|
apache-2.0
| 1,477 |
package controllers.backend
import play.api.libs.json.Json
import com.overviewdocs.models.StoreObject
import com.overviewdocs.models.tables.{StoreObjects,DocumentStoreObjects}
class DbStoreObjectBackendSpec extends DbBackendSpecification {
trait BaseScope extends DbBackendScope {
import database.api._
val backend = new DbStoreObjectBackend(database)
def findStoreObject(id: Long) = {
blockingDatabase.option(StoreObjects.filter(_.id === id))
}
def findDocumentStoreObject(documentId: Long, storeObjectId: Long) = {
blockingDatabase.option(
DocumentStoreObjects
.filter(_.documentId === documentId)
.filter(_.storeObjectId === storeObjectId)
)
}
}
"DbStoreObjectBackend" should {
"#index" should {
trait IndexScope extends BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
}
"index a store's objects" in new IndexScope {
val so1 = factory.storeObject(storeId=store.id)
val so2 = factory.storeObject(storeId=store.id)
val ret = await(backend.index(store.id))
ret.length must beEqualTo(2)
ret.map(_.id) must containTheSameElementsAs(Vector(so1.id, so2.id))
}
"filter by indexedLong" in new IndexScope {
val so1 = factory.storeObject(storeId=store.id, indexedLong=Some(4L))
val so2 = factory.storeObject(storeId=store.id, indexedLong=Some(6L))
val ret = await(backend.index(store.id, indexedLong=Some(4L)))
ret.length must beEqualTo(1)
ret(0).id must beEqualTo(so1.id)
}
"filter by indexedString" in new IndexScope {
val so1 = factory.storeObject(storeId=store.id, indexedString=Some("foo"))
val so2 = factory.storeObject(storeId=store.id, indexedString=Some("bar"))
val ret = await(backend.index(store.id, indexedString=Some("foo")))
ret.length must beEqualTo(1)
ret(0).id must beEqualTo(so1.id)
}
"filter by both indexedLong and indexedString" in new IndexScope {
val so1 = factory.storeObject(storeId=store.id, indexedLong=Some(4L), indexedString=Some("foo"))
val so2 = factory.storeObject(storeId=store.id, indexedLong=Some(4L), indexedString=Some("bar"))
val so3 = factory.storeObject(storeId=store.id, indexedLong=Some(6L), indexedString=Some("bar"))
val ret = await(backend.index(store.id, indexedLong=Some(4L), indexedString=Some("bar")))
ret.length must beEqualTo(1)
ret(0).id must beEqualTo(so2.id)
}
}
"#show" should {
"show a StoreObject" in new BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
val storeObject = factory.storeObject(storeId=store.id, json=Json.obj("foo" -> "bar"))
val ret = await(backend.show(storeObject.id))
ret.map(_.id) must beSome(storeObject.id)
ret.map(_.json) must beSome(storeObject.json)
}
"return None on an invalid StoreObject" in new BaseScope {
val ret = await(backend.show(123L))
ret must beNone
}
}
"#create" should {
trait CreateScope extends BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
val attributes = StoreObject.CreateAttributes(
indexedLong=Some(4L),
indexedString=Some("foo"),
json=Json.obj("foo" -> "bar")
)
def createStoreObject = await(backend.create(store.id, attributes))
lazy val storeObject = createStoreObject
}
"return a StoreObject" in new CreateScope {
storeObject.storeId must beEqualTo(store.id)
storeObject.indexedLong must beSome(4L)
storeObject.indexedString must beSome("foo")
storeObject.json must beEqualTo(Json.obj("foo" -> "bar"))
}
"write the StoreObject to the database" in new CreateScope {
val dbStoreObject = findStoreObject(storeObject.id)
dbStoreObject.map(_.storeId) must beSome(store.id)
dbStoreObject.flatMap(_.indexedLong) must beSome(4L)
dbStoreObject.flatMap(_.indexedString) must beSome("foo")
dbStoreObject.map(_.json) must beSome(Json.obj("foo" -> "bar"))
}
"pick non-conflicting StoreObject IDs" in new CreateScope {
val ret1 = createStoreObject
val ret2 = createStoreObject
ret1.id must not(beEqualTo(ret2.id))
}
}
"#createMany" should {
trait CreateManyScope extends BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
val attrs1 = StoreObject.CreateAttributes(
indexedLong=Some(1L),
indexedString=Some("foo"),
json=Json.obj("foo" -> "bar")
)
val attrs2 = StoreObject.CreateAttributes(
indexedLong=Some(2L),
indexedString=Some("bar"),
json=Json.obj("bar" -> "baz")
)
val attributesSeq = Vector(attrs1, attrs2)
def createMany = await(backend.createMany(store.id, attributesSeq))
lazy val storeObjects = createMany
}
"return StoreObjects" in new CreateManyScope {
storeObjects.map(_.storeId) must beEqualTo(Vector(store.id, store.id))
storeObjects.map(_.indexedLong) must beEqualTo(Vector(Some(1L), Some(2L)))
storeObjects.map(_.indexedString) must beEqualTo(Vector(Some("foo"), Some("bar")))
storeObjects.map(_.json) must beEqualTo(Vector(Json.obj("foo" -> "bar"), Json.obj("bar" -> "baz")))
}
"write the StoreObjects to the database" in new CreateManyScope {
val dbStoreObject1 = findStoreObject(storeObjects(0).id)
val dbStoreObject2 = findStoreObject(storeObjects(1).id)
dbStoreObject1.map(_.storeId) must beSome(store.id)
dbStoreObject1.flatMap(_.indexedLong) must beSome(1L)
dbStoreObject2.flatMap(_.indexedString) must beSome("bar")
}
"pick non-conflicting IDs" in new CreateManyScope {
await(backend.create(store.id, attrs2))
createMany must not(throwA[Exception])
await(backend.index(store.id)).length must beEqualTo(3)
}
"work with an empty list" in new CreateManyScope {
override val attributesSeq = Vector()
storeObjects must beEqualTo(Vector())
}
}
"#update" should {
trait UpdateScope extends BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
val storeObject = factory.storeObject(storeId=store.id)
val attributes = StoreObject.UpdateAttributes(
indexedLong=Some(1L),
indexedString=Some("foo"),
json=Json.obj("new foo" -> "new bar")
)
lazy val newStoreObject = updateStoreObject
val storeObjectId = storeObject.id
def updateStoreObject = await(backend.update(storeObjectId, attributes))
}
"return a StoreObject" in new UpdateScope {
newStoreObject.map(_.id) must beSome(storeObject.id)
newStoreObject.map(_.storeId) must beSome(storeObject.storeId)
}
"update the StoreObject" in new UpdateScope {
updateStoreObject
val dbStoreObject = findStoreObject(storeObject.id)
dbStoreObject.map(_.id) must beSome(storeObject.id)
dbStoreObject.map(_.storeId) must beSome(storeObject.storeId)
dbStoreObject.map(_.indexedLong) must beSome(attributes.indexedLong)
dbStoreObject.map(_.indexedString) must beSome(attributes.indexedString)
dbStoreObject.map(_.json) must beSome(attributes.json)
}
"return None when updating a non-StoreObject" in new UpdateScope {
override val storeObjectId = storeObject.id + 1L
newStoreObject must beNone
}
"not update other StoreObjects" in new UpdateScope {
val storeObject2 = factory.storeObject(storeId=store.id, json=Json.obj("bar" -> "baz"))
updateStoreObject
val dbStoreObject2 = findStoreObject(storeObject2.id)
dbStoreObject2.map(_.id) must beSome(storeObject2.id)
dbStoreObject2.map(_.json) must beSome(Json.obj("bar" -> "baz"))
}
}
"#destroy" should {
trait DestroyScope extends BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
val storeObject = factory.storeObject(storeId=store.id)
def destroy(id: Long) = await(backend.destroy(id))
}
"delete a StoreObject from the database" in new DestroyScope {
destroy(storeObject.id)
findStoreObject(storeObject.id) must beNone
}
"succeed when the StoreObject does not exist" in new DestroyScope {
destroy(storeObject.id + 1L)
}
"not destroy other StoreObjects" in new DestroyScope {
val storeObject2 = factory.storeObject(storeId=store.id)
destroy(storeObject.id)
findStoreObject(storeObject2.id) must beSome
}
"destroy associated DocumentStoreObjects" in new DestroyScope {
val document = factory.document(documentSetId=documentSet.id)
val dvo = factory.documentStoreObject(documentId=document.id, storeObjectId=storeObject.id)
destroy(storeObject.id)
findStoreObject(storeObject.id) must beNone
findDocumentStoreObject(document.id, storeObject.id) must beNone
}
}
"#destroyMany" should {
trait DestroyManyScope extends BaseScope {
val documentSet = factory.documentSet()
val apiToken = factory.apiToken(documentSetId=Some(documentSet.id))
val store = factory.store(apiToken=apiToken.token)
val obj1 = factory.storeObject(storeId=store.id)
val obj2 = factory.storeObject(storeId=store.id)
def destroyMany(ids: Long*) = await(backend.destroyMany(store.id, ids.toIndexedSeq))
}
"delete StoreObjects from the database" in new DestroyManyScope {
destroyMany(obj1.id, obj2.id) must beEqualTo(())
findStoreObject(obj1.id) must beNone
findStoreObject(obj2.id) must beNone
}
"ignore missing StoreObjects" in new DestroyManyScope {
destroyMany(obj1.id, obj2.id + 1) must beEqualTo(())
findStoreObject(obj1.id) must beNone
findStoreObject(obj2.id) must beSome
}
"ignore StoreObjects from other Views" in new DestroyManyScope {
val apiToken2 = factory.apiToken(documentSetId=Some(documentSet.id), token="token2")
val store2 = factory.store(apiToken=apiToken2.token)
val obj3 = factory.storeObject(storeId=store2.id)
destroyMany(obj1.id, obj3.id) must beEqualTo(())
findStoreObject(obj1.id) must beNone
findStoreObject(obj3.id) must beSome
}
"destroy associated DocumentStoreObjects" in new DestroyManyScope {
val document = factory.document(documentSetId=documentSet.id)
val dvo = factory.documentStoreObject(documentId=document.id, storeObjectId=obj1.id)
destroyMany(obj1.id) must beEqualTo(())
findStoreObject(obj1.id) must beNone
findDocumentStoreObject(document.id, obj1.id) must beNone
}
}
}
}
|
overview/overview-server
|
web/test/controllers/backend/DbStoreObjectBackendSpec.scala
|
Scala
|
agpl-3.0
| 11,794 |
package monocle.generic
import language.higherKinds
import monocle.PTraversal
import monocle.function.Each
import monocle.{ Iso, Traversal }
import monocle.generic.internal.TupleGeneric
import cats.Applicative
import cats.syntax.cartesian._
import shapeless.{ ::, Generic, HList, HNil }
object product extends ProductOptics
trait ProductOptics {
def productToTuple[S <: Product](implicit ev: TupleGeneric[S]): Iso[S, ev.Repr] =
Iso[S, ev.Repr](s => ev.to(s))(t => ev.from(t))
implicit def hNilEach[A] = new Each[HNil, A] {
def each: Traversal[HNil, A] = Traversal.void[HNil, A]
}
implicit def hConsEach[A, Rest <: HList](implicit restEach: Each[Rest, A]) = new Each[A :: Rest, A] {
def each: Traversal[A :: Rest, A] = new PTraversal[A :: Rest, A :: Rest, A, A] {
def modifyF[F[_]: Applicative](f: A => F[A])(s: A :: Rest): F[A :: Rest] =
(f(s.head) |@| restEach.each.modifyF(f)(s.tail)).map(_ :: _)
}
}
implicit def productEach[S, SGen <: HList, A](implicit gen: Generic.Aux[S, SGen], genEach: Each[SGen, A]): Each[S, A] = new Each[S, A] {
def each: Traversal[S, A] = new Traversal[S, A] {
def modifyF[F[_]: Applicative](f: A => F[A])(s: S): F[S] =
Applicative[F].map(genEach.each.modifyF(f)(gen.to(s)))(gen.from)
}
}
}
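// Editor's sketch (not part of the original file): assuming a case class
//   case class Point(x: Int, y: Int)
// productToTuple derives an Iso between the case class and its tuple form, and productEach
// a Traversal over all of its Int fields:
//   val iso = product.productToTuple[Point]
//   iso.get(Point(1, 2))                                    // (1, 2)
//   iso.reverseGet((3, 4))                                  // Point(3, 4)
//   implicitly[Each[Point, Int]].each.getAll(Point(1, 2))   // List(1, 2)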
|
fkz/Monocle
|
generic/shared/src/main/scala/monocle/generic/Product.scala
|
Scala
|
mit
| 1,292 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "Taurus"
val appVersion = "2.0-SNAPSHOT"
val appDependencies = Seq(
"postgresql" % "postgresql" % "9.1-901-1.jdbc4",
"com.typesafe.play.extras" % "iteratees-extras_2.10" % "1.0.1",
jdbc,
anorm
)
val main = play.Project(appName, appVersion, appDependencies).settings(
// Add your own project settings here
)
}
|
radonsky/Taurus
|
project/Build.scala
|
Scala
|
apache-2.0
| 473 |
package org.gbougeard.model.changes
/**
* Created with IntelliJ IDEA.
* User: gbougeard
* Date: 13/07/13
* Time: 21:23
* To change this template use File | Settings | File Templates.
*/
case class SubmitInput(wait_for_merge: Option[Boolean] = Some(false))
object SubmitInput {
import play.api.libs.json.Json
import play.api.libs.functional.syntax._
implicit val format = Json.format[SubmitInput]
}
|
gbougeard/gas
|
src/main/scala/org/gbougeard/model/changes/SubmitInput.scala
|
Scala
|
apache-2.0
| 414 |
package controllers
import java.util.concurrent.TimeoutException
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import javax.inject.Inject
import models.Category
import play.api.Logger
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import play.api.mvc.Action
import play.api.mvc.Controller
import reactivemongo.bson.BSONObjectID
import models.services.CategoryService
import play.api.cache.Cached
class CategoryControl @Inject() (catService: CategoryService, val messagesApi: MessagesApi, cached: Cached) extends Controller with I18nSupport {
implicit val timeout = 10.seconds
def category = cached("list_category").default(timeout){ Action.async { implicit request =>
val cats = catService.findSimplesCategories(new String)
cats.map {
cat => Ok(views.html.category.list_category(cat))
}.recover {
case t: TimeoutException =>
Logger.error("Problem adding in Category list process")
InternalServerError(t.getMessage)
}
}
}
def categoryManager = Authenticated.async { implicit request =>
val cats = catService.findSimplesCategories(new String)
cats.map {
cat => Ok(views.html.manager.category.list_category(cat))
}.recover {
case t: TimeoutException =>
Logger.error("Problem adding in Category list process")
InternalServerError(t.getMessage)
}
}
def add = Authenticated.async { implicit request =>
Category.formGall.bindFromRequest.fold(
error => Future.successful(Ok(views.html.manager.category.create_category(error))),
data => {
catService.findOneCategory(data._id.getOrElse("")).map {
case Some(cat) => {
catService.updateCategory(data)
Redirect(routes.CategoryControl.categoryManager()).flashing("success" -> messagesApi("success.update"))
}
case None => {
val cat = data.copy(_id = Some(BSONObjectID.generate.stringify))
catService.addCategory(cat)
Redirect(routes.CategoryControl.categoryManager()).flashing("success" -> messagesApi("success.add"))
}
}
}).recover {
case t: TimeoutException =>
Logger.error("Problem adding in Category list process")
InternalServerError(t.getMessage)
}
}
def edit(id: String) = Authenticated.async { implicit request =>
catService.findOneCategory(id).map {
case Some(cat) => Ok(views.html.manager.category.create_category(Category.formGall.fill(cat)))
case None => Redirect(routes.CategoryControl.categoryManager())
}.recover {
case t: TimeoutException =>
Logger.error("Problem adding in Category list process")
InternalServerError(t.getMessage)
}
}
def remove(id: String) = Authenticated.async { implicit request =>
catService.removeCategory(id).map {
case Some(ok) => Redirect(routes.CategoryControl.categoryManager()).flashing("success" -> messagesApi("success.remove"))
case None => Redirect(routes.CategoryControl.categoryManager()).flashing("fail" -> messagesApi("fail.remove.category"))
}.recover {
case t: TimeoutException =>
Logger.error("Problem adding in Category list process")
InternalServerError(t.getMessage)
}
}
}
|
carlosFattor/DoceTentacaoScala
|
app/controllers/CategoryControl.scala
|
Scala
|
apache-2.0
| 3,375 |
package controllers.services
import javax.inject.Inject
import _root_.util.ClusterAgentRepoProvider
import akka.util.Timeout
import api.FactorialService.Compute
import play.api.mvc._
import akka.pattern.ask
import play.api.libs.json._
import api.FactorialService
import scala.concurrent.duration._
import play.api.libs.concurrent.Execution.Implicits._
class Factorial @Inject() (repoProvider: ClusterAgentRepoProvider) extends Controller {
val repo = repoProvider.get()
implicit val to = Timeout(3.seconds)
implicit val binIntFormat = new Format[BigInt] {
def reads(json: JsValue): JsResult[BigInt] = json.validate[JsNumber].map(_.value.toBigInt())
def writes(o: BigInt): JsValue = JsNumber(o.toLong)
}
implicit val format = Json.format[FactorialService.Result]
// normal way
def calcNormal(number: Int) = Action.async { _ ⇒
repo.factorial ? Compute(number) map {
case r: FactorialService.Result ⇒ Ok(Json.toJson(r))
case _ ⇒ InternalServerError
}
}
// dsl way
import asobu.dsl.Syntax._
import asobu.dsl.akka.Builders._
import asobu.dsl.DefaultImplicits._
val calcDsl = handle(
process[Compute].using(repo.factorial) >>
expect[FactorialService.Result].respondJson(Ok)
)
}
|
kailuowang/asobu
|
example/frontend/app/controllers/services/Factorial.scala
|
Scala
|
apache-2.0
| 1,244 |
package sbt
package appmacro
import scala.reflect._
import macros._
import scala.tools.nsc.Global
import ContextUtil.{ DynamicDependencyError, DynamicReferenceError }
object ContextUtil {
final val DynamicDependencyError = "Illegal dynamic dependency"
final val DynamicReferenceError = "Illegal dynamic reference"
/**
* Constructs an object with utility methods for operating in the provided macro context `c`.
* Callers should explicitly specify the type parameter as `c.type` in order to preserve the path dependent types.
*/
def apply[C <: Context with Singleton](c: C): ContextUtil[C] = new ContextUtil(c)
/**
* Helper for implementing a no-argument macro that is introduced via an implicit.
* This method removes the implicit conversion and evaluates the function `f` on the target of the conversion.
*
* Given `myImplicitConversion(someValue).extensionMethod`, where `extensionMethod` is a macro that uses this
* method, the result of this method is `f(<Tree of someValue>)`.
*/
def selectMacroImpl[T: c.WeakTypeTag](c: Context)(f: (c.Expr[Any], c.Position) => c.Expr[T]): c.Expr[T] =
{
import c.universe._
c.macroApplication match {
case s @ Select(Apply(_, t :: Nil), tp) => f(c.Expr[Any](t), s.pos)
case x => unexpectedTree(x)
}
}
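  // Editor's sketch (hypothetical, not part of the original file): a no-argument extension
  // macro introduced via an implicit conversion could delegate to selectMacroImpl like this;
  // `target` is the Expr of the value that the implicit conversion wrapped.
  //   def describeImpl(c: Context): c.Expr[String] =
  //     selectMacroImpl[String](c) { (target, pos) =>
  //       c.universe.reify(target.splice.toString)
  //     }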
def unexpectedTree[C <: Context](tree: C#Tree): Nothing = sys.error("Unexpected macro application tree (" + tree.getClass + "): " + tree)
}
// TODO 2.11 Remove this after dropping 2.10.x support.
private object HasCompat { val compat = ??? }; import HasCompat._
/**
* Utility methods for macros. Several methods assume that the context's universe is a full compiler (`scala.tools.nsc.Global`).
* This is not thread safe due to the underlying Context and related data structures not being thread safe.
* Use `ContextUtil[c.type](c)` to construct.
*/
final class ContextUtil[C <: Context](val ctx: C) {
import ctx.universe.{ Apply => ApplyTree, _ }
import compat._
val powerContext = ctx.asInstanceOf[reflect.macros.runtime.Context]
val global: powerContext.universe.type = powerContext.universe
def callsiteTyper: global.analyzer.Typer = powerContext.callsiteTyper
val initialOwner: Symbol = callsiteTyper.context.owner.asInstanceOf[ctx.universe.Symbol]
lazy val alistType = ctx.typeOf[AList[KList]]
lazy val alist: Symbol = alistType.typeSymbol.companionSymbol
lazy val alistTC: Type = alistType.typeConstructor
/** Modifiers for a local val.*/
lazy val localModifiers = Modifiers(NoFlags)
def getPos(sym: Symbol) = if (sym eq null) NoPosition else sym.pos
/**
* Constructs a unique term name with the given prefix within this Context.
   * (The current implementation uses Context.fresh, which increments an internal counter to ensure uniqueness.)
*/
def freshTermName(prefix: String) = newTermName(ctx.fresh("$" + prefix))
/**
   * Constructs a new, synthetic, local ValDef with Type `tpe`, a unique name,
* Position `pos`, an empty implementation (no rhs), and owned by `owner`.
*/
def freshValDef(tpe: Type, pos: Position, owner: Symbol): ValDef =
{
val SYNTHETIC = (1 << 21).toLong.asInstanceOf[FlagSet]
val sym = owner.newTermSymbol(freshTermName("q"), pos, SYNTHETIC)
setInfo(sym, tpe)
val vd = ValDef(sym, EmptyTree)
vd.setPos(pos)
vd
}
lazy val parameterModifiers = Modifiers(Flag.PARAM)
/**
* Collects all definitions in the tree for use in checkReferences.
* This excludes definitions in wrapped expressions because checkReferences won't allow nested dereferencing anyway.
*/
def collectDefs(tree: Tree, isWrapper: (String, Type, Tree) => Boolean): collection.Set[Symbol] =
{
val defs = new collection.mutable.HashSet[Symbol]
// adds the symbols for all non-Ident subtrees to `defs`.
val process = new Traverser {
override def traverse(t: Tree) = t match {
case _: Ident => ()
case ApplyTree(TypeApply(Select(_, nme), tpe :: Nil), qual :: Nil) if isWrapper(nme.decoded, tpe.tpe, qual) => ()
case tree =>
if (tree.symbol ne null) defs += tree.symbol;
super.traverse(tree)
}
}
process.traverse(tree)
defs
}
/**
* A reference is illegal if it is to an M instance defined within the scope of the macro call.
   * As an approximation, disallow references to any local definitions `defs`.
*/
def illegalReference(defs: collection.Set[Symbol], sym: Symbol): Boolean =
sym != null && sym != NoSymbol && defs.contains(sym)
/**
* A function that checks the provided tree for illegal references to M instances defined in the
* expression passed to the macro and for illegal dereferencing of M instances.
*/
def checkReferences(defs: collection.Set[Symbol], isWrapper: (String, Type, Tree) => Boolean): Tree => Unit = {
case s @ ApplyTree(TypeApply(Select(_, nme), tpe :: Nil), qual :: Nil) =>
if (isWrapper(nme.decoded, tpe.tpe, qual)) ctx.error(s.pos, DynamicDependencyError)
case id @ Ident(name) if illegalReference(defs, id.symbol) => ctx.error(id.pos, DynamicReferenceError + ": " + name)
case _ => ()
}
/** Constructs a ValDef with a parameter modifier, a unique name, with the provided Type and with an empty rhs. */
def freshMethodParameter(tpe: Type): ValDef =
ValDef(parameterModifiers, freshTermName("p"), TypeTree(tpe), EmptyTree)
/** Constructs a ValDef with local modifiers and a unique name. */
def localValDef(tpt: Tree, rhs: Tree): ValDef =
ValDef(localModifiers, freshTermName("q"), tpt, rhs)
/** Constructs a tuple value of the right TupleN type from the provided inputs.*/
def mkTuple(args: List[Tree]): Tree =
global.gen.mkTuple(args.asInstanceOf[List[global.Tree]]).asInstanceOf[ctx.universe.Tree]
def setSymbol[Tree](t: Tree, sym: Symbol): Unit =
t.asInstanceOf[global.Tree].setSymbol(sym.asInstanceOf[global.Symbol])
def setInfo[Tree](sym: Symbol, tpe: Type): Unit =
sym.asInstanceOf[global.Symbol].setInfo(tpe.asInstanceOf[global.Type])
/** Creates a new, synthetic type variable with the specified `owner`. */
def newTypeVariable(owner: Symbol, prefix: String = "T0"): TypeSymbol =
owner.asInstanceOf[global.Symbol].newSyntheticTypeParam(prefix, 0L).asInstanceOf[ctx.universe.TypeSymbol]
/** The type representing the type constructor `[X] X` */
lazy val idTC: Type =
{
val tvar = newTypeVariable(NoSymbol)
polyType(tvar :: Nil, refVar(tvar))
}
/** A Type that references the given type variable. */
def refVar(variable: TypeSymbol): Type = variable.toTypeConstructor
/** Constructs a new, synthetic type variable that is a type constructor. For example, in type Y[L[x]], L is such a type variable. */
def newTCVariable(owner: Symbol): TypeSymbol =
{
val tc = newTypeVariable(owner)
val arg = newTypeVariable(tc, "x")
tc.setTypeSignature(PolyType(arg :: Nil, emptyTypeBounds))
tc
}
/** >: Nothing <: Any */
def emptyTypeBounds: TypeBounds = TypeBounds(definitions.NothingClass.toType, definitions.AnyClass.toType)
/** Creates a new anonymous function symbol with Position `pos`. */
def functionSymbol(pos: Position): Symbol =
callsiteTyper.context.owner.newAnonymousFunctionValue(pos.asInstanceOf[global.Position]).asInstanceOf[ctx.universe.Symbol]
def functionType(args: List[Type], result: Type): Type =
{
val tpe = global.definitions.functionType(args.asInstanceOf[List[global.Type]], result.asInstanceOf[global.Type])
tpe.asInstanceOf[Type]
}
/** Create a Tree that references the `val` represented by `vd`, copying attributes from `replaced`. */
def refVal(replaced: Tree, vd: ValDef): Tree =
treeCopy.Ident(replaced, vd.name).setSymbol(vd.symbol)
/** Creates a Function tree using `functionSym` as the Symbol and changing `initialOwner` to `functionSym` in `body`.*/
def createFunction(params: List[ValDef], body: Tree, functionSym: Symbol): Tree =
{
changeOwner(body, initialOwner, functionSym)
val f = Function(params, body)
setSymbol(f, functionSym)
f
}
def changeOwner(tree: Tree, prev: Symbol, next: Symbol): Unit =
new ChangeOwnerAndModuleClassTraverser(prev.asInstanceOf[global.Symbol], next.asInstanceOf[global.Symbol]).traverse(tree.asInstanceOf[global.Tree])
  // Workaround copied from scala/async: can be removed once https://github.com/scala/scala/pull/3179 is merged.
private[this] class ChangeOwnerAndModuleClassTraverser(oldowner: global.Symbol, newowner: global.Symbol) extends global.ChangeOwnerTraverser(oldowner, newowner) {
override def traverse(tree: global.Tree) {
tree match {
case _: global.DefTree => change(tree.symbol.moduleClass)
case _ =>
}
super.traverse(tree)
}
}
/** Returns the Symbol that references the statically accessible singleton `i`. */
def singleton[T <: AnyRef with Singleton](i: T)(implicit it: ctx.TypeTag[i.type]): Symbol =
it.tpe match {
case SingleType(_, sym) if !sym.isFreeTerm && sym.isStatic => sym
case x => sys.error("Instance must be static (was " + x + ").")
}
def select(t: Tree, name: String): Tree = Select(t, newTermName(name))
/** Returns the symbol for the non-private method named `name` for the class/module `obj`. */
def method(obj: Symbol, name: String): Symbol = {
val ts: Type = obj.typeSignature
val m: global.Symbol = ts.asInstanceOf[global.Type].nonPrivateMember(global.newTermName(name))
m.asInstanceOf[Symbol]
}
/**
* Returns a Type representing the type constructor tcp.<name>. For example, given
* `object Demo { type M[x] = List[x] }`, the call `extractTC(Demo, "M")` will return a type representing
* the type constructor `[x] List[x]`.
*/
def extractTC(tcp: AnyRef with Singleton, name: String)(implicit it: ctx.TypeTag[tcp.type]): ctx.Type =
{
val itTpe = it.tpe.asInstanceOf[global.Type]
val m = itTpe.nonPrivateMember(global.newTypeName(name))
val tc = itTpe.memberInfo(m).asInstanceOf[ctx.universe.Type]
assert(tc != NoType && tc.takesTypeArgs, "Invalid type constructor: " + tc)
tc
}
/**
* Substitutes wrappers in tree `t` with the result of `subWrapper`.
* A wrapper is a Tree of the form `f[T](v)` for which isWrapper(<Tree of f>, <Underlying Type>, <qual>.target) returns true.
* Typically, `f` is a `Select` or `Ident`.
* The wrapper is replaced with the result of `subWrapper(<Type of T>, <Tree of v>, <wrapper Tree>)`
*/
def transformWrappers(t: Tree, subWrapper: (String, Type, Tree, Tree) => Converted[ctx.type]): Tree =
{
// the main tree transformer that replaces calls to InputWrapper.wrap(x) with
// plain Idents that reference the actual input value
object appTransformer extends Transformer {
override def transform(tree: Tree): Tree =
tree match {
case ApplyTree(TypeApply(Select(_, nme), targ :: Nil), qual :: Nil) =>
subWrapper(nme.decoded, targ.tpe, qual, tree) match {
case Converted.Success(t, finalTx) =>
changeOwner(qual, currentOwner, initialOwner) // Fixes https://github.com/sbt/sbt/issues/1150
finalTx(t)
case Converted.Failure(p, m) => ctx.abort(p, m)
case _: Converted.NotApplicable[_] => super.transform(tree)
}
case _ => super.transform(tree)
}
}
appTransformer.atOwner(initialOwner) {
appTransformer.transform(t)
}
}
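  // Hedged illustration (an editor's note, not part of the original source): for a wrapper
  // call of the shape `Wrapper.wrap[T](v)`, the transformer above invokes
  //   subWrapper("wrap", <Type of T>, <Tree of v>, <Tree of the whole call>)
  // and, on Converted.Success, splices the returned tree in place of the call after
  // applying the finalTx step.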
}
|
pdalpra/sbt
|
util/appmacro/src/main/scala/sbt/appmacro/ContextUtil.scala
|
Scala
|
bsd-3-clause
| 11,673 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation.extern
import leon.lang._
object OptionFromLib {
case class Dummy1(val x: Int)
case class Dummy2(val opt: Option[Int])
def foo(x: Int): Option[Int] = {
require(x >= 0)
if (x % 2 == 1) Some(x)
else None[Int]
}
def bar(x: Int): Option[Dummy1] = {
require(x >= 0)
if (x % 2 != 0) Some(Dummy1(x))
else None[Dummy1]
}
def baz(opt: Option[Int]): Dummy2 = {
Dummy2(opt)
}
def funnyTwice(x: Int) = Some(x + x)
def test1(): Int = {
val o1 = foo(1)
val o2 = foo(2)
if (o1.nonEmpty && o2.isEmpty && o1.get == 1) 0
else 1
} ensuring { _ == 0 }
def test2(): Int = {
val o1 = bar(1)
val o2 = bar(2)
if (o1.nonEmpty && o2.isEmpty && o1.get.x == 1) 0
else 1
} ensuring { _ == 0 }
def test3(): Int = {
val o = baz(Some(42))
if (o.opt.isDefined && o.opt.get == 42) 0
else 1
} ensuring { _ == 0 }
// Below we test the inlining of several methods
def testGetOrElse(): Int = {
Some(5) getOrElse { 6 }
} ensuring { _ == 5 }
def testOrElse(x: Int): Int = {
require(x >= 0 && x < 2147483646)
foo(x) orElse { foo(x + 1) } get
} ensuring { res =>
((x % 2 == 1) ==> (res == x)) &&
((x % 2 == 0) ==> (res == x + 1))
}
def testMap(x: Int): Int = {
funnyTwice(x) map { y: Int => y + x } get
} ensuring { _ == x * 3 }
def testFlatMap(x: Int): Int = {
funnyTwice(x) flatMap { y => funnyTwice(y) } get
} ensuring { _ == x * 4 }
def testFilter(x: Int): Int = {
funnyTwice(x) filter { _ % 2 == 0 } get
} ensuring { _ == 2 * x }
def testWithFilter(x: Int): Int = {
funnyTwice(x) withFilter { _ % 2 == 0 } get
} ensuring { _ == 2 * x }
def testForall(x: Int): Int = {
if (funnyTwice(x) forall { _ % 2 == 0 }) 0
else 1
} ensuring { _ == 0 }
def testExists(x: Int): Int = {
if (funnyTwice(x) exists { _ % 2 == 0 }) 0
else 1
} ensuring { _ == 0 }
def _main() = {
test1() +
test2() +
test3() +
(testGetOrElse() - 5) +
(testOrElse(0) - 1) +
(testOrElse(1) - 1) +
(testMap(0)) +
(testMap(2) - 6) +
(testFlatMap(3) - 12) +
(testFilter(0)) +
(testFilter(-1) + 2) +
(testWithFilter(-50) + 100) +
(testForall(42)) +
(testExists(58))
} ensuring { _ == 0 }
@extern
def main(args: Array[String]): Unit = _main()
}
|
epfl-lara/leon
|
src/test/resources/regression/genc/valid/OptionFromLib.scala
|
Scala
|
gpl-3.0
| 2,428 |
package name.ambidextrous.rorganize
// Reads the user input
import java.io.File
class UserInterface {
def getDirectoryName(): String = {
println("Enter a directory name: ")
val dir = readLine()
dir
}
def getFilePattern(): String = {
println("What pattern of files? ")
val pattern = readLine()
pattern
}
def getOption(): Int = {
println("0 moves, 1 renames. ")
val opt: Int = readInt()
opt
}
}
object Main extends App {
val ui = new UserInterface
println(ui.getDirectoryName)
// println(ui.getFilePattern)
// println(ui.getOption)
new FileSystemHandler().getAllFiles("./")
new FileSystemHandler().getFilesWithExtension("./", "scala")
}
|
ambidextrousTx/ROrganize
|
UserInterface.scala
|
Scala
|
bsd-2-clause
| 700 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.batch
import org.apache.flink.api.dag.Transformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.io.network.DataExchangeMode
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.streaming.api.transformations.{PartitionTransformation, ShuffleMode}
import org.apache.flink.streaming.runtime.partitioner.{BroadcastPartitioner, GlobalPartitioner, RebalancePartitioner}
import org.apache.flink.table.api.BatchTableEnvironment
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.{CodeGeneratorContext, HashCodeGenerator}
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.plan.nodes.common.CommonPhysicalExchange
import org.apache.flink.table.plan.nodes.exec.{BatchExecNode, ExecNode}
import org.apache.flink.table.runtime.BinaryHashPartitioner
import org.apache.flink.table.types.logical.RowType
import org.apache.flink.table.typeutils.BaseRowTypeInfo
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.{RelDistribution, RelNode, RelWriter}
import java.util
import scala.collection.JavaConversions._
/**
* This RelNode represents a change of partitioning of the input elements.
*
 * If the type of its relDistribution is not range, this node does not create a physical
 * transformation; it only affects how upstream operations are connected to downstream
 * operations.
 *
 * If the type is range, however, this node creates physical transformations because it must
 * compute the data distribution. To do so, the received stream is split into two processing
 * streams. The first stream is sampled and its statistics are used to compute the data
 * distribution in pipelined mode, while the second stream is blocked. Once the distribution
 * has been computed from the first stream, the two streams are unioned, so that records can
 * be partitioned according to the computed distribution. The RelNode then creates the
 * following transformations.
*
* +---------------------------------------------------------------------------------------------+
* | |
* | +-----------------------------+ |
* | | Transformation | ------------------------------------> |
* | +-----------------------------+ | |
* | | | |
* | | | |
* | |forward & PIPELINED | |
* | \\|/ | |
* | +--------------------------------------------+ | |
* | | OneInputTransformation[LocalSample, n] | | |
* | +--------------------------------------------+ | |
* | | |forward & BATCH |
* | |forward & PIPELINED | |
* | \\|/ | |
* | +--------------------------------------------------+ | |
* | |OneInputTransformation[SampleAndHistogram, 1] | | |
* | +--------------------------------------------------+ | |
* | | | |
* | |broadcast & PIPELINED | |
* | | | |
* | \\|/ \\|/ |
* | +---------------------------------------------------+------------------------------+ |
* | | TwoInputTransformation[AssignRangeId, n] | |
* | +----------------------------------------------------+-----------------------------+ |
* | | |
* | |custom & PIPELINED |
* | \\|/ |
* | +---------------------------------------------------+------------------------------+ |
* | | OneInputTransformation[RemoveRangeId, n] | |
* | +----------------------------------------------------+-----------------------------+ |
* | |
* +---------------------------------------------------------------------------------------------+
*/
class BatchExecExchange(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
relDistribution: RelDistribution)
extends CommonPhysicalExchange(cluster, traitSet, inputRel, relDistribution)
with BatchPhysicalRel
with BatchExecNode[BaseRow] {
// TODO reuse PartitionTransformation
  // currently, an Exchange's input transformation will be reused if it is reusable,
  // and different PartitionTransformation objects that share the same input will be created.
// cache input transformation to reuse
private var reusedInput: Option[Transformation[BaseRow]] = None
  // the required exchange mode for a reusable BatchExecExchange
// if it's None, use value from getDataExchangeMode
private var requiredExchangeMode: Option[DataExchangeMode] = None
override def copy(
traitSet: RelTraitSet,
newInput: RelNode,
newDistribution: RelDistribution): BatchExecExchange = {
new BatchExecExchange(cluster, traitSet, newInput, relDistribution)
}
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.itemIf("exchange_mode", requiredExchangeMode.orNull,
requiredExchangeMode.contains(DataExchangeMode.BATCH))
}
//~ ExecNode methods -----------------------------------------------------------
def setRequiredDataExchangeMode(exchangeMode: DataExchangeMode): Unit = {
require(exchangeMode != null)
requiredExchangeMode = Some(exchangeMode)
}
private[flink] def getDataExchangeMode(tableConf: Configuration): DataExchangeMode = {
requiredExchangeMode match {
case Some(mode) if mode eq DataExchangeMode.BATCH => mode
case _ => DataExchangeMode.PIPELINED
}
}
override def getDamBehavior: DamBehavior = {
distribution.getType match {
case RelDistribution.Type.RANGE_DISTRIBUTED => DamBehavior.FULL_DAM
case _ => DamBehavior.PIPELINED
}
}
override def getInputNodes: util.List[ExecNode[BatchTableEnvironment, _]] =
getInputs.map(_.asInstanceOf[ExecNode[BatchTableEnvironment, _]])
override def replaceInputNode(
ordinalInParent: Int,
newInputNode: ExecNode[BatchTableEnvironment, _]): Unit = {
replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
}
override def translateToPlanInternal(
tableEnv: BatchTableEnvironment): Transformation[BaseRow] = {
val input = reusedInput match {
case Some(transformation) => transformation
case None =>
val input = getInputNodes.get(0).translateToPlan(tableEnv)
.asInstanceOf[Transformation[BaseRow]]
reusedInput = Some(input)
input
}
val inputType = input.getOutputType.asInstanceOf[BaseRowTypeInfo]
val outputRowType = BaseRowTypeInfo.of(FlinkTypeFactory.toLogicalRowType(getRowType))
val shuffleMode = requiredExchangeMode match {
case None => ShuffleMode.PIPELINED
case Some(mode) =>
mode match {
case DataExchangeMode.BATCH => ShuffleMode.BATCH
case DataExchangeMode.PIPELINED => ShuffleMode.PIPELINED
}
}
relDistribution.getType match {
case RelDistribution.Type.ANY =>
val transformation = new PartitionTransformation(
input,
null,
shuffleMode)
transformation.setOutputType(outputRowType)
transformation
case RelDistribution.Type.SINGLETON =>
val transformation = new PartitionTransformation(
input,
new GlobalPartitioner[BaseRow],
shuffleMode)
transformation.setOutputType(outputRowType)
transformation
case RelDistribution.Type.RANDOM_DISTRIBUTED =>
val transformation = new PartitionTransformation(
input,
new RebalancePartitioner[BaseRow],
shuffleMode)
transformation.setOutputType(outputRowType)
transformation
case RelDistribution.Type.BROADCAST_DISTRIBUTED =>
val transformation = new PartitionTransformation(
input,
new BroadcastPartitioner[BaseRow],
shuffleMode)
transformation.setOutputType(outputRowType)
transformation
case RelDistribution.Type.HASH_DISTRIBUTED =>
// TODO Eliminate duplicate keys
val keys = relDistribution.getKeys
val partitioner = new BinaryHashPartitioner(
HashCodeGenerator.generateRowHash(
CodeGeneratorContext(tableEnv.config),
RowType.of(inputType.getLogicalTypes: _*),
"HashPartitioner",
keys.map(_.intValue()).toArray),
keys.map(getInput.getRowType.getFieldNames.get(_)).toArray
)
val transformation = new PartitionTransformation(
input,
partitioner,
shuffleMode)
transformation.setOutputType(outputRowType)
transformation
case _ =>
throw new UnsupportedOperationException(
s"not support RelDistribution: ${relDistribution.getType} now!")
}
}
}
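// Editor's summary sketch (not part of the Flink sources): translateToPlanInternal above maps
// the RelDistribution type to a StreamPartitioner roughly as follows, always wrapping the
// (possibly reused) input in a new PartitionTransformation with the chosen ShuffleMode:
//   ANY                   -> no partitioner (null)
//   SINGLETON             -> GlobalPartitioner
//   RANDOM_DISTRIBUTED    -> RebalancePartitioner
//   BROADCAST_DISTRIBUTED -> BroadcastPartitioner
//   HASH_DISTRIBUTED      -> BinaryHashPartitioner over a generated row hash of the keys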
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/batch/BatchExecExchange.scala
|
Scala
|
apache-2.0
| 11,266 |
/*
@meta {
"processorId": "org.helgoboss.scala_bundle:1.0.0",
"projectId": "org.helgoboss:mini-obr-runtime:1.0-SNAPSHOT",
"dependencies": [
"com.weiglewilczek.scala-lang-osgi:scala-library:2.9.1",
"org.helgoboss:commandlet:1.0-SNAPSHOT"
],
"transformers": [
"org.helgoboss.my_oss:1.0.0"
]
}
*/
package org.helgoboss.mini_obr_runtime
import org.helgoboss.commandlet._
trait MiniObrRuntime {
def commandletExecutionContext: ExecutionContext
def stop(exitCode: Int)
}
|
helgoboss/mini-obr-runtime
|
org.helgoboss.mini-obr-runtime.scala
|
Scala
|
mit
| 516 |
package com.spann
import com.spann.models.Station
import com.spann.utils.{RacerStatus, StationStatus}
import scala.collection.mutable
class StationHandler {
}
object StationHandler {
var stationList = mutable.MutableList[Station]()
var stationStatus = mutable.HashMap[String, StationStatus]()
def initializeStations = {
stationList += Station("a", None, 0)
stationList += Station("b", Station.findByName("a"), 10)
stationList += Station("e", Station.findByName("b"), 8)
stationList += Station("f", Station.findByName("e"), 6)
stationList += Station("c", Station.findByName("f"), 10)
stationList += Station("i", Station.findByName("c"), 12)
stationList += Station("g", Station.findByName("i"), 10)
stationList += Station("h", Station.findByName("g"), 5)
stationList += Station("d", Station.findByName("h"), 20)
stationList += Station("j", Station.findByName("d"), 20)
stationList.foreach {
station =>
stationStatus(station.name) = StationStatus(station, new mutable.MutableList[Int](), new mutable.MutableList[Int]())
}
}
def initializeStationStatus(racerId: Int, source: Station, destination: Station) = {
var i = stationList.indexOf(destination) - 1
var station: Station = stationList.get(i).get
while (source != station) {
val presentStatus = stationStatus(station.name)
stationStatus(station.name) = StationStatus(station, presentStatus.vehiclesApproaching += racerId, new mutable.MutableList[Int]())
i = i - 1
station = stationList.get(i).get
}
// println("stationList status:"+stationStatus)
}
def getDistanceBetweenStations(a: Station, b: Station): Int = {
var distance: Int = 0
val indexOfA = stationList.indexOf(a)
val indexOfB = stationList.indexOf(b)
if (indexOfA < indexOfB) {
// move left
var currentStation = b
var previousStation = b.previousStation.get
while (currentStation != a) {
distance = distance + currentStation.distanceFromPreviousStation
currentStation = previousStation
if (previousStation.previousStation.isDefined)
previousStation = previousStation.previousStation.get
}
} else {
// move right
var currentStation = a
var previousStation = a.previousStation.get
while (currentStation != b) {
distance = distance + currentStation.distanceFromPreviousStation
currentStation = previousStation
if (previousStation.previousStation.isDefined)
previousStation = previousStation.previousStation.get
}
}
distance
}
def getStationByName(name: String) = {
var stationFound: Option[Station] = None
stationList.filter(_.name == name).foreach {
station =>
stationFound = Some(station)
}
stationFound
}
def updateStationStatus(racerId: Int, racerStatus: RacerStatus) = {
val previous = getPreviousStationsForRacer(racerStatus.source, racerStatus.distanceTravelled)
updateStationReport(racerId, previous)
}
def updateStationReport(racerId: Int, previousStation: Option[Station]) = {
if (previousStation.isDefined) {
// updated previous station data
val presentStatus = stationStatus(previousStation.get.name)
if (presentStatus.vehiclesApproaching.contains(racerId)) {
stationStatus(previousStation.get.name) = StationStatus(previousStation.get, presentStatus.vehiclesApproaching.filter(_ != racerId),
presentStatus.vehiclesPassed += racerId)
// println("stationStatus: "+stationStatus)
}
}
}
def getPreviousStationsForRacer(source: Station, distanceTravelled: Int) = {
val indexOfSource = stationList.indexOf(source)
var indexOfNextStation = indexOfSource + 1
var distance = getDistanceBetweenStations(stationList.get(indexOfSource).get, stationList.get(indexOfNextStation).get)
var previousStation: Option[Station] = None
while (distance < distanceTravelled) {
previousStation = stationList.get(indexOfNextStation)
indexOfNextStation = indexOfNextStation + 1
distance = distance + previousStation.get.distanceFromPreviousStation
}
previousStation
}
}
|
sunil-123/SpannRace
|
src/main/scala/com/spann/StationHandler.scala
|
Scala
|
mit
| 4,220 |
package com.twitter.inject.utils
import com.twitter.finagle.{FailedFastException, SourcedException, TimeoutException}
import com.twitter.util.Throwables._
import com.twitter.util.{Throw, Try}
object ExceptionUtils {
def stripNewlines(e: Throwable): String = {
stripNewlines(e.toString)
}
def stripNewlines(str: String): String = {
str.replace("\n\twith NoSources", "")
}
def toExceptionDetails(exception: Throwable): String = {
mkString(exception).mkString("/")
}
def toExceptionMessage(tryThrowable: Try[_]): String = tryThrowable match {
case Throw(e) => toExceptionMessage(e)
case _ => ""
}
def toExceptionMessage(exception: Throwable): String = exception match {
case e: TimeoutException =>
e.exceptionMessage
case e: FailedFastException =>
e.getClass.getName
case e: SourcedException =>
stripNewlines(e)
case e =>
val msg = e.getMessage
if (msg == null || msg.isEmpty)
e.getClass.getName
else
e.getClass.getName + " " + msg
}
def toDetailedExceptionMessage(tryThrowable: Try[_]): String = tryThrowable match {
case Throw(e) => toExceptionDetails(e) + " " + toExceptionMessage(e)
case _ => ""
}
}
|
twitter/finatra
|
inject/inject-utils/src/main/scala/com/twitter/inject/utils/ExceptionUtils.scala
|
Scala
|
apache-2.0
| 1,229 |
package com.andre_cruz.collection
import com.andre_cruz.OptionUtils.optionIf
object TraversableOnceUtils {
implicit class RichTraversableOnce[+A](underlying: TraversableOnce[A]) {
def minOption[B >: A](implicit cmp: Ordering[B]): Option[A] = optionIf(underlying.nonEmpty) {
underlying.min(cmp)
}
def maxOption[B >: A](implicit cmp: Ordering[B]): Option[A] = optionIf(underlying.nonEmpty) {
underlying.max(cmp)
}
def minByOption[B](f: A => B)(implicit cmp: Ordering[B]): Option[A] = optionIf(underlying.nonEmpty) {
underlying.minBy(f)
}
def maxByOption[B](f: A => B)(implicit cmp: Ordering[B]): Option[A] = optionIf(underlying.nonEmpty) {
underlying.maxBy(f)
}
}
}
|
codecruzer/scala-utils
|
src/main/scala/com/andre_cruz/collection/TraversableOnceUtils.scala
|
Scala
|
apache-2.0
| 734 |
package org.odfi.indesign.module.git
import org.odfi.indesign.core.harvest.Harvester
import org.odfi.indesign.core.harvest.fs.HarvestedFile
import java.io.File
import org.odfi.indesign.core.harvest.fs.FileSystemHarvester
object GitHarvester extends FileSystemHarvester {
this.onDeliverFor[HarvestedFile] {
case f if (f.path.toFile.isDirectory && new File(f.path.toFile,".git/config").exists()) =>
var gitResource = new GitRepository(f)
gather(gitResource)
true
}
}
|
opendesignflow/indesign
|
indesign-git/src/main/scala/org/odfi/indesign/module/git/GitHarvester.scala
|
Scala
|
gpl-3.0
| 507 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.io._
import scala.util.parsing.combinator.RegexParsers
import com.fasterxml.jackson.core._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
private[this] sealed trait PathInstruction
private[this] object PathInstruction {
private[expressions] case object Subscript extends PathInstruction
private[expressions] case object Wildcard extends PathInstruction
private[expressions] case object Key extends PathInstruction
private[expressions] case class Index(index: Long) extends PathInstruction
private[expressions] case class Named(name: String) extends PathInstruction
}
private[this] sealed trait WriteStyle
private[this] object WriteStyle {
private[expressions] case object RawStyle extends WriteStyle
private[expressions] case object QuotedStyle extends WriteStyle
private[expressions] case object FlattenStyle extends WriteStyle
}
private[this] object JsonPathParser extends RegexParsers {
import PathInstruction._
def root: Parser[Char] = '$'
  def long: Parser[Long] = "\\d+".r ^? {
case x => x.toLong
}
// parse `[*]` and `[123]` subscripts
def subscript: Parser[List[PathInstruction]] =
for {
operand <- '[' ~> ('*' ^^^ Wildcard | long ^^ Index) <~ ']'
} yield {
Subscript :: operand :: Nil
}
// parse `.name` or `['name']` child expressions
def named: Parser[List[PathInstruction]] =
for {
      name <- '.' ~> "[^\\.\\[]+".r | "['" ~> "[^\\'\\?]+".r <~ "']"
} yield {
Key :: Named(name) :: Nil
}
// child wildcards: `..`, `.*` or `['*']`
def wildcard: Parser[List[PathInstruction]] =
(".*" | "['*']") ^^^ List(Wildcard)
def node: Parser[List[PathInstruction]] =
wildcard |
named |
subscript
val expression: Parser[List[PathInstruction]] = {
phrase(root ~> rep(node) ^^ (x => x.flatten))
}
def parse(str: String): Option[List[PathInstruction]] = {
this.parseAll(expression, str) match {
case Success(result, _) =>
Some(result)
case NoSuccess(msg, next) =>
None
}
}
}
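// Editor's worked example (not part of the Spark sources): given the grammar above,
// JsonPathParser.parse("$.a[0].b") is expected to yield
//   Some(List(Key, Named("a"), Subscript, Index(0), Key, Named("b")))
// while an input that does not start with the root symbol `$` yields None.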
private[this] object SharedFactory {
val jsonFactory = new JsonFactory()
// Enabled for Hive compatibility
jsonFactory.enable(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS)
}
/**
 * Extracts a JSON object from a JSON string based on the specified JSON path, and returns the
 * JSON string of the extracted object. It will return null if the input JSON string is invalid.
*/
@ExpressionDescription(
usage = "_FUNC_(json_txt, path) - Extracts a json object from `path`.",
examples = """
Examples:
> SELECT _FUNC_('{"a":"b"}', '$.a');
b
""")
case class GetJsonObject(json: Expression, path: Expression)
extends BinaryExpression with ExpectsInputTypes with CodegenFallback {
import com.fasterxml.jackson.core.JsonToken._
import PathInstruction._
import SharedFactory._
import WriteStyle._
override def left: Expression = json
override def right: Expression = path
override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
override def dataType: DataType = StringType
override def nullable: Boolean = true
override def prettyName: String = "get_json_object"
@transient private lazy val parsedPath = parsePath(path.eval().asInstanceOf[UTF8String])
override def eval(input: InternalRow): Any = {
val jsonStr = json.eval(input).asInstanceOf[UTF8String]
if (jsonStr == null) {
return null
}
val parsed = if (path.foldable) {
parsedPath
} else {
parsePath(path.eval(input).asInstanceOf[UTF8String])
}
if (parsed.isDefined) {
try {
/* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
detect character encoding which could fail for some malformed strings */
Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, jsonStr)) { parser =>
val output = new ByteArrayOutputStream()
val matched = Utils.tryWithResource(
jsonFactory.createGenerator(output, JsonEncoding.UTF8)) { generator =>
parser.nextToken()
evaluatePath(parser, generator, RawStyle, parsed.get)
}
if (matched) {
UTF8String.fromBytes(output.toByteArray)
} else {
null
}
}
} catch {
case _: JsonProcessingException => null
}
} else {
null
}
}
private def parsePath(path: UTF8String): Option[List[PathInstruction]] = {
if (path != null) {
JsonPathParser.parse(path.toString)
} else {
None
}
}
// advance to the desired array index, assumes to start at the START_ARRAY token
private def arrayIndex(p: JsonParser, f: () => Boolean): Long => Boolean = {
case _ if p.getCurrentToken == END_ARRAY =>
// terminate, nothing has been written
false
case 0 =>
// we've reached the desired index
val dirty = f()
while (p.nextToken() != END_ARRAY) {
// advance the token stream to the end of the array
p.skipChildren()
}
dirty
case i if i > 0 =>
// skip this token and evaluate the next
p.skipChildren()
p.nextToken()
arrayIndex(p, f)(i - 1)
}
/**
* Evaluate a list of JsonPath instructions, returning a bool that indicates if any leaf nodes
* have been written to the generator
*/
private def evaluatePath(
p: JsonParser,
g: JsonGenerator,
style: WriteStyle,
path: List[PathInstruction]): Boolean = {
(p.getCurrentToken, path) match {
case (VALUE_STRING, Nil) if style == RawStyle =>
// there is no array wildcard or slice parent, emit this string without quotes
if (p.hasTextCharacters) {
g.writeRaw(p.getTextCharacters, p.getTextOffset, p.getTextLength)
} else {
g.writeRaw(p.getText)
}
true
case (START_ARRAY, Nil) if style == FlattenStyle =>
// flatten this array into the parent
var dirty = false
while (p.nextToken() != END_ARRAY) {
dirty |= evaluatePath(p, g, style, Nil)
}
dirty
case (_, Nil) =>
// general case: just copy the child tree verbatim
g.copyCurrentStructure(p)
true
case (START_OBJECT, Key :: xs) =>
var dirty = false
while (p.nextToken() != END_OBJECT) {
if (dirty) {
// once a match has been found we can skip other fields
p.skipChildren()
} else {
dirty = evaluatePath(p, g, style, xs)
}
}
dirty
case (START_ARRAY, Subscript :: Wildcard :: Subscript :: Wildcard :: xs) =>
// special handling for the non-structure preserving double wildcard behavior in Hive
var dirty = false
g.writeStartArray()
while (p.nextToken() != END_ARRAY) {
dirty |= evaluatePath(p, g, FlattenStyle, xs)
}
g.writeEndArray()
dirty
case (START_ARRAY, Subscript :: Wildcard :: xs) if style != QuotedStyle =>
// retain Flatten, otherwise use Quoted... cannot use Raw within an array
val nextStyle = style match {
case RawStyle => QuotedStyle
case FlattenStyle => FlattenStyle
case QuotedStyle => throw new IllegalStateException()
}
// temporarily buffer child matches, the emitted json will need to be
// modified slightly if there is only a single element written
val buffer = new StringWriter()
var dirty = 0
Utils.tryWithResource(jsonFactory.createGenerator(buffer)) { flattenGenerator =>
flattenGenerator.writeStartArray()
while (p.nextToken() != END_ARRAY) {
// track the number of array elements and only emit an outer array if
// we've written more than one element, this matches Hive's behavior
dirty += (if (evaluatePath(p, flattenGenerator, nextStyle, xs)) 1 else 0)
}
flattenGenerator.writeEndArray()
}
val buf = buffer.getBuffer
if (dirty > 1) {
g.writeRawValue(buf.toString)
} else if (dirty == 1) {
// remove outer array tokens
g.writeRawValue(buf.substring(1, buf.length()-1))
} // else do not write anything
dirty > 0
case (START_ARRAY, Subscript :: Wildcard :: xs) =>
var dirty = false
g.writeStartArray()
while (p.nextToken() != END_ARRAY) {
// wildcards can have multiple matches, continually update the dirty count
dirty |= evaluatePath(p, g, QuotedStyle, xs)
}
g.writeEndArray()
dirty
case (START_ARRAY, Subscript :: Index(idx) :: (xs@Subscript :: Wildcard :: _)) =>
p.nextToken()
// we're going to have 1 or more results, switch to QuotedStyle
arrayIndex(p, () => evaluatePath(p, g, QuotedStyle, xs))(idx)
case (START_ARRAY, Subscript :: Index(idx) :: xs) =>
p.nextToken()
arrayIndex(p, () => evaluatePath(p, g, style, xs))(idx)
case (FIELD_NAME, Named(name) :: xs) if p.getCurrentName == name =>
// exact field match
if (p.nextToken() != JsonToken.VALUE_NULL) {
evaluatePath(p, g, style, xs)
} else {
false
}
case (FIELD_NAME, Wildcard :: xs) =>
// wildcard field match
p.nextToken()
evaluatePath(p, g, style, xs)
case _ =>
p.skipChildren()
false
}
}
}
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(jsonStr, p1, p2, ..., pn) - Returns a tuple like the function get_json_object, but it takes multiple names. All the input parameters and output column types are string.",
examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":2}', 'a', 'b');
1 2
""")
// scalastyle:on line.size.limit
case class JsonTuple(children: Seq[Expression])
extends Generator with CodegenFallback {
import SharedFactory._
override def nullable: Boolean = {
// a row is always returned
false
}
// if processing fails this shared value will be returned
@transient private lazy val nullRow: Seq[InternalRow] =
new GenericInternalRow(Array.ofDim[Any](fieldExpressions.length)) :: Nil
// the json body is the first child
@transient private lazy val jsonExpr: Expression = children.head
// the fields to query are the remaining children
@transient private lazy val fieldExpressions: Seq[Expression] = children.tail
  // eagerly evaluate any foldable field names
@transient private lazy val foldableFieldNames: IndexedSeq[Option[String]] = {
fieldExpressions.map {
case expr if expr.foldable => Option(expr.eval()).map(_.asInstanceOf[UTF8String].toString)
case _ => null
}.toIndexedSeq
}
  // and count the number of foldable fields; we'll use this later to optimize evaluation
@transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)
override def elementSchema: StructType = StructType(fieldExpressions.zipWithIndex.map {
case (_, idx) => StructField(s"c$idx", StringType, nullable = true)
})
override def prettyName: String = "json_tuple"
override def checkInputDataTypes(): TypeCheckResult = {
if (children.length < 2) {
TypeCheckResult.TypeCheckFailure(s"$prettyName requires at least two arguments")
} else if (children.forall(child => StringType.acceptsType(child.dataType))) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(s"$prettyName requires that all arguments are strings")
}
}
override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
val json = jsonExpr.eval(input).asInstanceOf[UTF8String]
if (json == null) {
return nullRow
}
try {
/* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
detect character encoding which could fail for some malformed strings */
Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
parseRow(parser, input)
}
} catch {
case _: JsonProcessingException =>
nullRow
}
}
private def parseRow(parser: JsonParser, input: InternalRow): Seq[InternalRow] = {
// only objects are supported
if (parser.nextToken() != JsonToken.START_OBJECT) {
return nullRow
}
// evaluate the field names as String rather than UTF8String to
// optimize lookups from the json token, which is also a String
val fieldNames = if (constantFields == fieldExpressions.length) {
// typically the user will provide the field names as foldable expressions
// so we can use the cached copy
foldableFieldNames.map(_.orNull)
} else if (constantFields == 0) {
// none are foldable so all field names need to be evaluated from the input row
fieldExpressions.map(_.eval(input).asInstanceOf[UTF8String].toString)
} else {
// if there is a mix of constant and non-constant expressions
// prefer the cached copy when available
foldableFieldNames.zip(fieldExpressions).map {
case (null, expr) => expr.eval(input).asInstanceOf[UTF8String].toString
case (fieldName, _) => fieldName.orNull
}
}
val row = Array.ofDim[Any](fieldNames.length)
// start reading through the token stream, looking for any requested field names
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken == JsonToken.FIELD_NAME) {
// check to see if this field is desired in the output
val jsonField = parser.getCurrentName
var idx = fieldNames.indexOf(jsonField)
if (idx >= 0) {
// it is, copy the child tree to the correct location in the output row
val output = new ByteArrayOutputStream()
// write the output directly to UTF8 encoded byte array
if (parser.nextToken() != JsonToken.VALUE_NULL) {
Utils.tryWithResource(jsonFactory.createGenerator(output, JsonEncoding.UTF8)) {
generator => copyCurrentStructure(generator, parser)
}
val jsonValue = UTF8String.fromBytes(output.toByteArray)
// SPARK-21804: json_tuple returns null values within repeated columns
            // except the first one, so we need to check the remaining fields.
do {
row(idx) = jsonValue
idx = fieldNames.indexOf(jsonField, idx + 1)
} while (idx >= 0)
}
}
}
// always skip children, it's cheap enough to do even if copyCurrentStructure was called
parser.skipChildren()
}
new GenericInternalRow(row) :: Nil
}
private def copyCurrentStructure(generator: JsonGenerator, parser: JsonParser): Unit = {
parser.getCurrentToken match {
// if the user requests a string field it needs to be returned without enclosing
// quotes which is accomplished via JsonGenerator.writeRaw instead of JsonGenerator.write
case JsonToken.VALUE_STRING if parser.hasTextCharacters =>
// slight optimization to avoid allocating a String instance, though the characters
// still have to be decoded... Jackson doesn't have a way to access the raw bytes
generator.writeRaw(parser.getTextCharacters, parser.getTextOffset, parser.getTextLength)
case JsonToken.VALUE_STRING =>
// the normal String case, pass it through to the output without enclosing quotes
generator.writeRaw(parser.getText)
case JsonToken.VALUE_NULL =>
// a special case that needs to be handled outside of this method.
// if a requested field is null, the result must be null. the easiest
// way to achieve this is just by ignoring null tokens entirely
throw new IllegalStateException("Do not attempt to copy a null field")
case _ =>
// handle other types including objects, arrays, booleans and numbers
generator.copyCurrentStructure(parser)
}
}
}
/**
 * Converts a JSON input string to a [[StructType]], [[ArrayType]] or [[MapType]]
* with the specified schema.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(jsonStr, schema[, options]) - Returns a struct value with the given `jsonStr` and `schema`.",
examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":0.8}', 'a INT, b DOUBLE');
{"a":1, "b":0.8}
> SELECT _FUNC_('{"time":"26/08/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MM/yyyy'));
{"time":"2015-08-26 00:00:00.0"}
""",
since = "2.2.0")
// scalastyle:on line.size.limit
case class JsonToStructs(
schema: DataType,
options: Map[String, String],
child: Expression,
timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
val forceNullableSchema = SQLConf.get.getConf(SQLConf.FROM_JSON_FORCE_NULLABLE_SCHEMA)
// The JSON input data might be missing certain fields. We force the nullability
// of the user-provided schema to avoid data corruptions. In particular, the parquet-mr encoder
// can generate incorrect files if values are missing in columns declared as non-nullable.
val nullableSchema = if (forceNullableSchema) schema.asNullable else schema
override def nullable: Boolean = true
// Used in `FunctionRegistry`
def this(child: Expression, schema: Expression, options: Map[String, String]) =
this(
schema = ExprUtils.evalTypeExpr(schema),
options = options,
child = child,
timeZoneId = None)
def this(child: Expression, schema: Expression) = this(child, schema, Map.empty[String, String])
def this(child: Expression, schema: Expression, options: Expression) =
this(
schema = ExprUtils.evalTypeExpr(schema),
options = ExprUtils.convertToMapData(options),
child = child,
timeZoneId = None)
override def checkInputDataTypes(): TypeCheckResult = nullableSchema match {
case _: StructType | _: ArrayType | _: MapType =>
super.checkInputDataTypes()
case _ => TypeCheckResult.TypeCheckFailure(
s"Input schema ${nullableSchema.catalogString} must be a struct, an array or a map.")
}
// This converts parsed rows to the desired output by the given schema.
@transient
lazy val converter = nullableSchema match {
case _: StructType =>
(rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next() else null
case _: ArrayType =>
(rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next().getArray(0) else null
case _: MapType =>
(rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next().getMap(0) else null
}
val nameOfCorruptRecord = SQLConf.get.getConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD)
@transient lazy val parser = {
val parsedOptions = new JSONOptions(options, timeZoneId.get, nameOfCorruptRecord)
val mode = parsedOptions.parseMode
if (mode != PermissiveMode && mode != FailFastMode) {
throw new IllegalArgumentException(s"from_json() doesn't support the ${mode.name} mode. " +
s"Acceptable modes are ${PermissiveMode.name} and ${FailFastMode.name}.")
}
val (parserSchema, actualSchema) = nullableSchema match {
case s: StructType =>
ExprUtils.verifyColumnNameOfCorruptRecord(s, parsedOptions.columnNameOfCorruptRecord)
(s, StructType(s.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord)))
case other =>
(StructType(StructField("value", other) :: Nil), other)
}
val rawParser = new JacksonParser(actualSchema, parsedOptions, allowArrayAsStructs = false)
val createParser = CreateJacksonParser.utf8String _
new FailureSafeParser[UTF8String](
input => rawParser.parse(input, createParser, identity[UTF8String]),
mode,
parserSchema,
parsedOptions.columnNameOfCorruptRecord)
}
override def dataType: DataType = nullableSchema
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(json: Any): Any = {
converter(parser.parse(json.asInstanceOf[UTF8String]))
}
override def inputTypes: Seq[AbstractDataType] = StringType :: Nil
override def sql: String = schema match {
case _: MapType => "entries"
case _ => super.sql
}
override def prettyName: String = "from_json"
}
/**
* Converts a [[StructType]], [[ArrayType]] or [[MapType]] to a JSON output string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr[, options]) - Returns a JSON string with a given struct value",
examples = """
Examples:
> SELECT _FUNC_(named_struct('a', 1, 'b', 2));
{"a":1,"b":2}
> SELECT _FUNC_(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy'));
{"time":"26/08/2015"}
> SELECT _FUNC_(array(named_struct('a', 1, 'b', 2)));
[{"a":1,"b":2}]
> SELECT _FUNC_(map('a', named_struct('b', 1)));
{"a":{"b":1}}
> SELECT _FUNC_(map(named_struct('a', 1),named_struct('b', 2)));
{"[1]":{"b":2}}
> SELECT _FUNC_(map('a', 1));
{"a":1}
> SELECT _FUNC_(array((map('a', 1))));
[{"a":1}]
""",
since = "2.2.0")
// scalastyle:on line.size.limit
case class StructsToJson(
options: Map[String, String],
child: Expression,
timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
override def nullable: Boolean = true
def this(options: Map[String, String], child: Expression) = this(options, child, None)
// Used in `FunctionRegistry`
def this(child: Expression) = this(Map.empty, child, None)
def this(child: Expression, options: Expression) =
this(
options = ExprUtils.convertToMapData(options),
child = child,
timeZoneId = None)
@transient
lazy val writer = new CharArrayWriter()
@transient
lazy val gen = new JacksonGenerator(
inputSchema, writer, new JSONOptions(options, timeZoneId.get))
@transient
lazy val inputSchema = child.dataType
// This converts rows to the JSON output according to the given schema.
@transient
lazy val converter: Any => UTF8String = {
def getAndReset(): UTF8String = {
gen.flush()
val json = writer.toString
writer.reset()
UTF8String.fromString(json)
}
inputSchema match {
case _: StructType =>
(row: Any) =>
gen.write(row.asInstanceOf[InternalRow])
getAndReset()
case _: ArrayType =>
(arr: Any) =>
gen.write(arr.asInstanceOf[ArrayData])
getAndReset()
case _: MapType =>
(map: Any) =>
gen.write(map.asInstanceOf[MapData])
getAndReset()
}
}
override def dataType: DataType = StringType
override def checkInputDataTypes(): TypeCheckResult = inputSchema match {
case struct: StructType =>
try {
JacksonUtils.verifySchema(struct)
TypeCheckResult.TypeCheckSuccess
} catch {
case e: UnsupportedOperationException =>
TypeCheckResult.TypeCheckFailure(e.getMessage)
}
case map: MapType =>
// TODO: let `JacksonUtils.verifySchema` verify a `MapType`
try {
val st = StructType(StructField("a", map) :: Nil)
JacksonUtils.verifySchema(st)
TypeCheckResult.TypeCheckSuccess
} catch {
case e: UnsupportedOperationException =>
TypeCheckResult.TypeCheckFailure(e.getMessage)
}
case array: ArrayType =>
try {
JacksonUtils.verifyType(prettyName, array)
TypeCheckResult.TypeCheckSuccess
} catch {
case e: UnsupportedOperationException =>
TypeCheckResult.TypeCheckFailure(e.getMessage)
}
case _ => TypeCheckResult.TypeCheckFailure(
s"Input type ${child.dataType.catalogString} must be a struct, array of structs or " +
"a map or array of map.")
}
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(value: Any): Any = converter(value)
override def inputTypes: Seq[AbstractDataType] = TypeCollection(ArrayType, StructType) :: Nil
override def prettyName: String = "to_json"
}
/**
 * A function that infers the schema of a JSON string.
*/
@ExpressionDescription(
usage = "_FUNC_(json[, options]) - Returns schema in the DDL format of JSON string.",
examples = """
Examples:
> SELECT _FUNC_('[{"col":0}]');
array<struct<col:bigint>>
> SELECT _FUNC_('[{"col":01}]', map('allowNumericLeadingZeros', 'true'));
array<struct<col:bigint>>
""",
since = "2.4.0")
case class SchemaOfJson(
child: Expression,
options: Map[String, String])
extends UnaryExpression with CodegenFallback {
def this(child: Expression) = this(child, Map.empty[String, String])
def this(child: Expression, options: Expression) = this(
child = child,
options = ExprUtils.convertToMapData(options))
override def dataType: DataType = StringType
override def nullable: Boolean = false
@transient
private lazy val jsonOptions = new JSONOptions(options, "UTC")
@transient
private lazy val jsonFactory = {
val factory = new JsonFactory()
jsonOptions.setJacksonOptions(factory)
factory
}
@transient
private lazy val jsonInferSchema = new JsonInferSchema(jsonOptions)
@transient
private lazy val json = child.eval().asInstanceOf[UTF8String]
override def checkInputDataTypes(): TypeCheckResult = child match {
case Literal(s, StringType) if s != null => super.checkInputDataTypes()
case _ => TypeCheckResult.TypeCheckFailure(
s"The input json should be a string literal and not null; however, got ${child.sql}.")
}
override def eval(v: InternalRow): Any = {
val dt = Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
parser.nextToken()
jsonInferSchema.inferField(parser)
}
UTF8String.fromString(dt.catalogString)
}
override def prettyName: String = "schema_of_json"
}
|
aosagie/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
|
Scala
|
apache-2.0
| 27,543 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.stats
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
trait StatTestHelper {
val sftSpec = "strAttr:String,intAttr:Integer,longAttr:Long,doubleAttr:Double,floatAttr:Float,geom:Geometry:srid=4326,dtg:Date"
val sft = SimpleFeatureTypes.createType("test", sftSpec)
val stringIndex = sft.indexOf("strAttr")
val intIndex = sft.indexOf("intAttr")
val longIndex = sft.indexOf("longAttr")
val doubleIndex = sft.indexOf("doubleAttr")
val floatIndex = sft.indexOf("floatAttr")
val geomIndex = sft.indexOf("geom")
val dateIndex = sft.indexOf("dtg")
val features = (0 until 100).toArray.map { i =>
val a = Array(f"abc$i%03d", i, i, i, i, s"POINT(-$i ${i / 2})", f"2012-01-01T${i%24}%02d:00:00.000Z")
SimpleFeatureBuilder.build(sft, a.asInstanceOf[Array[AnyRef]], i.toString)
}
val features2 = (100 until 200).toArray.map { i =>
val a = Array(f"abc$i%03d", i, i, i, i, s"POINT(${i -20} ${i / 2 - 20})", f"2012-01-02T${i%24}%02d:00:00.000Z")
SimpleFeatureBuilder.build(sft, a.asInstanceOf[Array[AnyRef]], i.toString)
}
}
|
nagavallia/geomesa
|
geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/StatTestHelper.scala
|
Scala
|
apache-2.0
| 1,627 |
import scala.Tuple._
trait Trait1
trait Trait2
case class Box[+T](t: T)
type N[x] = x match {
case Box[String] => Trait1
case Box[Int] => Trait2
}
trait Cov[+T]
type M[t] = t match {
case Cov[x] => N[x]
}
trait Root[A] {
def thing: M[A]
}
class Asploder extends Root[Cov[Box[Int & String]]] {
def thing = new Trait1 {} // error
}
object Main {
def foo[T <: Cov[Box[Int]]](c: Root[T]): Trait2 = c.thing
def explode = foo(new Asploder)
def main(args: Array[String]): Unit =
explode
}
|
som-snytt/dotty
|
tests/neg/6570-1.scala
|
Scala
|
apache-2.0
| 511 |
package geotrellis.statistics.op.stat
import scala.math.max
import geotrellis._
case class Max(r:Op[Raster]) extends logic.Reducer1(r)({
r =>
var zmax = Int.MinValue
r.foreach(z => if (z != NODATA) zmax = max(z, zmax))
zmax
})({
zs => zs.reduceLeft(max)
})
|
Tjoene/thesis
|
Case_Programs/geotrellis-0.7.0/src/main/scala/geotrellis/statistics/op/stat/Max.scala
|
Scala
|
gpl-2.0
| 270 |
package org.zeroturnaround.geckoboard
object Highcharts {
type SeriesType = String
val Line: SeriesType = "line"
val Bar: SeriesType = "bar"
val Spline: SeriesType = "spline"
val Column: SeriesType = "column"
type PlotOptions = Map[SeriesType, SeriesTypePlotOptions]
implicit def axis2list(axis: Axis) = List(axis)
}
import Highcharts._
case class HighchartsChart(
chart: Chart,
title: Title,
subtitle: Option[Title] = None,
xAxis: List[Axis],
yAxis: List[Axis],
plotOptions: Option[PlotOptions] = None,
series: List[Series[_]]
)
case class Chart(renderTo: String, defaultSeriesType: SeriesType)
case class Title(text: String)
trait Axis
case class AxisCategories(categories: List[String]) extends Axis
case class AxisTitle[A](title: Title, opposite: Boolean = false, reversed: Boolean = false, min: Option[A] = None, max: Option[A] = None) extends Axis
case class Series[A](
name: String,
data: List[A],
`type`: Option[SeriesType] = None,
xAxis: Int = 0,
yAxis: Int = 0
)
case class SeriesTypePlotOptions(
dataLabels: Option[DataLabelsOptions] = None,
enableMouseTracking: Boolean = true
)
case class DataLabelsOptions(enabled: Boolean)
|
Villane/geckoboard-scala
|
src/main/scala/org/zeroturnaround/geckoboard/Highcharts.scala
|
Scala
|
apache-2.0
| 1,190 |
package org.dy.lint.warning
class Unused(fileName: String, lineNumber: Int, msg: String) extends Warning(fileName, lineNumber, msg) {
override val dctype = Type.tp2
}
object Unused {
  val pattern = "\\[warn]\\s(.*.scala):(\\d*):\\s(.*is never used)".r
}
|
at15/scalac-log-formatter
|
src/main/scala/org/dy/lint/warning/Unused.scala
|
Scala
|
mit
| 259 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.graph.internal.ops
import org.apache.spark.sql.{ SQLContext, Row }
import org.scalatest.Matchers
import org.trustedanalytics.sparktk.graph.Graph
import org.trustedanalytics.sparktk.testutils.TestingSparkContextWordSpec
class SingleSourceShortestPathTest extends TestingSparkContextWordSpec with Matchers {
"Single source shortest path" should {
//create Graph of friends in a social network.
def getGraph: Graph = {
val sqlContext = new SQLContext(sparkContext)
// Vertex DataFrame
val v = sqlContext.createDataFrame(List(
("a", "Alice", 34),
("b", "Bob", 36),
("c", "Charlie", 30),
("d", "David", 29),
("e", "Esther", 32),
("f", "Fanny", 36),
("g", "Gabby", 60)
)).toDF("id", "name", "age")
val e = sqlContext.createDataFrame(List(
("a", "b", "friend", 12),
("b", "c", "follow", 2),
("c", "b", "follow", 5),
("f", "c", "follow", 4),
("e", "f", "follow", 8),
("e", "d", "friend", 9),
("d", "a", "friend", 10),
("a", "e", "friend", 3)
)).toDF("src", "dst", "relationship", "distance")
// create sparktk graph
new Graph(v, e)
}
//create Graph of friends in a social network with integer vertex IDs.
def getNewGraph: Graph = {
val sqlContext: SQLContext = new SQLContext(sparkContext)
val v = sqlContext.createDataFrame(List(
(1, "Alice", 34),
(2, "Bob", 36),
(3, "Charlie", 30),
(4, "David", 29),
(5, "Esther", 32),
(6, "Fanny", 36),
(7, "Gabby", 60)
)).toDF("id", "name", "age")
val e = sqlContext.createDataFrame(List(
(1, 2, "friend", 12),
(2, 3, "follow", 2),
(3, 2, "follow", 5),
(6, 3, "follow", 4),
(5, 6, "follow", 8),
(5, 4, "friend", 9),
(4, 1, "friend", 10),
(1, 5, "friend", 3)
)).toDF("src", "dst", "relationship", "distance")
      // Create a sparktk graph
new Graph(v, e)
}
"calculate the single source shortest path" in {
val singleSourceShortestPathFrame = getGraph.singleSourceShortestPath("a")
singleSourceShortestPathFrame.collect().head shouldBe Row("b", "Bob", 36, 1.0, "[" + Seq("a", "b").mkString(", ") + "]")
}
"calculate the single source shortest paths with edge weights" in {
val singleSourceShortestPathFrame = getGraph.singleSourceShortestPath("a", Some("distance"))
singleSourceShortestPathFrame.collect().head shouldBe Row("b", "Bob", 36, 12.0, "[" + Seq("a", "b").mkString(", ") + "]")
}
"calculate the single source shortest paths with maximum path length constraint" in {
val singleSourceShortestPathFrame = getGraph.singleSourceShortestPath("a", None, Some(2))
singleSourceShortestPathFrame.collect() shouldBe Array(Row("b", "Bob", 36, 1.0, "[" + Seq("a", "b").mkString(", ") + "]"),
Row("d", "David", 29, 2.0, "[" + Seq("a", "e", "d").mkString(", ") + "]"),
Row("f", "Fanny", 36, 2.0, "[" + Seq("a", "e", "f").mkString(", ") + "]"),
Row("a", "Alice", 34, 0.0, "[" + Seq("a").mkString(", ") + "]"),
Row("c", "Charlie", 30, 2.0, "[" + Seq("a", "b", "c").mkString(", ") + "]"),
Row("e", "Esther", 32, 1.0, "[" + Seq("a", "e").mkString(", ") + "]"),
Row("g", "Gabby", 60, Double.PositiveInfinity, "[" + Seq().mkString(", ") + "]"))
}
"calculate the single source shortest path for a graph with integer vertex IDs" in {
val singleSourceShortestPathFrame = getNewGraph.singleSourceShortestPath(1)
singleSourceShortestPathFrame.collect().head shouldBe Row(4, "David", 29, 2.0, "[" + Seq(1, 5, 4).mkString(", ") + "]")
}
}
}
|
trustedanalytics/spark-tk
|
sparktk-core/src/test/scala/org/trustedanalytics/sparktk/graph/internal/ops/SingleSourceShortestPathTest.scala
|
Scala
|
apache-2.0
| 4,476 |
package com.twitter.finatra.http.test
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.databind.node.ObjectNode
import com.twitter.finagle.httpx.{Request, Response}
import com.twitter.finatra.json.FinatraObjectMapper
import com.twitter.finatra.test.EmbeddedTwitterServer
import com.twitter.inject.InjectUtils
import com.twitter.inject.server.AsyncStreamUtils
import com.twitter.io.Buf
import com.twitter.logging.Logger
import com.twitter.util.Await
import java.net.URLEncoder
import java.util.logging.Level
import org.apache.commons.io.IOUtils
trait HttpTest
extends com.twitter.inject.Test
with HttpMockResponses {
protected val testClientAppId = 12345L
protected val mapper = FinatraObjectMapper.create()
protected val pool = InjectUtils.newUnboundedPool("HttpTest " + getClass.getSimpleName)
override protected def beforeAll() {
super.beforeAll()
configFinagleLogging()
}
override protected def afterAll() {
super.afterAll()
pool.executor.shutdown()
}
def configFinagleLogging() {
val finagleLog = Logger("finagle")
finagleLog.setLevel(Level.WARNING)
}
val NormalizedId = "0"
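// idNormalizer replaces a generated "id" value with NormalizedId ("0") so JSON responses
// containing nondeterministic ids can be compared deterministically.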
def idNormalizer(jsonNode: JsonNode): JsonNode = {
val objNode = jsonNode.asInstanceOf[ObjectNode]
if (objNode.has("id")) {
objNode.put("id", NormalizedId)
}
objNode
}
@deprecated("Use server.assertHealthy()", "")
def assertHealth(server: EmbeddedTwitterServer) = {
server.assertHealthy()
server
}
@deprecated("Use server.assertHealthy()", "")
def assertHealth(server: EmbeddedHttpServer, healthy: Boolean = true) = {
server.assertHealthy(healthy)
server
}
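// Builds the "-com.twitter.server.resolverMap=<name>=<destination>,..." flag from the given
// name/destination pairs; returns the empty string when no pairs are supplied.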
def resolverMap(resolverMap: (String, String)*): String = {
if (resolverMap.isEmpty)
""
else
"-com.twitter.server.resolverMap=" + {
resolverMap map { case (k, v) =>
k + "=" + v
} mkString ","
}
}
def resolverMap(name: String, httpServer: EmbeddedHttpServer): (String, String) = {
("com.twitter.server.resolverMap", name + "=" + httpServer.externalHttpHostAndPort)
}
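// URL-encodes with java.net.URLEncoder and then rewrites the result: "+" becomes "%20" and the
// characters !, ', (, ), ~ are restored, approximating JavaScript's encodeURIComponent behavior.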
def urlEncode(str: String) = {
URLEncoder.encode(str, "UTF-8")
.replaceAll("\\\\+", "%20")
.replaceAll("\\\\%21", "!")
.replaceAll("\\\\%27", "'")
.replaceAll("\\\\%28", "(")
.replaceAll("\\\\%29", ")")
.replaceAll("\\\\%7E", "~")
}
def deserializeRequest(name: String) = {
val requestBytes = IOUtils.toByteArray(getClass.getResourceAsStream(name))
Request.decodeBytes(requestBytes)
}
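// Streams `seq` to the request writer as a JSON array, writing one element at a time and
// pausing `delayMs` between elements before closing the writer, e.g. to exercise streaming endpoints.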
def writeJsonArray(request: Request, seq: Seq[Any], delayMs: Long): Unit = {
request.writeAndWait("[")
request.writeAndWait(seq.head.toJson)
for (elem <- seq.tail) {
request.writeAndWait("," + elem.toJson)
Thread.sleep(delayMs)
}
request.writeAndWait("]")
request.closeAndWait()
}
/* JSON Implicit Utils */
implicit class RichAny(any: Any) {
def toJson = {
mapper.writeValueAsString(any)
}
def toPrettyJson = {
mapper.writePrettyString(any)
}
def toPrettyJsonStdout(): Unit = {
println(
mapper.writePrettyString(any))
}
def parseJson[T: Manifest]: T = {
any match {
case str: String =>
mapper.parse[JsonNode](str).parseJson[T]
case _ =>
mapper.convert[T](any)
}
}
}
/* Request Implicit Utils */
implicit class RichRequest(request: Request) {
def writeAndWait(str: String) {
println("Write:\\t" + str)
Await.result(
request.writer.write(Buf.Utf8(str)))
}
def closeAndWait() {
Await.result(
request.close())
}
}
implicit class RichResponse(response: Response) {
def asyncStrings = {
AsyncStreamUtils.readerToAsyncStream(response.reader) map { case Buf.Utf8(str) =>
str
}
}
def printAsyncStrings() = {
Await.result(
response.asyncStrings map { "Read:\\t" + _ } foreach println)
}
}
}
|
jaume-pinyol/finatra
|
http/src/test/scala/com/twitter/finatra/http/test/HttpTest.scala
|
Scala
|
apache-2.0
| 3,991 |
package org.jetbrains.plugins.scala
package lang.refactoring.changeSignature
import com.intellij.openapi.project.Project
import com.intellij.psi._
import com.intellij.psi.codeStyle.JavaCodeStyleManager
import com.intellij.refactoring.changeSignature.JavaParameterInfo
import com.intellij.refactoring.util.CanonicalTypes
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.psi.ElementScope
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameterClause}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.{FunctionType, JavaArrayType, Nothing}
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import scala.beans.{BeanProperty, BooleanBeanProperty}
/**
* Nikolay.Tropin
* 2014-08-10
*/
class ScalaParameterInfo(@BeanProperty var name: String,
@BeanProperty val oldIndex: Int,
var scType: ScType,
val project: Project,
var isRepeatedParameter: Boolean,
var isByName: Boolean,
@BeanProperty var defaultValue: String = "",
var keywordsAndAnnotations: String = "",
val isIntroducedParameter: Boolean = false)
extends JavaParameterInfo {
def this(p: ScParameter) {
this(p.name, p.index, p.getType(TypingContext.empty).getOrAny, p.getProject, p.isRepeatedParameter, p.isCallByNameParameter,
keywordsAndAnnotations = ScalaParameterInfo.keywordsAndAnnotations(p))
}
var defaultForJava = defaultValue
@BooleanBeanProperty
var useAnySingleVariable: Boolean = false
val wasArrayType: Boolean = scType match {
case JavaArrayType(_) => true
case _ => false
}
val isVarargType = false //overriders in java of method with repeated parameters are not varargs
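// The PSI (Java) view of the parameter type: by-name parameters are exposed as a zero-argument
// function type (() => T), repeated parameters as scala.collection.Seq[T], everything else as-is.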
protected def psiType: PsiType = {
if (scType == null) return null
implicit val elementScope = ElementScope(project)
val resultType = if (isByName) {
val functionType = FunctionType(scType, Seq())
functionType
}
else if (isRepeatedParameter) {
val seqType = ScalaPsiManager.instance(project).getCachedClass(elementScope.scope, "scala.collection.Seq")
.map(ScalaType.designator(_))
.getOrElse(Nothing)
ScParameterizedType(seqType, Seq(scType))
}
else scType
resultType.toPsiType()
}
override def createType(context: PsiElement, manager: PsiManager): PsiType = psiType
override def getValue(expr: PsiCallExpression): PsiExpression = {
if (defaultForJava.isEmpty) return null
val defaultText =
if (defaultForJava.contains("$default$")) {
val qual = expr match {
case mc: PsiMethodCallExpression =>
mc.getMethodExpression.getQualifierExpression match {
case _: PsiSuperExpression => ""
case null => ""
case q => q.getText + "."
}
case _ => ""
}
qual + defaultForJava
} else defaultForJava
val expression = JavaPsiFacade.getElementFactory(project).createExpressionFromText(defaultText, expr)
JavaCodeStyleManager.getInstance(project).shortenClassReferences(expression).asInstanceOf[PsiExpression]
}
override def getTypeWrapper: CanonicalTypes.Type = {
if (scType != null) CanonicalTypes.createTypeWrapper(psiType) else null
}
override def getTypeText: String =
if (scType != null) getTypeWrapper.getTypeText else null
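// Renders the type as it appears in Scala source: repeated parameters get a trailing "*",
// by-name parameters a leading "=> ".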
def typeText: String = {
val baseText = Option(scType).fold("")(_.presentableText)
if (isRepeatedParameter) baseText + "*"
else if (isByName) " => " + baseText
else baseText
}
}
object ScalaParameterInfo {
def apply(p: ScParameter) = new ScalaParameterInfo(p)
def apply(project: Project) = new ScalaParameterInfo("", -1, null, project, false, false)
def keywordsAndAnnotations(p: ScParameter): String = {
val nameId = p.nameId
val elems = p.children.takeWhile(_ != nameId)
elems.map(_.getText).mkString
}
def allForMethod(methodLike: ScMethodLike): Seq[Seq[ScalaParameterInfo]] = {
def infos(clause: ScParameterClause): Seq[ScalaParameterInfo] = clause.parameters.map(new ScalaParameterInfo(_))
methodLike.parameterList.clauses.map(infos)
}
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/refactoring/changeSignature/ScalaParameterInfo.scala
|
Scala
|
apache-2.0
| 4,543 |
package odelay.jdk
import java.util.concurrent.{
Future => JFuture,
RejectedExecutionHandler, ScheduledExecutorService,
ScheduledThreadPoolExecutor, ThreadFactory }
import java.util.concurrent.atomic.AtomicInteger
import odelay.{ Delay, PeriodicDelay, PeriodicPromisingDelay, PromisingDelay, Timer }
import scala.concurrent.Promise
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal
/** A Timer implemented in terms of a jdk ScheduledThreadPoolExecutor */
class JdkTimer(
underlying: ScheduledExecutorService,
interruptOnCancel: Boolean)
extends Timer {
/** customizing constructor */
def this(
poolSize: Int = JdkTimer.poolSize,
threads: ThreadFactory = JdkTimer.threadFactory,
handler: Option[RejectedExecutionHandler] = JdkTimer.rejectionHandler,
interruptOnCancel: Boolean = JdkTimer.interruptOnCancel) =
this(handler.map( rejections => new ScheduledThreadPoolExecutor(poolSize, threads, rejections))
.getOrElse(new ScheduledThreadPoolExecutor(poolSize, threads)),
interruptOnCancel)
def apply[T](delay: FiniteDuration, op: => T): Delay[T] =
new PromisingDelay[T] {
val jfuture: Option[JFuture[_]] = try {
Some(underlying.schedule(new Runnable {
def run() = completePromise(op)
}, delay.length, delay.unit))
} catch {
case NonFatal(e) =>
failPromise(e)
None
}
def cancel() = jfuture.filterNot(_.isCancelled).foreach { f =>
f.cancel(interruptOnCancel)
cancelPromise()
}
}
def apply[T](delay: FiniteDuration, every: FiniteDuration, op: => T): PeriodicDelay[T] =
new PeriodicPromisingDelay[T](every) {
val jfuture: Option[JFuture[_]] = try {
Some(underlying.scheduleWithFixedDelay(new Runnable {
def run = if (promiseIncomplete) op
}, delay.toUnit(every.unit).toLong, every.length, every.unit))
} catch {
case NonFatal(e) =>
failPromise(e)
None
}
def cancel() = jfuture.filterNot(_.isCancelled).foreach { f =>
f.cancel(interruptOnCancel)
cancelPromise()
}
}
def stop() = if (!underlying.isShutdown) underlying.shutdownNow()
}
/** defaults for jdk timers */
object JdkTimer {
lazy val poolSize = Runtime.getRuntime().availableProcessors()
/** @return a new ThreadFactory that produces new threads named odelay-{threadNum} */
def threadFactory: ThreadFactory = new ThreadFactory {
val grp = new ThreadGroup(
Thread.currentThread().getThreadGroup(), "odelay")
val threads = new AtomicInteger(1)
def newThread(runs: Runnable) =
new Thread(
grp, runs,
"odelay-%s" format threads.getAndIncrement()) {
setDaemon(true)
}
}
val rejectionHandler: Option[RejectedExecutionHandler] = None
val interruptOnCancel = true
/** @return a _new_ Timer. When used, clients should be sure to call stop() on all instances for a clean shutdown */
def newTimer: Timer = new JdkTimer(
poolSize, threadFactory, rejectionHandler, interruptOnCancel)
}
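// Hedged usage sketch (not part of the original odelay source): exercises only the signatures
// defined above -- JdkTimer.newTimer, Timer#apply(delay, op), Timer#apply(delay, every, op) and
// Timer#stop(). The object and its main method are illustrative additions, not odelay API.
object JdkTimerUsageSketch {
  import scala.concurrent.duration._

  def main(args: Array[String]): Unit = {
    val timer: Timer = JdkTimer.newTimer          // built with the defaults documented above
    timer(1.second, println("fired once"))        // one-shot Delay[Unit]
    timer(1.second, 2.seconds, println("tick"))   // PeriodicDelay[Unit]: first after 1s, then every 2s
    Thread.sleep(5000)                            // let a few scheduled tasks run
    timer.stop()                                  // shuts down the underlying executor
  }
}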
|
softprops/odelay
|
odelay-core/jvm/src/main/scala/jdk/JdkTimer.scala
|
Scala
|
mit
| 3,191 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io._
import java.net.URI
import java.nio.charset.StandardCharsets
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import com.google.common.io.ByteStreams
import org.apache.commons.io.{FilenameUtils, FileUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}
import org.scalatest.{BeforeAndAfterEach, Matchers}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.api.r.RUtils
import org.apache.spark.deploy.SparkSubmit._
import org.apache.spark.deploy.SparkSubmitUtils.MavenCoordinate
import org.apache.spark.internal.config._
import org.apache.spark.internal.Logging
import org.apache.spark.TestUtils.JavaSourceFromString
import org.apache.spark.scheduler.EventLoggingListener
import org.apache.spark.util.{CommandLineUtils, ResetSystemProperties, Utils}
trait TestPrematureExit {
suite: SparkFunSuite =>
private val noOpOutputStream = new OutputStream {
def write(b: Int) = {}
}
/** Simple PrintStream that reads data into a buffer */
private class BufferPrintStream extends PrintStream(noOpOutputStream) {
var lineBuffer = ArrayBuffer[String]()
// scalastyle:off println
override def println(line: String) {
lineBuffer += line
}
// scalastyle:on println
}
/** Runs the given main object with the provided arguments and fails unless the given search string is printed before it exits. */
private[spark] def testPrematureExit(
input: Array[String],
searchString: String,
mainObject: CommandLineUtils = SparkSubmit) : Unit = {
val printStream = new BufferPrintStream()
mainObject.printStream = printStream
@volatile var exitedCleanly = false
mainObject.exitFn = (_) => exitedCleanly = true
val thread = new Thread {
override def run() = try {
mainObject.main(input)
} catch {
// If exceptions occur after the "exit" has happened, fine to ignore them.
// These represent code paths not reachable during normal execution.
case e: Exception => if (!exitedCleanly) throw e
}
}
thread.start()
thread.join()
val joined = printStream.lineBuffer.mkString("\n")
if (!joined.contains(searchString)) {
fail(s"Search string '$searchString' not found in $joined")
}
}
}
// Note: this suite mixes in ResetSystemProperties because SparkSubmit.main() sets a bunch
// of properties that need to be cleared after tests.
class SparkSubmitSuite
extends SparkFunSuite
with Matchers
with BeforeAndAfterEach
with ResetSystemProperties
with Timeouts
with TestPrematureExit {
override def beforeEach() {
super.beforeEach()
System.setProperty("spark.testing", "true")
}
// scalastyle:off println
test("prints usage on empty input") {
testPrematureExit(Array.empty[String], "Usage: spark-submit")
}
test("prints usage with only --help") {
testPrematureExit(Array("--help"), "Usage: spark-submit")
}
test("prints error with unrecognized options") {
testPrematureExit(Array("--blarg"), "Unrecognized option '--blarg'")
testPrematureExit(Array("-bleg"), "Unrecognized option '-bleg'")
}
test("handle binary specified but not class") {
testPrematureExit(Array("foo.jar"), "No main class")
}
test("handles arguments with --key=val") {
val clArgs = Seq(
"--jars=one.jar,two.jar,three.jar",
"--name=myApp")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.jars should include regex (".*one.jar,.*two.jar,.*three.jar")
appArgs.name should be ("myApp")
}
test("handles arguments to user program") {
val clArgs = Seq(
"--name", "myApp",
"--class", "Foo",
"userjar.jar",
"some",
"--weird", "args")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.childArgs should be (Seq("some", "--weird", "args"))
}
test("handles arguments to user program with name collision") {
val clArgs = Seq(
"--name", "myApp",
"--class", "Foo",
"userjar.jar",
"--master", "local",
"some",
"--weird", "args")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.childArgs should be (Seq("--master", "local", "some", "--weird", "args"))
}
test("print the right queue name") {
val clArgs = Seq(
"--name", "myApp",
"--class", "Foo",
"--conf", "spark.yarn.queue=thequeue",
"userjar.jar")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.queue should be ("thequeue")
appArgs.toString should include ("thequeue")
}
test("specify deploy mode through configuration") {
val clArgs = Seq(
"--master", "yarn",
"--conf", "spark.submit.deployMode=client",
"--class", "org.SomeClass",
"thejar.jar"
)
val appArgs = new SparkSubmitArguments(clArgs)
val (_, _, sysProps, _, _, _) = prepareSubmitEnvironment(appArgs)
appArgs.deployMode should be ("client")
sysProps("spark.submit.deployMode") should be ("client")
// Both cmd line and configuration are specified, cmdline option takes the priority
val clArgs1 = Seq(
"--master", "yarn",
"--deploy-mode", "cluster",
"--conf", "spark.submit.deployMode=client",
"-class", "org.SomeClass",
"thejar.jar"
)
val appArgs1 = new SparkSubmitArguments(clArgs1)
val (_, _, sysProps1, _, _, _) = prepareSubmitEnvironment(appArgs1)
appArgs1.deployMode should be ("cluster")
sysProps1("spark.submit.deployMode") should be ("cluster")
// Neither cmdline nor configuration are specified, client mode is the default choice
val clArgs2 = Seq(
"--master", "yarn",
"--class", "org.SomeClass",
"thejar.jar"
)
val appArgs2 = new SparkSubmitArguments(clArgs2)
appArgs2.deployMode should be (null)
val (_, _, sysProps2, _, _, _) = prepareSubmitEnvironment(appArgs2)
appArgs2.deployMode should be ("client")
sysProps2("spark.submit.deployMode") should be ("client")
}
test("handles YARN cluster mode") {
val clArgs = Seq(
"--deploy-mode", "cluster",
"--master", "yarn",
"--executor-memory", "5g",
"--executor-cores", "5",
"--class", "org.SomeClass",
"--jars", "one.jar,two.jar,three.jar",
"--driver-memory", "4g",
"--queue", "thequeue",
"--files", "file1.txt,file2.txt",
"--archives", "archive1.txt,archive2.txt",
"--num-executors", "6",
"--name", "beauty",
"--conf", "spark.ui.enabled=false",
"thejar.jar",
"arg1", "arg2")
val appArgs = new SparkSubmitArguments(clArgs)
val (childArgs, classpath, sysProps, mainClass, _, _) = prepareSubmitEnvironment(appArgs)
val childArgsStr = childArgs.mkString(" ")
childArgsStr should include ("--class org.SomeClass")
childArgsStr should include ("--arg arg1 --arg arg2")
childArgsStr should include regex ("--jar .*thejar.jar")
mainClass should be ("org.apache.spark.deploy.yarn.Client")
// In yarn cluster mode, jars are also added to the classpath
classpath(0) should endWith ("thejar.jar")
classpath(1) should endWith ("one.jar")
classpath(2) should endWith ("two.jar")
classpath(3) should endWith ("three.jar")
sysProps("spark.executor.memory") should be ("5g")
sysProps("spark.driver.memory") should be ("4g")
sysProps("spark.executor.cores") should be ("5")
sysProps("spark.yarn.queue") should be ("thequeue")
sysProps("spark.yarn.dist.jars") should include regex (".*one.jar,.*two.jar,.*three.jar")
sysProps("spark.yarn.dist.files") should include regex (".*file1.txt,.*file2.txt")
sysProps("spark.yarn.dist.archives") should include regex (".*archive1.txt,.*archive2.txt")
sysProps("spark.app.name") should be ("beauty")
sysProps("spark.ui.enabled") should be ("false")
sysProps("SPARK_SUBMIT") should be ("true")
}
test("handles YARN client mode") {
val clArgs = Seq(
"--deploy-mode", "client",
"--master", "yarn",
"--executor-memory", "5g",
"--executor-cores", "5",
"--class", "org.SomeClass",
"--jars", "one.jar,two.jar,three.jar",
"--driver-memory", "4g",
"--queue", "thequeue",
"--files", "file1.txt,file2.txt",
"--archives", "archive1.txt,archive2.txt",
"--num-executors", "6",
"--name", "trill",
"--conf", "spark.ui.enabled=false",
"thejar.jar",
"arg1", "arg2")
val appArgs = new SparkSubmitArguments(clArgs)
val (childArgs, classpath, sysProps, mainClass, _, _) = prepareSubmitEnvironment(appArgs)
childArgs.mkString(" ") should be ("arg1 arg2")
mainClass should be ("org.SomeClass")
classpath should have length (4)
classpath(0) should endWith ("thejar.jar")
classpath(1) should endWith ("one.jar")
classpath(2) should endWith ("two.jar")
classpath(3) should endWith ("three.jar")
sysProps("spark.app.name") should be ("trill")
sysProps("spark.executor.memory") should be ("5g")
sysProps("spark.executor.cores") should be ("5")
sysProps("spark.yarn.queue") should be ("thequeue")
sysProps("spark.executor.instances") should be ("6")
sysProps("spark.yarn.dist.files") should include regex (".*file1.txt,.*file2.txt")
sysProps("spark.yarn.dist.archives") should include regex (".*archive1.txt,.*archive2.txt")
sysProps("spark.yarn.dist.jars") should include
regex (".*one.jar,.*two.jar,.*three.jar,.*thejar.jar")
sysProps("SPARK_SUBMIT") should be ("true")
sysProps("spark.ui.enabled") should be ("false")
}
test("handles standalone cluster mode") {
testStandaloneCluster(useRest = true)
}
test("handles legacy standalone cluster mode") {
testStandaloneCluster(useRest = false)
}
/**
* Test whether the launch environment is correctly set up in standalone cluster mode.
* @param useRest whether to use the REST submission gateway introduced in Spark 1.3
*/
private def testStandaloneCluster(useRest: Boolean): Unit = {
val clArgs = Seq(
"--deploy-mode", "cluster",
"--master", "spark://h:p",
"--class", "org.SomeClass",
"--supervise",
"--driver-memory", "4g",
"--driver-cores", "5",
"--conf", "spark.ui.enabled=false",
"thejar.jar",
"arg1", "arg2")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.useRest = useRest
val (childArgs, classpath, sysProps, mainClass, _, _) = prepareSubmitEnvironment(appArgs)
val childArgsStr = childArgs.mkString(" ")
if (useRest) {
childArgsStr should endWith ("thejar.jar org.SomeClass arg1 arg2")
mainClass should be ("org.apache.spark.deploy.rest.RestSubmissionClient")
} else {
childArgsStr should startWith ("--supervise --memory 4g --cores 5")
childArgsStr should include regex "launch spark://h:p .*thejar.jar org.SomeClass arg1 arg2"
mainClass should be ("org.apache.spark.deploy.Client")
}
classpath should have size 0
sysProps should have size 9
sysProps.keys should contain ("SPARK_SUBMIT")
sysProps.keys should contain ("spark.master")
sysProps.keys should contain ("spark.app.name")
sysProps.keys should contain ("spark.jars")
sysProps.keys should contain ("spark.driver.memory")
sysProps.keys should contain ("spark.driver.cores")
sysProps.keys should contain ("spark.driver.supervise")
sysProps.keys should contain ("spark.ui.enabled")
sysProps.keys should contain ("spark.submit.deployMode")
sysProps("spark.ui.enabled") should be ("false")
}
test("handles standalone client mode") {
val clArgs = Seq(
"--deploy-mode", "client",
"--master", "spark://h:p",
"--executor-memory", "5g",
"--total-executor-cores", "5",
"--class", "org.SomeClass",
"--driver-memory", "4g",
"--conf", "spark.ui.enabled=false",
"thejar.jar",
"arg1", "arg2")
val appArgs = new SparkSubmitArguments(clArgs)
val (childArgs, classpath, sysProps, mainClass, _, _) = prepareSubmitEnvironment(appArgs)
childArgs.mkString(" ") should be ("arg1 arg2")
mainClass should be ("org.SomeClass")
classpath should have length (1)
classpath(0) should endWith ("thejar.jar")
sysProps("spark.executor.memory") should be ("5g")
sysProps("spark.cores.max") should be ("5")
sysProps("spark.ui.enabled") should be ("false")
}
test("handles mesos client mode") {
val clArgs = Seq(
"--deploy-mode", "client",
"--master", "mesos://h:p",
"--executor-memory", "5g",
"--total-executor-cores", "5",
"--class", "org.SomeClass",
"--driver-memory", "4g",
"--conf", "spark.ui.enabled=false",
"thejar.jar",
"arg1", "arg2")
val appArgs = new SparkSubmitArguments(clArgs)
val (childArgs, classpath, sysProps, mainClass, _, _) = prepareSubmitEnvironment(appArgs)
childArgs.mkString(" ") should be ("arg1 arg2")
mainClass should be ("org.SomeClass")
classpath should have length (1)
classpath(0) should endWith ("thejar.jar")
sysProps("spark.executor.memory") should be ("5g")
sysProps("spark.cores.max") should be ("5")
sysProps("spark.ui.enabled") should be ("false")
}
test("handles confs with flag equivalents") {
val clArgs = Seq(
"--deploy-mode", "cluster",
"--executor-memory", "5g",
"--class", "org.SomeClass",
"--conf", "spark.executor.memory=4g",
"--conf", "spark.master=yarn",
"thejar.jar",
"arg1", "arg2")
val appArgs = new SparkSubmitArguments(clArgs)
val (_, _, sysProps, mainClass, _, _) = prepareSubmitEnvironment(appArgs)
sysProps("spark.executor.memory") should be ("5g")
sysProps("spark.master") should be ("yarn")
sysProps("spark.submit.deployMode") should be ("cluster")
mainClass should be ("org.apache.spark.deploy.yarn.Client")
}
test("launch simple application with spark-submit") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SimpleApplicationTest.getClass.getName.stripSuffix("$"),
"--name", "testApp",
"--master", "local",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
unusedJar.toString)
runSparkSubmit(args)
}
test("launch simple application with spark-submit with redaction") {
val testDir = Utils.createTempDir()
testDir.deleteOnExit()
val testDirPath = new Path(testDir.getAbsolutePath())
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val fileSystem = Utils.getHadoopFileSystem("/",
SparkHadoopUtil.get.newConfiguration(new SparkConf()))
try {
val args = Seq(
"--class", SimpleApplicationTest.getClass.getName.stripSuffix("$"),
"--name", "testApp",
"--master", "local",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--conf", "spark.executorEnv.HADOOP_CREDSTORE_PASSWORD=secret_password",
"--conf", "spark.eventLog.enabled=true",
"--conf", "spark.eventLog.testing=true",
"--conf", s"spark.eventLog.dir=${testDirPath.toUri.toString}",
"--conf", "spark.hadoop.fs.defaultFS=unsupported://example.com",
unusedJar.toString)
runSparkSubmit(args)
val listStatus = fileSystem.listStatus(testDirPath)
val logData = EventLoggingListener.openEventLog(listStatus.last.getPath, fileSystem)
Source.fromInputStream(logData).getLines().foreach { line =>
assert(!line.contains("secret_password"))
}
} finally {
Utils.deleteRecursively(testDir)
}
}
test("includes jars passed in through --jars") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
val args = Seq(
"--class", JarCreationTest.getClass.getName.stripSuffix("$"),
"--name", "testApp",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--jars", jarsString,
unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
runSparkSubmit(args)
}
// SPARK-7287
test("includes jars passed in through --packages") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val main = MavenCoordinate("my.great.lib", "mylib", "0.1")
val dep = MavenCoordinate("my.great.dep", "mylib", "0.1")
IvyTestUtils.withRepository(main, Some(dep.toString), None) { repo =>
val args = Seq(
"--class", JarCreationTest.getClass.getName.stripSuffix("$"),
"--name", "testApp",
"--master", "local-cluster[2,1,1024]",
"--packages", Seq(main, dep).mkString(","),
"--repositories", repo,
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
unusedJar.toString,
"my.great.lib.MyLib", "my.great.dep.MyLib")
runSparkSubmit(args)
}
}
// TODO(SPARK-9603): Building a package is flaky on Jenkins Maven builds.
// See https://gist.github.com/shivaram/3a2fecce60768a603dac for an error log
ignore("correctly builds R packages included in a jar with --packages") {
assume(RUtils.isRInstalled, "R isn't installed on this machine.")
// Check if the SparkR package is installed
assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")
val main = MavenCoordinate("my.great.lib", "mylib", "0.1")
val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
val rScriptDir = Seq(
sparkHome, "R", "pkg", "tests", "fulltests", "packageInAJarTest.R").mkString(File.separator)
assert(new File(rScriptDir).exists)
IvyTestUtils.withRepository(main, None, None, withR = true) { repo =>
val args = Seq(
"--name", "testApp",
"--master", "local-cluster[2,1,1024]",
"--packages", main.toString,
"--repositories", repo,
"--verbose",
"--conf", "spark.ui.enabled=false",
rScriptDir)
runSparkSubmit(args)
}
}
test("include an external JAR in SparkR") {
assume(RUtils.isRInstalled, "R isn't installed on this machine.")
val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
// Check if the SparkR package is installed
assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")
val rScriptDir =
Seq(sparkHome, "R", "pkg", "tests", "fulltests", "jarTest.R").mkString(File.separator)
assert(new File(rScriptDir).exists)
// compile a small jar containing a class that will be called from R code.
val tempDir = Utils.createTempDir()
val srcDir = new File(tempDir, "sparkrtest")
srcDir.mkdirs()
val excSource = new JavaSourceFromString(new File(srcDir, "DummyClass").toURI.getPath,
"""package sparkrtest;
|
|public class DummyClass implements java.io.Serializable {
| public static String helloWorld(String arg) { return "Hello " + arg; }
| public static int addStuff(int arg1, int arg2) { return arg1 + arg2; }
|}
""".stripMargin)
val excFile = TestUtils.createCompiledClass("DummyClass", srcDir, excSource, Seq.empty)
val jarFile = new File(tempDir, "sparkRTestJar-%s.jar".format(System.currentTimeMillis()))
val jarURL = TestUtils.createJar(Seq(excFile), jarFile, directoryPrefix = Some("sparkrtest"))
val args = Seq(
"--name", "testApp",
"--master", "local",
"--jars", jarURL.toString,
"--verbose",
"--conf", "spark.ui.enabled=false",
rScriptDir)
runSparkSubmit(args)
}
test("resolves command line argument paths correctly") {
val jars = "/jar1,/jar2" // --jars
val files = "local:/file1,file2" // --files
val archives = "file:/archive1,archive2" // --archives
val pyFiles = "py-file1,py-file2" // --py-files
// Test jars and files
val clArgs = Seq(
"--master", "local",
"--class", "org.SomeClass",
"--jars", jars,
"--files", files,
"thejar.jar")
val appArgs = new SparkSubmitArguments(clArgs)
val sysProps = SparkSubmit.prepareSubmitEnvironment(appArgs)._3
appArgs.jars should be (Utils.resolveURIs(jars))
appArgs.files should be (Utils.resolveURIs(files))
sysProps("spark.jars") should be (Utils.resolveURIs(jars + ",thejar.jar"))
sysProps("spark.files") should be (Utils.resolveURIs(files))
// Test files and archives (Yarn)
val clArgs2 = Seq(
"--master", "yarn",
"--class", "org.SomeClass",
"--files", files,
"--archives", archives,
"thejar.jar"
)
val appArgs2 = new SparkSubmitArguments(clArgs2)
val sysProps2 = SparkSubmit.prepareSubmitEnvironment(appArgs2)._3
appArgs2.files should be (Utils.resolveURIs(files))
appArgs2.archives should be (Utils.resolveURIs(archives))
sysProps2("spark.yarn.dist.files") should be (Utils.resolveURIs(files))
sysProps2("spark.yarn.dist.archives") should be (Utils.resolveURIs(archives))
// Test python files
val clArgs3 = Seq(
"--master", "local",
"--py-files", pyFiles,
"--conf", "spark.pyspark.driver.python=python3.4",
"--conf", "spark.pyspark.python=python3.5",
"mister.py"
)
val appArgs3 = new SparkSubmitArguments(clArgs3)
val sysProps3 = SparkSubmit.prepareSubmitEnvironment(appArgs3)._3
appArgs3.pyFiles should be (Utils.resolveURIs(pyFiles))
sysProps3("spark.submit.pyFiles") should be (
PythonRunner.formatPaths(Utils.resolveURIs(pyFiles)).mkString(","))
sysProps3(PYSPARK_DRIVER_PYTHON.key) should be ("python3.4")
sysProps3(PYSPARK_PYTHON.key) should be ("python3.5")
}
test("resolves config paths correctly") {
val jars = "/jar1,/jar2" // spark.jars
val files = "local:/file1,file2" // spark.files / spark.yarn.dist.files
val archives = "file:/archive1,archive2" // spark.yarn.dist.archives
val pyFiles = "py-file1,py-file2" // spark.submit.pyFiles
val tmpDir = Utils.createTempDir()
// Test jars and files
val f1 = File.createTempFile("test-submit-jars-files", "", tmpDir)
val writer1 = new PrintWriter(f1)
writer1.println("spark.jars " + jars)
writer1.println("spark.files " + files)
writer1.close()
val clArgs = Seq(
"--master", "local",
"--class", "org.SomeClass",
"--properties-file", f1.getPath,
"thejar.jar"
)
val appArgs = new SparkSubmitArguments(clArgs)
val sysProps = SparkSubmit.prepareSubmitEnvironment(appArgs)._3
sysProps("spark.jars") should be(Utils.resolveURIs(jars + ",thejar.jar"))
sysProps("spark.files") should be(Utils.resolveURIs(files))
// Test files and archives (Yarn)
val f2 = File.createTempFile("test-submit-files-archives", "", tmpDir)
val writer2 = new PrintWriter(f2)
writer2.println("spark.yarn.dist.files " + files)
writer2.println("spark.yarn.dist.archives " + archives)
writer2.close()
val clArgs2 = Seq(
"--master", "yarn",
"--class", "org.SomeClass",
"--properties-file", f2.getPath,
"thejar.jar"
)
val appArgs2 = new SparkSubmitArguments(clArgs2)
val sysProps2 = SparkSubmit.prepareSubmitEnvironment(appArgs2)._3
sysProps2("spark.yarn.dist.files") should be(Utils.resolveURIs(files))
sysProps2("spark.yarn.dist.archives") should be(Utils.resolveURIs(archives))
// Test python files
val f3 = File.createTempFile("test-submit-python-files", "", tmpDir)
val writer3 = new PrintWriter(f3)
writer3.println("spark.submit.pyFiles " + pyFiles)
writer3.close()
val clArgs3 = Seq(
"--master", "local",
"--properties-file", f3.getPath,
"mister.py"
)
val appArgs3 = new SparkSubmitArguments(clArgs3)
val sysProps3 = SparkSubmit.prepareSubmitEnvironment(appArgs3)._3
sysProps3("spark.submit.pyFiles") should be(
PythonRunner.formatPaths(Utils.resolveURIs(pyFiles)).mkString(","))
// Test remote python files
val f4 = File.createTempFile("test-submit-remote-python-files", "", tmpDir)
val writer4 = new PrintWriter(f4)
val remotePyFiles = "hdfs:///tmp/file1.py,hdfs:///tmp/file2.py"
writer4.println("spark.submit.pyFiles " + remotePyFiles)
writer4.close()
val clArgs4 = Seq(
"--master", "yarn",
"--deploy-mode", "cluster",
"--properties-file", f4.getPath,
"hdfs:///tmp/mister.py"
)
val appArgs4 = new SparkSubmitArguments(clArgs4)
val sysProps4 = SparkSubmit.prepareSubmitEnvironment(appArgs4)._3
// Should not format python path for yarn cluster mode
sysProps4("spark.submit.pyFiles") should be(
Utils.resolveURIs(remotePyFiles)
)
}
test("user classpath first in driver") {
val systemJar = TestUtils.createJarWithFiles(Map("test.resource" -> "SYSTEM"))
val userJar = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"))
val args = Seq(
"--class", UserClasspathFirstTest.getClass.getName.stripSuffix("$"),
"--name", "testApp",
"--master", "local",
"--conf", "spark.driver.extraClassPath=" + systemJar,
"--conf", "spark.driver.userClassPathFirst=true",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
userJar.toString)
runSparkSubmit(args)
}
test("SPARK_CONF_DIR overrides spark-defaults.conf") {
forConfDir(Map("spark.executor.memory" -> "2.3g")) { path =>
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SimpleApplicationTest.getClass.getName.stripSuffix("$"),
"--name", "testApp",
"--master", "local",
unusedJar.toString)
val appArgs = new SparkSubmitArguments(args, Map("SPARK_CONF_DIR" -> path))
assert(appArgs.propertiesFile != null)
assert(appArgs.propertiesFile.startsWith(path))
appArgs.executorMemory should be ("2.3g")
}
}
test("comma separated list of files are unioned correctly") {
val left = Option("/tmp/a.jar,/tmp/b.jar")
val right = Option("/tmp/c.jar,/tmp/a.jar")
val emptyString = Option("")
Utils.unionFileLists(left, right) should be (Set("/tmp/a.jar", "/tmp/b.jar", "/tmp/c.jar"))
Utils.unionFileLists(emptyString, emptyString) should be (Set.empty)
Utils.unionFileLists(Option("/tmp/a.jar"), emptyString) should be (Set("/tmp/a.jar"))
Utils.unionFileLists(emptyString, Option("/tmp/a.jar")) should be (Set("/tmp/a.jar"))
Utils.unionFileLists(None, Option("/tmp/a.jar")) should be (Set("/tmp/a.jar"))
Utils.unionFileLists(Option("/tmp/a.jar"), None) should be (Set("/tmp/a.jar"))
}
// scalastyle:on println
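// Verifies that a file downloaded by SparkSubmit.downloadFile landed on the local filesystem
// with the same file name and contents as the source (no-op when the path was returned unchanged).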
private def checkDownloadedFile(sourcePath: String, outputPath: String): Unit = {
if (sourcePath == outputPath) {
return
}
val sourceUri = new URI(sourcePath)
val outputUri = new URI(outputPath)
assert(outputUri.getScheme === "file")
// The path and filename are preserved.
assert(outputUri.getPath.endsWith(sourceUri.getPath))
assert(FileUtils.readFileToString(new File(outputUri.getPath)) ===
FileUtils.readFileToString(new File(sourceUri.getPath)))
}
private def deleteTempOutputFile(outputPath: String): Unit = {
val outputFile = new File(new URI(outputPath).getPath)
if (outputFile.exists) {
outputFile.delete()
}
}
test("downloadFile - invalid url") {
intercept[IOException] {
SparkSubmit.downloadFile("abc:/my/file", new Configuration())
}
}
test("downloadFile - file doesn't exist") {
val hadoopConf = new Configuration()
updateConfWithFakeS3Fs(hadoopConf)
intercept[FileNotFoundException] {
SparkSubmit.downloadFile("s3a:/no/such/file", hadoopConf)
}
}
test("downloadFile does not download local file") {
// An empty path is considered a local file.
assert(SparkSubmit.downloadFile("", new Configuration()) === "")
assert(SparkSubmit.downloadFile("/local/file", new Configuration()) === "/local/file")
}
test("download one file to local") {
val jarFile = File.createTempFile("test", ".jar")
jarFile.deleteOnExit()
val content = "hello, world"
FileUtils.write(jarFile, content)
val hadoopConf = new Configuration()
updateConfWithFakeS3Fs(hadoopConf)
val sourcePath = s"s3a://${jarFile.getAbsolutePath}"
val outputPath = SparkSubmit.downloadFile(sourcePath, hadoopConf)
checkDownloadedFile(sourcePath, outputPath)
deleteTempOutputFile(outputPath)
}
test("download list of files to local") {
val jarFile = File.createTempFile("test", ".jar")
jarFile.deleteOnExit()
val content = "hello, world"
FileUtils.write(jarFile, content)
val hadoopConf = new Configuration()
updateConfWithFakeS3Fs(hadoopConf)
val sourcePaths = Seq("/local/file", s"s3a://${jarFile.getAbsolutePath}")
val outputPaths = SparkSubmit.downloadFileList(sourcePaths.mkString(","), hadoopConf).split(",")
assert(outputPaths.length === sourcePaths.length)
sourcePaths.zip(outputPaths).foreach { case (sourcePath, outputPath) =>
checkDownloadedFile(sourcePath, outputPath)
deleteTempOutputFile(outputPath)
}
}
test("Avoid re-upload remote resources in yarn client mode") {
val hadoopConf = new Configuration()
updateConfWithFakeS3Fs(hadoopConf)
val tmpDir = Utils.createTempDir()
val file = File.createTempFile("tmpFile", "", tmpDir)
val pyFile = File.createTempFile("tmpPy", ".egg", tmpDir)
val mainResource = File.createTempFile("tmpPy", ".py", tmpDir)
val tmpJar = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"), tmpDir)
val tmpJarPath = s"s3a://${new File(tmpJar.toURI).getAbsolutePath}"
val args = Seq(
"--class", UserClasspathFirstTest.getClass.getName.stripPrefix("$"),
"--name", "testApp",
"--master", "yarn",
"--deploy-mode", "client",
"--jars", tmpJarPath,
"--files", s"s3a://${file.getAbsolutePath}",
"--py-files", s"s3a://${pyFile.getAbsolutePath}",
s"s3a://$mainResource"
)
val appArgs = new SparkSubmitArguments(args)
val sysProps = SparkSubmit.prepareSubmitEnvironment(appArgs, Some(hadoopConf))._3
// All the resources should still be remote paths, so that YARN client will not upload again.
sysProps("spark.yarn.dist.jars") should be (tmpJarPath)
sysProps("spark.yarn.dist.files") should be (s"s3a://${file.getAbsolutePath}")
sysProps("spark.yarn.dist.pyFiles") should be (s"s3a://${pyFile.getAbsolutePath}")
// Local repl jars should be a local path.
sysProps("spark.repl.local.jars") should (startWith("file:"))
// local py files should not be a URI format.
sysProps("spark.submit.pyFiles") should (startWith("/"))
}
// NOTE: This is an expensive operation in terms of time (10 seconds+). Use sparingly.
private def runSparkSubmit(args: Seq[String]): Unit = {
val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
val sparkSubmitFile = if (Utils.isWindows) {
new File("..\\bin\\spark-submit.cmd")
} else {
new File("../bin/spark-submit")
}
val process = Utils.executeCommand(
Seq(sparkSubmitFile.getCanonicalPath) ++ args,
new File(sparkHome),
Map("SPARK_TESTING" -> "1", "SPARK_HOME" -> sparkHome))
try {
val exitCode = failAfter(60 seconds) { process.waitFor() }
if (exitCode != 0) {
fail(s"Process returned with exit code $exitCode. See the log4j logs for more detail.")
}
} finally {
// Ensure we still kill the process in case it timed out
process.destroy()
}
}
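// Writes the given key/value defaults into a temporary spark-defaults.conf, invokes `f` with the
// temp directory path (suitable as SPARK_CONF_DIR), and deletes the directory afterwards.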
private def forConfDir(defaults: Map[String, String]) (f: String => Unit) = {
val tmpDir = Utils.createTempDir()
val defaultsConf = new File(tmpDir.getAbsolutePath, "spark-defaults.conf")
val writer = new OutputStreamWriter(new FileOutputStream(defaultsConf), StandardCharsets.UTF_8)
for ((key, value) <- defaults) writer.write(s"$key $value\n")
writer.close()
try {
f(tmpDir.getAbsolutePath)
} finally {
Utils.deleteRecursively(tmpDir)
}
}
private def updateConfWithFakeS3Fs(conf: Configuration): Unit = {
conf.set("fs.s3a.impl", classOf[TestFileSystem].getCanonicalName)
conf.set("fs.s3a.impl.disable.cache", "true")
}
}
object JarCreationTest extends Logging {
def main(args: Array[String]) {
Utils.configTestLog4j("INFO")
val conf = new SparkConf()
val sc = new SparkContext(conf)
val result = sc.makeRDD(1 to 100, 10).mapPartitions { x =>
var exception: String = null
try {
Utils.classForName(args(0))
Utils.classForName(args(1))
} catch {
case t: Throwable =>
exception = t + "\n" + Utils.exceptionString(t)
exception = exception.replaceAll("\n", "\n\t")
}
Option(exception).toSeq.iterator
}.collect()
if (result.nonEmpty) {
throw new Exception("Could not load user class from jar:\n" + result(0))
}
sc.stop()
}
}
object SimpleApplicationTest {
def main(args: Array[String]) {
Utils.configTestLog4j("INFO")
val conf = new SparkConf()
val sc = new SparkContext(conf)
val configs = Seq("spark.master", "spark.app.name")
for (config <- configs) {
val masterValue = conf.get(config)
val executorValues = sc
.makeRDD(1 to 100, 10)
.map(x => SparkEnv.get.conf.get(config))
.collect()
.distinct
if (executorValues.size != 1) {
throw new SparkException(s"Inconsistent values for $config: $executorValues")
}
val executorValue = executorValues(0)
if (executorValue != masterValue) {
throw new SparkException(
s"Master had $config=$masterValue but executor had $config=$executorValue")
}
}
sc.stop()
}
}
object UserClasspathFirstTest {
def main(args: Array[String]) {
val ccl = Thread.currentThread().getContextClassLoader()
val resource = ccl.getResourceAsStream("test.resource")
val bytes = ByteStreams.toByteArray(resource)
val contents = new String(bytes, 0, bytes.length, StandardCharsets.UTF_8)
if (contents != "USER") {
throw new SparkException("Should have read user resource, but instead read: " + contents)
}
}
}
class TestFileSystem extends org.apache.hadoop.fs.LocalFileSystem {
override def copyToLocalFile(src: Path, dst: Path): Unit = {
// Ignore the scheme for testing.
super.copyToLocalFile(new Path(src.toUri.getPath), dst)
}
override def globStatus(pathPattern: Path): Array[FileStatus] = {
val newPath = new Path(pathPattern.toUri.getPath)
super.globStatus(newPath).map { status =>
val path = s"s3a://${status.getPath.toUri.getPath}"
status.setPath(new Path(path))
status
}
}
}
|
jlopezmalla/spark
|
core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
|
Scala
|
apache-2.0
| 35,985 |
package io.github.electricmind
import java.io.ByteArrayInputStream
import io.github.electricmind.autocomplete._
import org.scalatest.{FlatSpec, Matchers}
class testVocabulary extends FlatSpec with Matchers {
val sample = """
Only a few flies was flying around a corpse at the morning.
"""
def samplestream = new ByteArrayInputStream(sample.getBytes)
"A vocabulary" should "be created" in {
Vocabulary(samplestream) should have size 12
Vocabulary(samplestream) should contain allOf("Only", "a", "at")
}
}
|
electricmind/autocomplete
|
src/test/scala/autocomplete/testVocabulary.scala
|
Scala
|
apache-2.0
| 553 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ksmpartners.ernie.server
import net.liftweb.http._
import service.ServiceRegistry
import RestGenerator._
import ErnieRequestTemplates._
import net.liftweb.json._
/**
* Object containing the stateless dispatch definition for an ernie server.
* Generates a dispatch table using the resource tree defined in the api value.
*/
object DispatchRestAPI extends RestGenerator with JsonTranslator {
case class TimeoutResponse() extends LiftResponse with HeaderDefaults {
def toResponse = InMemoryResponse(Array(), headers, cookies, 504)
}
/**
* Wrap a TimeoutResponse and message in an ErnieError case class
*/
def timeoutErnieError(src: String = null): ErnieError = ErnieError(TimeoutResponse(), Some(new java.util.concurrent.TimeoutException(if (src != null) src + " timed out" else "Timeout")))
/**
* Shutdown the ernie server
*/
def shutdown() {
ServiceRegistry.shutDown
}
/**
* Set this variable for BASIC HTTP authentication.
* PartialFunction should:
* 1. Attempt to authenticate the user
* 1. If successful, populate userRoles RequestVar with roles (see [[com.ksmpartners.ernie.server.filter.SAMLConstants.]]).
* 1. Return the result of authentication as a Boolean
* For example:
* {{{
* basicAuthentication = ({
* case (user:String, pass:String, req:Req) =>
* MyUserCollection.getUser(user, pass).map( u => {
* userRoles(u.getRoles)
* true
* }) getOrElse false
* })
* }}}
*/
var basicAuthentication: PartialFunction[(String, String, Req), Boolean] = PartialFunction.empty[(String, String, Req), Boolean]
val reportDetail = Resource(Left("detail"), "Report details", false, List(getReportDetail, headReportDetail))
val jobResult = Resource(Left("result"), "Job results", false, List(getJobResult, headJobResult, deleteJobResult), reportDetail)
val jobStatus = Resource(Left("status"), "Job status", false, List(getJobStatus, headJobStatus))
val job = Resource(Right(Variable("job_id")), "Job resource", false, List(getJob, headJob), jobStatus, jobResult)
val expiredCatalog = Resource(Left("expired"), "Expired catalog", false, List(purgeExpired, getExpiredCatalog, headExpiredCatalog))
val failedCatalog = Resource(Left("failed"), "Failed catalog", false, List(getFailedCatalog, headFailedCatalog))
val deletedCatalog = Resource(Left("deleted"), "Deleted catalog", false, List(getDeletedCatalog, headDeletedCatalog))
val completeCatalog = Resource(Left("complete"), "Complete catalog", false, List(getCompleteCatalog, headCompleteCatalog))
val jobsCatalog = Resource(Left("catalog"), "Full catalog", false, List(getCatalog, headCatalog))
val jobsSwagger = Resource(Left("jobsapi"), "Jobs JSON", false, jobsJSON :: Nil)
val jobs = Resource(Left("jobs"), "Jobs api", true, List(getJobsList, headJobsList, postJob), job, jobsCatalog, completeCatalog, expiredCatalog, failedCatalog, deletedCatalog)
val design = Resource(Left("rptdesign"), "Definition rptdesign", false, List(putDesign))
val defi = Resource(Right(Variable("def_id")), "Definition resource", false, List(getDef, headDef, deleteDef), design)
val defsSwagger = Resource(Left("defsapi"), "Defs JSON", false, defsJSON :: Nil)
val defs = Resource(Left("defs"), "Definitions api", true, List(getDefs, headDefs, postDef), defi)
val swagger = Resource(Left("resources"), "Resources JSON", false, resourcesJSON :: Nil)
protected val api = jobs :: defs :: swagger :: jobsSwagger :: defsSwagger :: Nil
var jobsAPI: JObject = null
var defsAPI: JObject = null
var resourceListing: JObject = null
/**
* Initialize the ServiceRegistry and serve the API.
*/
def init() {
ServiceRegistry.init()
jobsAPI = SwaggerUtils.buildSwaggerApi(".1", "1.1", "http://localhost:8080", jobs)
defsAPI = SwaggerUtils.buildSwaggerApi(".1", "1.1", "http://localhost:8080", defs)
resourceListing = SwaggerUtils.buildSwaggerResourceListing(List(jobs, defs), ".1", "1.1", "http://localhost:8080")
super.serveApi()
}
}
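// Hedged wiring sketch (not part of the original ernie source): shows how a bootstrap might
// install BASIC authentication and then serve the API, following the scaladoc on
// basicAuthentication above. `bootUsers` is a hypothetical in-memory credential map; a real
// deployment would also populate the userRoles RequestVar as that scaladoc describes.
object DispatchRestAPIBootSketch {
  private val bootUsers = Map("admin" -> "secret")

  def boot(): Unit = {
    DispatchRestAPI.basicAuthentication = {
      case (user, pass, _) => bootUsers.get(user).exists(_ == pass)
    }
    DispatchRestAPI.init() // initializes the ServiceRegistry and generates the dispatch table
  }
}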
|
ksmpartners/ernie
|
ernie-server/src/main/scala/com/ksmpartners/ernie/server/DispatchRestAPI.scala
|
Scala
|
apache-2.0
| 4,634 |
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
import events.TestFailed
import org.scalatest.exceptions.DuplicateTestNameException
class FixturePropSpecSpec extends org.scalatest.FunSpec with PrivateMethodTester with SharedHelpers {
describe("A FixturePropSpec") {
it("should return the test names in order of registration from testNames") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("that") { fixture =>
}
property("this") { fixture =>
}
}
assertResult(List("that", "this")) {
a.testNames.iterator.toList
}
val b = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
}
assertResult(List[String]()) {
b.testNames.iterator.toList
}
val c = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("this") { fixture =>
}
property("that") { fixture =>
}
}
assertResult(List("this", "that")) {
c.testNames.iterator.toList
}
}
it("should throw NotAllowedException if a duplicate test name registration is attempted") {
intercept[DuplicateTestNameException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("test this") { fixture =>
}
property("test this") { fixture =>
}
}
}
intercept[DuplicateTestNameException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("test this") { fixture =>
}
ignore("test this") { fixture =>
}
}
}
intercept[DuplicateTestNameException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("test this") { fixture =>
}
ignore("test this") { fixture =>
}
}
}
intercept[DuplicateTestNameException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("test this") { fixture =>
}
property("test this") { fixture =>
}
}
}
}
it("should pass in the fixture to every test method") {
val a = new FixturePropSpec {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest): Outcome = {
test(hello)
}
property("this") { fixture =>
assert(fixture === hello)
}
property("that") { fixture =>
assert(fixture === hello)
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
assert(!rep.eventsReceived.exists(_.isInstanceOf[TestFailed]))
}
it("should throw NullPointerException if a null test tag is provided") {
// test
intercept[NullPointerException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("hi", null) { fixture => }
}
}
val caught = intercept[NullPointerException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("hi", mytags.SlowAsMolasses, null) { fixture => }
}
}
assert(caught.getMessage === "a test tag was null")
intercept[NullPointerException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) { fixture => }
}
}
// ignore
intercept[NullPointerException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("hi", null) { fixture => }
}
}
val caught2 = intercept[NullPointerException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("hi", mytags.SlowAsMolasses, null) { fixture => }
}
}
assert(caught2.getMessage === "a test tag was null")
intercept[NullPointerException] {
new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) { fixture => }
}
}
}
it("should return a correct tags map from the tags method") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("test this") { fixture => }
property("test that") { fixture => }
}
assertResult(Map("test this" -> Set("org.scalatest.Ignore"))) {
a.tags
}
val b = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("test this") { fixture => }
ignore("test that") { fixture => }
}
assertResult(Map("test that" -> Set("org.scalatest.Ignore"))) {
b.tags
}
val c = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
ignore("test this") { fixture => }
ignore("test that") { fixture => }
}
assertResult(Map("test this" -> Set("org.scalatest.Ignore"), "test that" -> Set("org.scalatest.Ignore"))) {
c.tags
}
val d = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("test this", mytags.SlowAsMolasses) { fixture => }
ignore("test that", mytags.SlowAsMolasses) { fixture => }
}
assertResult(Map("test this" -> Set("org.scalatest.SlowAsMolasses"), "test that" -> Set("org.scalatest.Ignore", "org.scalatest.SlowAsMolasses"))) {
d.tags
}
val e = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
}
assertResult(Map()) {
e.tags
}
val f = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = Succeeded
property("test this", mytags.SlowAsMolasses, mytags.WeakAsAKitten) { fixture => }
property("test that", mytags.SlowAsMolasses) { fixture => }
}
assertResult(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
f.tags
}
}
class TestWasCalledSuite extends FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
property("this") { fixture => theTestThisCalled = true }
property("that") { fixture => theTestThatCalled = true }
}
it("should execute all tests when run is called with testName None") {
val b = new TestWasCalledSuite
b.run(None, Args(SilentReporter))
assert(b.theTestThisCalled)
assert(b.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
val a = new TestWasCalledSuite
a.run(Some("this"), Args(SilentReporter))
assert(a.theTestThisCalled)
assert(!a.theTestThatCalled)
}
it("should report as ignored, and not run, tests marked ignored") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
property("test this") { fixture => theTestThisCalled = true }
property("test that") { fixture => theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
val b = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { fixture => theTestThisCalled = true }
property("test that") { fixture => theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB))
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.theTestThisCalled)
assert(b.theTestThatCalled)
val c = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
property("test this") { fixture => theTestThisCalled = true }
ignore("test that") { fixture => theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repC))
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.theTestThisCalled)
assert(!c.theTestThatCalled)
      // Tests are run and reported in order of appearance in the file; subtypes are able to
      // change the order.
val d = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { fixture => theTestThisCalled = true }
ignore("test that") { fixture => theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD))
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.theTestThisCalled)
assert(!d.theTestThatCalled)
}
it("should ignore a test marked as ignored if run is invoked with that testName") {
      // Even when a specific testName is provided to run, a test marked as ignored should be
      // reported as ignored and not actually invoked.
val e = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { fixture => theTestThisCalled = true }
property("test that") { fixture => theTestThatCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(Some("test this"), Args(repE))
assert(repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(!e.theTestThatCalled)
}
it("should run only those tests selected by the tags to include and exclude sets") {
// Nothing is excluded
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
property("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
property("test that") { fixture => theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
val b = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
property("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
property("test that") { fixture => theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repB.testIgnoredReceived)
assert(b.theTestThisCalled)
assert(!b.theTestThatCalled)
// SlowAsMolasses is included, and both tests should be included
val c = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
property("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
      c.run(None, Args(repC, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repC.testIgnoredReceived)
assert(c.theTestThisCalled)
assert(c.theTestThatCalled)
      // SlowAsMolasses is included; both tests should be included, but one is ignored
val d = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repD.testIgnoredReceived)
assert(!d.theTestThisCalled)
assert(d.theTestThatCalled)
// SlowAsMolasses included, FastAsLight excluded
val e = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
property("test the other") { fixture => theTestTheOtherCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(None, Args(repE, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(e.theTestThatCalled)
assert(!e.theTestTheOtherCalled)
// An Ignored test that was both included and excluded should not generate a TestIgnored event
val f = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
property("test the other") { fixture => theTestTheOtherCalled = true }
}
val repF = new TestIgnoredTrackingReporter
f.run(None, Args(repF, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repF.testIgnoredReceived)
assert(!f.theTestThisCalled)
assert(f.theTestThatCalled)
assert(!f.theTestTheOtherCalled)
// An Ignored test that was not included should not generate a TestIgnored event
val g = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
ignore("test the other") { fixture => theTestTheOtherCalled = true }
}
val repG = new TestIgnoredTrackingReporter
g.run(None, Args(repG, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repG.testIgnoredReceived)
assert(!g.theTestThisCalled)
assert(g.theTestThatCalled)
assert(!g.theTestTheOtherCalled)
// No tagsToInclude set, FastAsLight excluded
val h = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
property("test the other") { fixture => theTestTheOtherCalled = true }
}
val repH = new TestIgnoredTrackingReporter
h.run(None, Args(repH, Stopper.default, Filter(None, Set("org.scalatest.FastAsLight")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repH.testIgnoredReceived)
assert(!h.theTestThisCalled)
assert(h.theTestThatCalled)
assert(h.theTestTheOtherCalled)
// No tagsToInclude set, SlowAsMolasses excluded
val i = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
property("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
property("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
property("test the other") { fixture => theTestTheOtherCalled = true }
}
val repI = new TestIgnoredTrackingReporter
i.run(None, Args(repI, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repI.testIgnoredReceived)
assert(!i.theTestThisCalled)
assert(!i.theTestThatCalled)
assert(i.theTestTheOtherCalled)
// No tagsToInclude set, SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
val j = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
ignore("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
property("test the other") { fixture => theTestTheOtherCalled = true }
}
val repJ = new TestIgnoredTrackingReporter
j.run(None, Args(repJ, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
      assert(!repJ.testIgnoredReceived)
assert(!j.theTestThisCalled)
assert(!j.theTestThatCalled)
assert(j.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
val k = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
ignore("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
ignore("test the other") { fixture => theTestTheOtherCalled = true }
}
val repK = new TestIgnoredTrackingReporter
k.run(None, Args(repK, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repK.testIgnoredReceived)
assert(!k.theTestThisCalled)
assert(!k.theTestThatCalled)
assert(!k.theTestTheOtherCalled)
}
it("should return the correct test count from its expectedTestCount method") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("test this") { fixture => }
property("test that") { fixture => }
}
assert(a.expectedTestCount(Filter()) === 2)
val b = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
ignore("test this") { fixture => }
property("test that") { fixture => }
}
assert(b.expectedTestCount(Filter()) === 1)
val c = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("test this", mytags.FastAsLight) { fixture => }
property("test that") { fixture => }
}
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
val d = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("test this", mytags.FastAsLight, mytags.SlowAsMolasses) { fixture => }
property("test that", mytags.SlowAsMolasses) { fixture => }
property("test the other thing") { fixture => }
}
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
assert(d.expectedTestCount(Filter()) === 3)
val e = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("test this", mytags.FastAsLight, mytags.SlowAsMolasses) { fixture => }
property("test that", mytags.SlowAsMolasses) { fixture => }
ignore("test the other thing") { fixture => }
}
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0)
assert(e.expectedTestCount(Filter()) === 2)
val f = new Suites(a, b, c, d, e)
assert(f.expectedTestCount(Filter()) === 10)
}
it("should generate a TestPending message when the test body is (pending)") {
val a = new FixturePropSpec {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest): Outcome = {
test(hello)
}
property("should do this") (pending)
property("should do that") { fixture =>
assert(fixture === hello)
}
property("should do something else") { fixture =>
assert(fixture === hello)
pending
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tp = rep.testPendingEventsReceived
assert(tp.size === 2)
}
it("should allow tests without fixtures to be combined with tests with fixtures") {
val a = new FixturePropSpec {
var theTestWithFixtureWasRun = false
var theTestWithoutFixtureWasRun = false
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest): Outcome = {
test(hello)
}
property("should do this") (pending)
property("should do that") { fixture =>
assert(fixture === hello)
theTestWithFixtureWasRun = true
}
property("should do something else") { fixture =>
assert(fixture === hello)
pending
}
property("should do that without a fixture") { () =>
assert(2 + 2 === 4)
theTestWithoutFixtureWasRun = true
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tp = rep.testPendingEventsReceived
assert(tp.size === 2)
assert(a.theTestWithFixtureWasRun)
assert(a.theTestWithoutFixtureWasRun)
}
it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
"known in JDK 1.5, excluding AssertionError") {
val a = new FixturePropSpec {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest): Outcome = {
test(hello)
}
property("throws AssertionError") { s => throw new AssertionError }
property("throws plain old Error") { s => throw new Error }
property("throws Throwable") { s => throw new Throwable }
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tf = rep.testFailedEventsReceived
assert(tf.size === 3)
}
it("should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " +
"AssertionError, causing Suites and Runs to abort.") {
val a = new FixturePropSpec {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest): Outcome = {
test(hello)
}
property("throws AssertionError") { s => throw new OutOfMemoryError }
}
intercept[OutOfMemoryError] {
a.run(None, Args(SilentReporter))
}
}
it("should allow both tests that take fixtures and tests that don't") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = {
test("Hello, world!")
}
var takesNoArgsInvoked = false
property("take no args") { () => takesNoArgsInvoked = true }
var takesAFixtureInvoked = false
property("takes a fixture") { s => takesAFixtureInvoked = true }
}
a.run(None, Args(SilentReporter))
assert(a.testNames.size === 2, a.testNames)
assert(a.takesNoArgsInvoked)
assert(a.takesAFixtureInvoked)
}
it("should work with test functions whose inferred result type is not Unit") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = {
test("Hello, world!")
}
var takesNoArgsInvoked = false
property("take no args") { () => takesNoArgsInvoked = true; true }
var takesAFixtureInvoked = false
property("takes a fixture") { s => takesAFixtureInvoked = true; true }
}
assert(!a.takesNoArgsInvoked)
assert(!a.takesAFixtureInvoked)
a.run(None, Args(SilentReporter))
assert(a.testNames.size === 2, a.testNames)
assert(a.takesNoArgsInvoked)
assert(a.takesAFixtureInvoked)
}
it("should work with ignored tests whose inferred result type is not Unit") {
val a = new FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { () => theTestThisCalled = true; "hi" }
ignore("test that") { fixture => theTestThatCalled = true; 42 }
}
assert(!a.theTestThisCalled)
assert(!a.theTestThatCalled)
val reporter = new EventRecordingReporter
a.run(None, Args(reporter))
assert(reporter.testIgnoredEventsReceived.size === 2)
assert(!a.theTestThisCalled)
assert(!a.theTestThatCalled)
}
it("should pass a NoArgTest to withFixture for tests that take no fixture") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
var aNoArgTestWasPassed = false
var aOneArgTestWasPassed = false
override def withFixture(test: NoArgTest): Outcome = {
aNoArgTestWasPassed = true
Succeeded
}
def withFixture(test: OneArgTest): Outcome = {
aOneArgTestWasPassed = true
Succeeded
}
property("something") { () =>
assert(1 + 1 === 2)
}
}
val s = new MySuite
s.run(None, Args(SilentReporter))
assert(s.aNoArgTestWasPassed)
assert(!s.aOneArgTestWasPassed)
}
it("should not pass a NoArgTest to withFixture for tests that take a Fixture") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
var aNoArgTestWasPassed = false
var aOneArgTestWasPassed = false
override def withFixture(test: NoArgTest): Outcome = {
aNoArgTestWasPassed = true
Succeeded
}
def withFixture(test: OneArgTest): Outcome = {
aOneArgTestWasPassed = true
Succeeded
}
property("something") { fixture =>
assert(1 + 1 === 2)
}
}
val s = new MySuite
s.run(None, Args(SilentReporter))
assert(!s.aNoArgTestWasPassed)
assert(s.aOneArgTestWasPassed)
}
it("should pass a NoArgTest that invokes the no-arg test when the " +
"NoArgTest's no-arg apply method is invoked") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
var theNoArgTestWasInvoked = false
def withFixture(test: OneArgTest): Outcome = {
// Shouldn't be called, but just in case don't invoke a OneArgTest
Succeeded
}
property("something") { () =>
theNoArgTestWasInvoked = true
}
}
val s = new MySuite
s.run(None, Args(SilentReporter))
assert(s.theNoArgTestWasInvoked)
}
it("should pass the correct test name in the OneArgTest passed to withFixture") {
val a = new FixturePropSpec {
type FixtureParam = String
var correctTestNameWasPassed = false
def withFixture(test: OneArgTest): Outcome = {
correctTestNameWasPassed = test.name == "something"
test("hi")
}
property("something") { fixture => }
}
a.run(None, Args(SilentReporter))
assert(a.correctTestNameWasPassed)
}
it("should pass the correct config map in the OneArgTest passed to withFixture") {
val a = new FixturePropSpec {
type FixtureParam = String
var correctConfigMapWasPassed = false
def withFixture(test: OneArgTest): Outcome = {
correctConfigMapWasPassed = (test.configMap == ConfigMap("hi" -> 7))
test("hi")
}
property("something") { fixture => }
}
a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> 7), None, new Tracker(), Set.empty))
assert(a.correctConfigMapWasPassed)
}
describe("(when a nesting rule has been violated)") {
it("should, if they call a nested it from within an it clause, result in a TestFailedException when running the test") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("should blow up") { fixture =>
property("should never run") { fixture =>
assert(1 === 1)
}
}
}
val spec = new MySuite
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("should blow up") { fixture =>
property("should never run", mytags.SlowAsMolasses) { fixture =>
assert(1 === 1)
}
}
}
val spec = new MySuite
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("should blow up") { fixture =>
ignore("should never run") { fixture =>
assert(1 === 1)
}
}
}
val spec = new MySuite
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = { test("hi") }
property("should blow up") { fixture =>
ignore("should never run", mytags.SlowAsMolasses) { fixture =>
assert(1 === 1)
}
}
}
val spec = new MySuite
ensureTestFailedEventReceived(spec, "should blow up")
}
}
it("should throw IllegalArgumentException if passed a testName that doesn't exist") {
class MySuite extends FixturePropSpec {
type FixtureParam = String
def withFixture(test: OneArgTest): Outcome = {
test("hi")
}
property("one") {s => () }
property("two") {s => () }
}
val suite = new MySuite
intercept[IllegalArgumentException] {
suite.run(Some("three"), Args(SilentReporter))
}
}
}
}
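// --- Illustrative sketch (not part of the spec above) ---
// This shows the loan-pattern shape those specs exercise: a FixturePropSpec supplies each
// one-arg test with a FixtureParam via withFixture, while tests written with `() =>` go
// through the no-arg overload instead. The class and test names below are made up.
class StringBuilderFixtureSketch extends FixturePropSpec {
  type FixtureParam = StringBuilder

  def withFixture(test: OneArgTest): Outcome = {
    val sb = new StringBuilder("ScalaTest is ") // set up the fixture
    test(sb)                                    // "loan" it to the test body
  }

  property("tests can use the loaned fixture") { sb =>
    sb.append("easy!")
    assert(sb.toString === "ScalaTest is easy!")
  }

  property("tests can also opt out of the fixture") { () =>
    assert(2 + 2 === 4)
  }
}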
|
svn2github/scalatest
|
src/test/scala/org/scalatest/fixture/FixturePropSpecSpec.scala
|
Scala
|
apache-2.0
| 36,070 |
trait Parameters {
def InverterDelay = 2
def AndGateDelay = 3
def OrGateDelay = 5
}
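// Illustrative sketch (not part of the course file above): the delays are plain defs so a
// simulation can mix in Parameters and override them per configuration; the circuit
// simulation classes that consume these values live elsewhere in the examples.
object FasterParameters extends Parameters {
  override def InverterDelay = 1
  override def AndGateDelay = 2
  override def OrGateDelay = 4
}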
|
tobal/scala-course
|
examples/discrete_event_simulation/Parameters.scala
|
Scala
|
gpl-3.0
| 91 |
package perm.tryfuture.exchange
import perm.tryfuture.exchange.ExchangeServer.{BuyOrder, SellOrder}
import perm.tryfuture.exchange.OrderBook.Trade
import scala.collection.SortedSet
case class OrderBook(buyOrders: SortedSet[BuyOrder], sellOrders: SortedSet[SellOrder], trades: List[Trade]) {
def processBuyOrder(buyOrder: BuyOrder): OrderBook = {
sellOrders.headOption match {
case Some(sellOrder) if buyOrder.price >= sellOrder.price =>
if (sellOrder.quantity >= buyOrder.quantity) {
val trade = Trade(sellOrder.price, buyOrder.quantity, buyOrder, sellOrder)
if (sellOrder.quantity > buyOrder.quantity) {
val sellRestOrder = sellOrder.copy(quantity = sellOrder.quantity - buyOrder.quantity)
OrderBook(buyOrders, sellOrders - sellOrder + sellRestOrder, trade :: trades)
} else {
OrderBook(buyOrders, sellOrders - sellOrder, trade :: trades)
}
} else {
val trade = Trade(sellOrder.price, sellOrder.quantity, buyOrder, sellOrder)
val buyRestOrder = buyOrder.copy(quantity = buyOrder.quantity - sellOrder.quantity)
OrderBook(buyOrders, sellOrders - sellOrder, trade :: trades).processBuyOrder(buyRestOrder)
}
case _ =>
this.copy(buyOrders = buyOrders + buyOrder)
}
}
def processSellOrder(sellOrder: SellOrder): OrderBook = {
buyOrders.headOption match {
case Some(buyOrder) if sellOrder.price <= buyOrder.price =>
if (buyOrder.quantity >= sellOrder.quantity) {
val trade = Trade(buyOrder.price, sellOrder.quantity, buyOrder, sellOrder)
if (buyOrder.quantity > sellOrder.quantity) {
val buyRestOrder = buyOrder.copy(quantity = buyOrder.quantity - sellOrder.quantity)
OrderBook(buyOrders - buyOrder + buyRestOrder, sellOrders, trade :: trades)
} else {
OrderBook(buyOrders - buyOrder, sellOrders, trade :: trades)
}
} else {
val trade = Trade(buyOrder.price, buyOrder.quantity, buyOrder, sellOrder)
val sellRestOrder = sellOrder.copy(quantity = sellOrder.quantity - buyOrder.quantity)
OrderBook(buyOrders - buyOrder, sellOrders, trade :: trades).processSellOrder(sellRestOrder)
}
case _ =>
this.copy(sellOrders = sellOrders + sellOrder)
}
}
def forgetHistory: OrderBook = {
copy(trades = List())
}
}
object OrderBook {
def empty = OrderBook(SortedSet(), SortedSet(), List())
case class Trade(price: BigDecimal, quantity: Long, buyOrder: BuyOrder, sellOrder: SellOrder)
}
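// Illustrative sketch (not from this repository): ExchangeServer.BuyOrder/SellOrder are
// defined elsewhere, so the tiny types below are assumptions used only to show the core
// matching rule of processBuyOrder/processSellOrder above: a crossing order trades at the
// resting order's price, for the smaller of the two quantities, and any remainder either
// stays on the book (resting side) or keeps matching (incoming side).
object MatchingRuleSketch extends App {
  final case class Buy(price: BigDecimal, quantity: Long)
  final case class Sell(price: BigDecimal, quantity: Long)

  // One matching step for an incoming buy against the best resting sell:
  // returns the (trade price, trade quantity) if the orders cross, else None.
  def matchStep(incoming: Buy, restingAsk: Sell): Option[(BigDecimal, Long)] =
    if (incoming.price >= restingAsk.price)
      Some((restingAsk.price, incoming.quantity min restingAsk.quantity))
    else None

  println(matchStep(Buy(BigDecimal(102), 4), Sell(BigDecimal(101), 10))) // Some((101,4)); 6 remain on the book
  println(matchStep(Buy(BigDecimal(100), 4), Sell(BigDecimal(101), 10))) // None; no cross, the buy would rest instead
}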
|
ipostanogov/akka-stock-exchange
|
src/main/scala/perm/tryfuture/exchange/OrderBook.scala
|
Scala
|
mit
| 2,611 |
package io.surfkit.modules
import java.util.Date
import io.surfkit.core.common.PostgresService
import io.surfkit.model.Chat.{DbEntry, ChatID}
import io.surfkit.model._
import org.joda.time.LocalDateTime
import play.api.libs.json._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
trait ChatStore extends PostgresService {
implicit val chatIdRead2: Reads[ChatID] = (JsPath).read[Long].map(ChatID(_))
implicit val chatIdWrite2: Writes[ChatID] = Writes {
(chatId: ChatID) => JsNumber(chatId.chatId)
}
import PostgresQ._
//TODO Wait to see if it's used
/* def getChatById( id: Long, entries: Option[java.util.Date] = None, maxid: Option[Long] = None ):Future[Option[Chat]] = {
Q("""
SELECT * FROM public.\"Chat\" c
WHERE c.chat_id = ?;
"""
).use(id)
None
}*/
def createSchemaQ() =
Q(
"""
|--
|-- Name: Chat; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
|--
|
|CREATE TABLE "Chat" (
| chat_id bigint NOT NULL,
| chat_created timestamp without time zone NOT NULL,
| chat_updated timestamp without time zone NOT NULL,
| chat_creator_key bigint NOT NULL,
| group_name character varying(255),
| group_permission smallint DEFAULT 0
|);
|
|
|ALTER TABLE public."Chat" OWNER TO postgres;
|
|--
|-- Name: ChatEntry; Type: TABLE; Schema: public; Owner: postgres; Tablespace:
|--
|
|CREATE TABLE "ChatEntry" (
| chatentry_id bigint NOT NULL,
| chatentry_from_jid character varying(256) NOT NULL,
| chatentry_json json NOT NULL,
| chatentry_chat_key bigint NOT NULL,
| chatentry_provider smallint NOT NULL,
| chatentry_timestamp timestamp without time zone NOT NULL
|);
|
|
|ALTER TABLE public."ChatEntry" OWNER TO postgres;
|
|--
|-- Name: ChatEntry_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
|--
|
|CREATE SEQUENCE "ChatEntry_id_seq"
| START WITH 1
| INCREMENT BY 1
| NO MINVALUE
| NO MAXVALUE
| CACHE 1;
|
|
|ALTER TABLE public."ChatEntry_id_seq" OWNER TO postgres;
|
|--
|-- Name: ChatEntry_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
|--
|
|ALTER SEQUENCE "ChatEntry_id_seq" OWNED BY "ChatEntry".chatentry_id;
|
|
|--
|-- Name: Chat_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres
|--
|
|CREATE SEQUENCE "Chat_id_seq"
| START WITH 1
| INCREMENT BY 1
| NO MINVALUE
| NO MAXVALUE
| CACHE 1;
|
|
|ALTER TABLE public."Chat_id_seq" OWNER TO postgres;
|
|--
|-- Name: Chat_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres
|--
|
|ALTER SEQUENCE "Chat_id_seq" OWNED BY "Chat".chat_id;
|
|
|--
|-- Name: chat_id; Type: DEFAULT; Schema: public; Owner: postgres
|--
|
|ALTER TABLE ONLY "Chat" ALTER COLUMN chat_id SET DEFAULT nextval('"Chat_id_seq"'::regclass);
|
|
|--
|-- Name: chatentry_id; Type: DEFAULT; Schema: public; Owner: postgres
|--
|
|ALTER TABLE ONLY "ChatEntry" ALTER COLUMN chatentry_id SET DEFAULT nextval('"ChatEntry_id_seq"'::regclass);
|
|--
|-- Name: ChatEntry_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
|--
|
|ALTER TABLE ONLY "ChatEntry"
| ADD CONSTRAINT "ChatEntry_pkey" PRIMARY KEY (chatentry_id);
|
|
|--
|-- Name: Chat_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres; Tablespace:
|--
|
|ALTER TABLE ONLY "Chat"
| ADD CONSTRAINT "Chat_pkey" PRIMARY KEY (chat_id);
|
|--
|-- Name: ChatEntry_from_jid_idx; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
|--
|
|CREATE INDEX "ChatEntry_from_jid_idx" ON "ChatEntry" USING btree (chatentry_from_jid);
|
|--
|-- Name: fki_Chat_creator_key_fkey; Type: INDEX; Schema: public; Owner: postgres; Tablespace:
|--
|
|CREATE INDEX "fki_Chat_creator_key_fkey" ON "Chat" USING btree (chat_creator_key);
|
|--
|-- Name: ChatEntry_chat_key_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
|--
|
|ALTER TABLE ONLY "ChatEntry"
| ADD CONSTRAINT "ChatEntry_chat_key_fkey" FOREIGN KEY (chatentry_chat_key) REFERENCES "Chat"(chat_id);
|
|
|--
|-- Name: Chat_creator_key_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
|--
|
|ALTER TABLE ONLY "Chat"
| ADD CONSTRAINT "Chat_creator_key_fkey" FOREIGN KEY (chat_creator_key) REFERENCES "Users"(user_id);
|
|
""".stripMargin
).sendCreateSchemaQuery
createSchemaQ()
implicit val chatEntryReader = RowReader[Chat.DbEntry]{ row =>
Chat.DbEntry(
row("chatentry_chat_key").asInstanceOf[Long],
row("chatentry_id").asInstanceOf[Long],
row("chatentry_from_jid").asInstanceOf[String],
row("chatentry_timestamp").asInstanceOf[LocalDateTime].toDate.getTime,
row("chatentry_provider").asInstanceOf[Short],
row("chatentry_json").asInstanceOf[String]
)
}
def createChat(uid: Long, name: Option[String] = None, permission: Option[Short] = None): Future[ChatID] = {
Q(
"""
|INSERT INTO public."Chat" (chat_created,chat_updated,chat_creator_key, group_name, group_permission)
|VALUES (?,?,?,?,?)
|RETURNING chat_id;
"""
).use(dateTimeStr(), dateTimeStr(), uid, name.getOrElse(null), permission.getOrElse(null)).getSingle[ChatID]("chat_id")
}
def createGroup(uid: Long, name: String, permission: Short): Future[Long] = {
Q(
"""
|INSERT INTO public."Chat" (chat_created,chat_updated,chat_creator_key, group_name, group_permission)
|VALUES (?,?,?,?,?)
|RETURNING chat_id;
"""
).use(dateTimeStr(), dateTimeStr(), uid, name, permission).getSingle[Long]("chat_id")
}
def getChatEntriesByChatId( id: ChatID, offset: Long = 0L, limit: Long = 20L ):Future[Seq[Chat.DbEntry]] = {
Q(
"""
|SELECT *
|FROM public."ChatEntry" CE
|WHERE CE.chatentry_chat_key = ?
|ORDER BY CE.chatentry_id DESC
|LIMIT ? OFFSET ?;
"""
).use(id, limit, offset).getRows[Chat.DbEntry]
}
def getChatEntriesForChats(chatIds:Seq[Long], date: Date, limit: Int = 25):Future[Seq[Chat.DbEntry]] = {
Q(
"""
|SELECT DISTINCT ON (ce.chatentry_chat_key) *
|FROM public."ChatEntry" ce
|JOIN public."Chat" c ON c.chat_id = ce.chatentry_chat_key
|WHERE c.chat_id = ANY(?) AND CE.chatentry_timestamp > ?
|ORDER BY ce.chatentry_chat_key, ce.chatentry_id DESC
|LIMIT ?;
"""
).use(chatIds.toArray, dateTimeStr(date), limit).getRows[Chat.DbEntry]
}
def addChatEntry(chatid: ChatID, from: String, provider: Providers.Provider, msg: String):Future[DbEntry] = {
val now = new Date()
val nowStr = dateTimeStr(now)
Q(
"""
|INSERT INTO public."ChatEntry"
| (chatentry_from_jid, chatentry_timestamp, chatentry_chat_key, chatentry_provider, chatentry_json)
| VALUES(?,?,?,?,?)
| RETURNING chatentry_id;
"""
).use(from, nowStr, chatid.chatId, provider.idx, Json.obj("msg" -> msg, "ts" -> nowStr)).getSingle[Long]("chatentry_id").map{
entryId =>
          DbEntry(chatid.chatId, entryId, from, now.getTime, provider.idx.asInstanceOf[Short], Json.obj("msg" -> msg, "ts" -> nowStr).toString)
}
}
}
|
coreyauger/surfkit
|
modules/sexwax/src/main/scala/io/surfkit/modules/ChatStore.scala
|
Scala
|
mit
| 8,139 |
/*
* Copyright 2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package mapper {
import _root_.org.specs._
import _root_.org.specs.runner.JUnit3
import _root_.org.specs.runner.ConsoleRunner
import _root_.net.liftweb.common._
import _root_.net.liftweb.util._
import Helpers._
class MappedDateSpecsAsTest extends JUnit3(MappedDateSpecs)
object MappedDateSpecsRunner extends ConsoleRunner(MappedDateSpecs)
object MappedDateSpecs extends Specification {
"MappedDate" should {
"handle a Number in setFromAny" in {
val dog = Dog2.create
val currentDate = new java.util.Date()
dog.createdTime.setFromAny(BigInt(currentDate.getTime))
dog.createdTime.is mustEqual currentDate
}
"handle a full Box in setFromAny" in {
val dog = Dog2.create
val someDate = new java.util.Date(1000)
dog.createdTime.setFromAny(Full(someDate))
dog.createdTime.is mustEqual someDate
}
"handle en empty Box in setFromAny" in {
val dog = Dog2.create
dog.createdTime.setFromAny(Empty)
dog.createdTime.is must beNull
}
}
}
}
}
|
lift/lift
|
framework/lift-persistence/lift-mapper/src/test/scala/net/liftweb/mapper/MappedDateSpec.scala
|
Scala
|
apache-2.0
| 1,661 |
/* *********************************************************************************************************************
*
* Copyright (C) 2010 by the Stratosphere project (http://stratosphere.eu)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* ********************************************************************************************************************
*/
package eu.stratosphere.tpch
import eu.stratosphere.pact.client.LocalExecutor
import eu.stratosphere.pact.client.RemoteExecutor
import eu.stratosphere.tpch.config.TPCHConfig
import eu.stratosphere.tpch.query.TPCHQuery
import eu.stratosphere.scala.ScalaPlan
import eu.stratosphere.tpch.config.TPCHConfig
import scopt.TPCHOptionParser
/**
* Abstract job runner encapsulating common driver logic.
*/
abstract class AbstractJobRunner {
/**
* Main method.
*/
def main(args: Array[String]) {
TPCHOptionParser().parse(args, TPCHConfig()) map { config =>
try {
TPCHQuery.createQuery(config)
.map(query => executeQuery(query.plan()))
.getOrElse {
System.err.println(f"Sorry, TPC-H Query #${config.queryNo}%02d is not yet supported.")
}
} catch {
case e: Throwable => {
System.err.println("Unexpected error during execution: " + e.getMessage())
e.printStackTrace(System.err)
System.exit(-1)
}
}
} getOrElse {
System.exit(-1)
}
}
/**
* Executes the query in a specific environment (local or remote).
*/
def executeQuery(plan: ScalaPlan)
}
/**
* To run TPCH Query X locally with this class using:
* mvn exec:exec -Dexec.executable="java" -Dexec.args="-cp %classpath eu.stratosphere.tpch.LocalJobRunner QXX 2 file:///tpch/path file:///output/path <Query-X-args>"
*/
object LocalJobRunner extends AbstractJobRunner {
def executeQuery(plan: ScalaPlan) {
LocalExecutor.execute(plan)
}
}
/**
* To run TPCH Query X on a cluster with this class using:
* mvn exec:exec -Dexec.executable="java" -Dexec.args="-cp %classpath eu.stratosphere.tpch.RemoteJobRunner QXX 2 file:///input/path file:///output/path <Query-X-args>"
*/
object RemoteJobRunner extends AbstractJobRunner {
def executeQuery(plan: ScalaPlan) {
(new RemoteExecutor("localhost", 6123, "target/stratosphere-tpch-bin.jar")).executePlan(plan)
}
}
|
stratosphere/stratosphere-tpch
|
src/main/scala/eu/stratosphere/tpch/Job.scala
|
Scala
|
apache-2.0
| 2,851 |
package fpinscala.datastructures
import scala.annotation.tailrec
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]
object Tree {
def size(tree: Tree[_]): Int =
tree match {
case (Leaf(value)) => 1
case (Branch(left, right)) => 1 + size(left) + size(right)
}
def maximum(tree: Tree[Int]): Int =
tree match {
case (Leaf(value)) => value
case (Branch(left, right)) => maximum(left).max(maximum(right))
}
def depth(tree: Tree[_]): Int =
tree match {
case (Leaf(value)) => 0
case (Branch(left, right)) => 1 + depth(left).max(depth(right))
}
def map[A, B](tree: Tree[A])(f: A => B): Tree[B] =
tree match {
case (Leaf(value)) => Leaf(f(value))
case (Branch(left, right)) => Branch(map(left)(f), map(right)(f))
}
def fold[A, B](tree: Tree[A])(f: A => B)(g: (B, B) => B): B =
tree match {
case Leaf(v) => f(v)
case Branch(l: Tree[A], r: Tree[A]) => g(fold(l)(f)(g), fold(r)(f)(g))
}
def sizeViaFold[A](t: Tree[A]): Int =
fold(t)(a => 1)(1 + _ + _)
def maximumViaFold[T](t: Tree[T])(implicit ev: Numeric[T]): T =
fold(t)(a => a)((x: T, y: T) => ev.max(x, y))
def depthViaFold[A](t: Tree[A]): Int =
fold(t)(a => 0)((x:Int, y:Int) => 1 + x.max(y))
}
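// Illustrative sketch (not part of the exercise file above): a quick check of the
// combinators defined above on a small tree, assuming these definitions are in scope.
object TreeSketch extends App {
  //      Branch
  //      /    \
  //  Branch   Leaf(9)
  //   /  \
  //  1    4
  val t: Tree[Int] = Branch(Branch(Leaf(1), Leaf(4)), Leaf(9))

  println(Tree.size(t))           // 5 (3 leaves + 2 branches)
  println(Tree.maximum(t))        // 9
  println(Tree.depth(t))          // 2
  println(Tree.sizeViaFold(t))    // 5
  println(Tree.maximumViaFold(t)) // 9
  println(Tree.map(t)(_ * 10))    // Branch(Branch(Leaf(10),Leaf(40)),Leaf(90))
}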
|
RawToast/fpinscala
|
exercises/src/main/scala/fpinscala/datastructures/Tree.scala
|
Scala
|
mit
| 1,362 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License") you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.aliyun.dts
import scala.collection.JavaConversions._
import com.alibaba.fastjson.JSONObject
import com.aliyun.drc.clusterclient.message.ClusterMessage
import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.function.{Function => JFunction}
import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaStreamingContext, JavaReceiverInputDStream}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
/**
* Various utility classes for working with Aliyun DTS.
*/
object DtsUtils extends Logging {
/**
   * Create an input stream that pulls messages from an Aliyun DTS stream.
* {{{
* val ssc: StreamingSparkContext = ...
* val accessKeyId = "kj7aY*******UYx6"
* val accessKeySecret = "AiNMAlxz*************1PxaPaL8t"
* val guid = "dts-guid-name"
*
* def func: ClusterMessage => String = msg => msg.getRecord.toString
*
* val dtsStream = DtsUtils.createStream(
* ssc,
* accessKeyId,
* accessKeySecret,
* guid,
* func,
* StorageLevel.MEMORY_AND_DISK_2,
* false)
*
* dtsStream.foreachRDD(rdd => {
* ...
* })
*
* }}}
* @param ssc StreamingContext object.
* @param accessKeyId Aliyun Access Key ID.
* @param accessKeySecret Aliyun Access Key Secret.
* @param guid Aliyun DTS guid name.
* @param func Extract information from DTS record message.
* @param storageLevel Storage level to use for storing the received objects.
* StorageLevel.MEMORY_AND_DISK_2 is recommended.
* @param usePublicIp use public ip or not.
* @return
*/
@Experimental
def createStream(
ssc: StreamingContext,
accessKeyId: String,
accessKeySecret: String,
guid: String,
func: ClusterMessage => String,
storageLevel: StorageLevel,
usePublicIp: Boolean): ReceiverInputDStream[String] = {
new BinlogDStream(ssc, accessKeyId, accessKeySecret, guid, func, storageLevel, usePublicIp)
}
/**
   * Create an input stream that pulls messages from an Aliyun DTS stream.
* {{{
* val ssc: StreamingSparkContext = ...
* val accessKeyId = "kj7aY*******UYx6"
* val accessKeySecret = "AiNMAlxz*************1PxaPaL8t"
* val guid = "dts-guid-name"
*
* val dtsStream = DtsUtils.createStream(
* ssc,
* accessKeyId,
* accessKeySecret,
* guid,
* StorageLevel.MEMORY_AND_DISK_2,
* false)
*
* dtsStream.foreachRDD(rdd => {
* ...
* })
*
* }}}
* @param ssc StreamingContext object.
* @param accessKeyId Aliyun Access Key ID.
* @param accessKeySecret Aliyun Access Key Secret.
* @param guid Aliyun DTS guid name.
* @param storageLevel Storage level to use for storing the received objects.
* StorageLevel.MEMORY_AND_DISK_2 is recommended.
* @param usePublicIp use public ip or not.
* @return
*/
@Experimental
def createStream(
ssc: StreamingContext,
accessKeyId: String,
accessKeySecret: String,
guid: String,
storageLevel: StorageLevel,
usePublicIp: Boolean): ReceiverInputDStream[String] = {
new BinlogDStream(ssc, accessKeyId, accessKeySecret, guid, defaultMessageFunc, storageLevel, usePublicIp)
}
/**
   * Create an input stream that pulls messages from an Aliyun DTS stream.
* {{{
* JavaStreamingContext jssc = ...;
* String accessKeyId = "kj7aY*******UYx6";
* String accessKeySecret = "AiNMAlxz*************1PxaPaL8t";
* String guid = "dts-guid-name";
*
* static class ReadMessage implements Function<ClusterMessage, String> {
* @Override
* public String call(ClusterMessage msg) {
* return msg.getRecord.toString;
* }
* }
*
* JavaReceiverInputDStream<String> dtsStream = DtsUtils.createStream(
* ssc,
* accessKeyId,
* accessKeySecret,
* guid,
* ReadMessage,
* StorageLevel.MEMORY_AND_DISK_2,
* false);
*
* dtsStream.foreachRDD(rdd => {
* ...
* });
* }}}
* @param jssc Java streamingContext object.
* @param accessKeyId Aliyun Access Key ID.
* @param accessKeySecret Aliyun Access Key Secret.
* @param guid Aliyun DTS guid name.
* @param func Extract information from DTS record message.
* @param storageLevel Storage level to use for storing the received objects.
* StorageLevel.MEMORY_AND_DISK_2 is recommended.
* @param usePublicIp use public ip or not.
* @return
*/
@Experimental
def createStream(
jssc: JavaStreamingContext,
accessKeyId: String,
accessKeySecret: String,
guid: String,
func: JFunction[ClusterMessage, String],
storageLevel: StorageLevel,
usePublicIp: Boolean): JavaReceiverInputDStream[String] = {
createStream(jssc.ssc, accessKeyId, accessKeySecret, guid, (msg: ClusterMessage) => func.call(msg),
storageLevel, usePublicIp)
}
/**
   * Create an input stream that pulls messages from an Aliyun DTS stream.
* {{{
* JavaStreamingContext jssc = ...;
* String accessKeyId = "kj7aY*******UYx6";
* String accessKeySecret = "AiNMAlxz*************1PxaPaL8t";
* String guid = "dts-guid-name";
*
* JavaReceiverInputDStream<String> dtsStream = DtsUtils.createStream(
* ssc,
* accessKeyId,
* accessKeySecret,
* guid,
* StorageLevel.MEMORY_AND_DISK_2,
* false);
*
* dtsStream.foreachRDD(rdd => {
* ...
* });
* }}}
* @param jssc Java streamingContext object.
* @param accessKeyId Aliyun Access Key ID.
* @param accessKeySecret Aliyun Access Key Secret.
* @param guid Aliyun DTS guid name.
* @param storageLevel Storage level to use for storing the received objects.
* StorageLevel.MEMORY_AND_DISK_2 is recommended.
* @param usePublicIp use public ip or not.
* @return
*/
@Experimental
def createStream(
jssc: JavaStreamingContext,
accessKeyId: String,
accessKeySecret: String,
guid: String,
storageLevel: StorageLevel,
usePublicIp: Boolean): JavaReceiverInputDStream[String] = {
createStream(jssc.ssc, accessKeyId, accessKeySecret, guid, storageLevel, usePublicIp)
}
def defaultMessageFunc(message: ClusterMessage): String = {
try {
val obj = new JSONObject()
message.getRecord.getAttributes.foreach(attribute => {
obj.put(attribute._1, attribute._2)
})
message.getRecord.getFieldList.foreach(field => {
val fieldObj = new JSONObject()
fieldObj.put("Field name", field.name)
fieldObj.put("Field type", field.`type`)
fieldObj.put("Field length", field.length)
if (field.name != null) {
if (field.encoding.equals("binary")) {
fieldObj.put("Field value(binary)", field.getValue.getBytes)
} else {
fieldObj.put("Field value", field.getValue.toString(field.encoding))
}
} else {
fieldObj.put("Field value", "null")
}
obj.put("field", fieldObj.toJSONString)
})
obj.toJSONString
} catch {
case e: Exception =>
logError("Failed to resolve dts message.")
throw e
}
}
}
|
uncleGen/aliyun-emapreduce-sdk
|
external/emr-dts/src/main/scala/org/apache/spark/streaming/aliyun/dts/DtsUtils.scala
|
Scala
|
artistic-2.0
| 8,382 |
object Test {
trait Foo[S[_[_], _[_]]] extends Bar[S] {
def m[F[_]](x: S[({ type G[A] = Bar[S] })#G, F]): Unit
}
trait Bar[S[_[_], _[_]]] {
def m[F[_]](x: S[({ type G[A] = Bar[S] })#G, F]): Unit
}
}
|
lrytz/scala
|
test/files/pos/t12187.scala
|
Scala
|
apache-2.0
| 215 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2018 Helge Holzmann (L3S) and Vinay Goel (Internet Archive)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package org.archive.archivespark.sparkling.warc
import java.nio.charset.Charset
import java.util.UUID
import org.archive.archivespark.sparkling.Sparkling
import org.archive.archivespark.sparkling.util.DigestUtil
import org.joda.time.Instant
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter, ISODateTimeFormat}
object WarcHeaders {
val UTF8: Charset = Charset.forName(Sparkling.DefaultCharset)
val ArcDateTimeFormat: DateTimeFormatter = DateTimeFormat.forPattern("yyyyMMddHHmmss").withZoneUTC
val WarcDateTimeFormat: DateTimeFormatter = ISODateTimeFormat.dateTimeNoMillis
  val Br = "\r\n"
def arcFile(info: WarcFileMeta, filename: String): Array[Byte] = {
val header = StringBuilder.newBuilder
header.append("filedesc://")
header.append(filename)
header.append(" 0.0.0.0 ")
header.append(ArcDateTimeFormat.print(info.created))
header.append(" text/plain ")
val headerBody = StringBuilder.newBuilder
// Internet Archive: Name of gathering organization with no white space (http://archive.org/web/researcher/ArcFileFormat.php)
headerBody.append("1 0 " + info.publisher.replace(" ", "")).append(Br)
headerBody.append("URL IP-address Archive-date Content-type Archive-length").append(Br)
val headerBodyStr: String = headerBody.toString
val headerBodyBlob: Array[Byte] = headerBodyStr.getBytes(UTF8)
header.append(headerBodyBlob.length).append(Br)
header.append(headerBodyStr).append(Br)
header.toString().getBytes(UTF8)
}
def warcFile(meta: WarcFileMeta, filename: String): Array[Byte] = {
val header = StringBuilder.newBuilder
header.append("WARC/1.0").append(Br)
header.append("WARC-Type: warcinfo").append(Br)
header.append("WARC-Date: " + WarcDateTimeFormat.print(Instant.now)).append(Br)
header.append("WARC-Filename: " + filename).append(Br)
header.append("WARC-Record-ID: " + newRecordID()).append(Br)
header.append("Content-Type: application/warc-fields").append(Br)
val headerBody = StringBuilder.newBuilder
headerBody.append("software: " + meta.software).append(Br)
headerBody.append("format: WARC File Format 1.0").append(Br)
headerBody.append("conformsTo: http://bibnum.bnf.fr/WARC/WARC_ISO_28500_version1_latestdraft.pdf").append(Br)
headerBody.append("publisher: " + meta.publisher).append(Br)
headerBody.append("created: " + WarcDateTimeFormat.print(meta.created)).append(Br)
headerBody.append(Br * 3)
val headerBodyStr = headerBody.toString()
val headerBodyBlob = headerBodyStr.getBytes(UTF8)
header.append("Content-Length: " + headerBodyBlob.length).append(Br)
header.append(Br)
header.append(headerBodyStr)
header.toString().getBytes(UTF8)
}
def warcRecord(warcType: String, meta: WarcRecordMeta, contentLength: Long, payloadDigest: Option[String]): Array[Byte] = {
val header = StringBuilder.newBuilder
header.append("WARC/1.0").append(Br)
header.append("WARC-Type: " + warcType).append(Br)
header.append("WARC-Target-URI: " + meta.url).append(Br)
header.append("WARC-Date: " + WarcDateTimeFormat.print(meta.timestamp)).append(Br)
for (digest <- payloadDigest) header.append("WARC-Payload-Digest: " + digest).append(Br)
for (ip <- meta.ip) header.append("WARC-IP-Address: " + ip).append(Br)
header.append("WARC-Record-ID: " + meta.recordId.getOrElse(newRecordID())).append(Br)
header.append("Content-Type: application/http; msgtype=" + warcType).append(Br)
header.append("Content-Length: " + contentLength).append(Br)
header.append(Br)
header.toString().getBytes(UTF8)
}
def warcResponseRecord(meta: WarcRecordMeta, content: Array[Byte], payload: Array[Byte]): Array[Byte] = {
warcRecord("response", meta, content.length, Some("sha1:" + DigestUtil.sha1Base32(payload)))
}
def http(statusLine: String, headers: Seq[(String, String)]): Array[Byte] = {
val header = StringBuilder.newBuilder
header.append(statusLine).append(Br)
for ((key, value) <- headers) {
header.append(s"$key: $value").append(Br)
}
header.append(Br)
header.toString().getBytes(UTF8)
}
private def newRecordID(): String = "<urn:uuid:" + UUID.randomUUID() + ">"
}
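// Illustrative sketch (not part of the file above): what WarcHeaders.http produces for a
// simple status line plus two headers, assuming the surrounding repository classes
// (e.g. Sparkling) that WarcHeaders references are on the classpath. The result is
// CRLF-delimited and ends with a blank line, ready to be followed by the payload bytes.
object WarcHttpHeaderSketch extends App {
  val bytes = WarcHeaders.http(
    "HTTP/1.1 200 OK",
    Seq("Content-Type" -> "text/html; charset=UTF-8", "Content-Length" -> "1234"))

  // Prints:
  // HTTP/1.1 200 OK
  // Content-Type: text/html; charset=UTF-8
  // Content-Length: 1234
  // (blank line)
  println(new String(bytes, "UTF-8"))
}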
|
helgeho/ArchiveSpark
|
src/main/scala/org/archive/archivespark/sparkling/warc/WarcHeaders.scala
|
Scala
|
mit
| 5,422 |
package com.github.mdr.ascii.common
object Point {
private def sameColumn(p1: Point, p2: Point, p3: Point) = p1.column == p2.column && p2.column == p3.column
private def sameRow(p1: Point, p2: Point, p3: Point) = p1.row == p2.row && p2.row == p3.row
private def colinear(p1: Point, p2: Point, p3: Point) = sameColumn(p1, p2, p3) || sameRow(p1, p2, p3)
def removeRedundantPoints(points: List[Point]): List[Point] = points match {
case List() | List(_) | List(_, _) ⇒ points
case p1 :: p2 :: p3 :: remainder if colinear(p1, p2, p3) ⇒ removeRedundantPoints(p1 :: p3 :: remainder)
case p :: ps ⇒ p :: removeRedundantPoints(ps)
}
}
case class Point(row: Int, column: Int) extends Translatable[Point] with Transposable[Point] {
def maxRowCol(that: Point): Point = Point(math.max(this.row, that.row), math.max(this.column, that.column))
type Self = Point
def translate(down: Int = 0, right: Int = 0): Point = Point(row + down, column + right)
def transpose: Point = Point(column, row)
def neighbours: List[Point] = List(up, right, down, left)
def withRow(newRow: Int) = copy(row = newRow)
def withColumn(newColumn: Int) = copy(column = newColumn)
def region: Region = Region(this, this)
}
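// Illustrative sketch (not part of the file above): removeRedundantPoints drops the
// interior points of straight horizontal/vertical runs, keeping only the corners of a
// path. It assumes the Point definitions above (and the Translatable/Transposable/Region
// types they reference elsewhere in this repository) are on the classpath.
object RedundantPointsSketch extends App {
  val path = List(Point(0, 0), Point(0, 1), Point(0, 2), Point(1, 2), Point(2, 2))
  // Only the corners survive:
  println(Point.removeRedundantPoints(path)) // List(Point(0,0), Point(0,2), Point(2,2))
}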
|
jlmauduy/ascii-graphs
|
src/main/scala/com/github/mdr/ascii/common/Point.scala
|
Scala
|
mit
| 1,306 |
package vct.col.util
import hre.ast.Origin
import vct.col.ast.ASTNode
trait VisitorHelper {
def getOrigin() : Origin
/**
* This function is used in many AST classes to handle/print exceptions when
* executing a visitor pattern over the AST.
*/
def handle_throwable(t:Throwable) = {
if (ASTNode.thrown.get() != t) {
System.err.printf("Triggered by %s:%n", getOrigin())
ASTNode.thrown.set(t)
}
throw t
}
}
|
sccblom/vercors
|
vercors/src/main/java/vct/col/util/VisitorHelper.scala
|
Scala
|
mpl-2.0
| 448 |