code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
---|---|---|---|---|---|
package gapt.proofs.expansion
import gapt.expr._
import gapt.proofs.Sequent
import gapt.proofs.context.mutable.MutableContext
import gapt.proofs.context.update.Sort
import gapt.proofs.gaptic._
import gapt.proofs.lk.transformations.LKToExpansionProof
import org.specs2.mutable.Specification
class UnifyInstancesTest extends Specification {
"example" in {
implicit val ctx = MutableContext.default()
ctx += Sort( "i" )
ctx += hoc"p: i>o"
ctx += hoc"q: i>o"
ctx += hoc"c: i"
val lk = Lemma( ( "hyp" -> hof"!x!y (p(x) & q(y))" ) +:
Sequent()
:+ ( "conj" -> hof"q(c) & p(c)" ) ) {
destruct( "conj" )
// two instance vectors:
allL( fov"x", le"c" ); prop
allL( le"c", fov"y" ); prop
}
val exp = LKToExpansionProof( lk )
numberOfInstancesET( exp ) must_== 4
val unified = unifyInstancesET( exp )
// now just one instance vector:
numberOfInstancesET( unified ) must_== 2
}
}
|
gapt/gapt
|
tests/src/test/scala/gapt/proofs/expansion/UnifyInstancesTest.scala
|
Scala
|
gpl-3.0
| 962 |
package commons.repositories.mappings
import commons.models.Login
import commons.repositories.BaseRepo
import slick.dbio.DBIO
import slick.jdbc.MySQLProfile.api.{DBIO => _, MappedTo => _, TableQuery => _, Rep => _, _}
import slick.lifted._
trait LoginDbMappings {
implicit val loginMapping: BaseColumnType[Login] = MappedColumnType.base[Login, String](
login => login.value,
str => Login(str)
)
}
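// Hedged usage sketch (illustrative only; the table and column names below are assumptions,
// not defined in this repository). A table definition that mixes in LoginDbMappings can
// declare Login columns directly and let loginMapping handle the String conversion:
//
//   class Logins(tag: Tag) extends Table[Login](tag, "logins") with LoginDbMappings {
//     def login = column[Login]("login") // persisted as a plain string column
//     def * = login
//   }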
|
Dasiu/play-framework-test-project
|
app/commons/repositories/mappings/LoginDbMappings.scala
|
Scala
|
mit
| 413 |
package com.github.cdow.actor.vm
import java.net.InetSocketAddress
import akka.actor.{FSM, ActorRef, Props}
import akka.io.{IO, Tcp}
import akka.io.Tcp._
import akka.util.ByteString
import com.github.cdow.actor.MainMessage
import com.github.cdow.actor.vm.VmConnection.Disconnected
sealed trait VmMessage
object VmMessage {
case object Connect extends VmMessage
case object Disconnect extends VmMessage
}
sealed trait VmState
object VmState {
case object Idle extends VmState
case object Binding extends VmState
case object Bound extends VmState
case object Connected extends VmState
case object Running extends VmState
}
// TODO link this to VmState better
sealed trait VmConnection
object VmConnection {
case object Disconnected extends VmConnection{
def bind(binding: ActorRef): Bound = {
Bound(binding)
}
}
case class Bound(server: ActorRef) extends VmConnection {
def unBind: Disconnected.type = {
Disconnected
}
def connect(connection: ActorRef): Connected = {
Connected(server, connection)
}
}
case class Connected(server: ActorRef, connection: ActorRef) extends VmConnection {
def unBind: Disconnected.type = {
Disconnected
}
def disconnect: Bound = {
Bound(server)
}
}
}
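// Hedged illustration of the state transitions modelled by VmConnection (the `server` and
// `connection` ActorRefs are placeholders; in VmActor they come from Tcp.Bound / Tcp.Connected):
//
//   VmConnection.Disconnected.bind(server)                      // => Bound(server)
//   VmConnection.Bound(server).connect(connection)              // => Connected(server, connection)
//   VmConnection.Connected(server, connection).disconnect       // => Bound(server)
//   VmConnection.Connected(server, connection).unBind           // => Disconnected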
object VmActor {
def props(port: Int, listener: ActorRef) = Props(new VmActor(port, listener))
}
class VmActor(port: Int, listener: ActorRef) extends FSM[VmState, VmConnection] {
import context.system
import VmState._
val HANDSHAKE = ByteString.fromString("JDWP-Handshake", "US-ASCII")
startWith(Idle, VmConnection.Disconnected)
when(Idle) {
case Event(VmMessage.Connect, VmConnection.Disconnected) =>
IO(Tcp) ! Bind(self, new InetSocketAddress(port))
goto(Binding)
}
when(Binding) {
case Event(Tcp.Bound(localAddress), vmConn: VmConnection.Disconnected.type) =>
val server = sender()
goto(Bound) using vmConn.bind(server)
case Event(CommandFailed(_: Bind), VmConnection.Disconnected) =>
context stop self
stay()
}
when(Bound) {
case Event(Tcp.Connected(remote, local), vmConn: VmConnection.Bound) =>
val connection = sender()
connection ! Register(self)
connection ! Write(HANDSHAKE)
goto(Connected) using vmConn.connect(connection)
case Event(VmMessage.Disconnect, vmConn: VmConnection.Bound) =>
IO(Tcp) ! Unbind
goto(Idle) using vmConn.unBind
}
when(Connected) {
case Event(Received(HANDSHAKE), _: VmConnection.Connected) =>
listener ! MainMessage.VmConnected
goto(Running)
case Event(_: ConnectionClosed, vmConn: VmConnection.Connected) =>
listener ! MainMessage.VmDisconnected
goto(Bound) using vmConn.disconnect
case Event(VmMessage.Disconnect, vmConn: VmConnection.Connected) =>
vmConn.connection ! Close
vmConn.server ! Unbind
goto(Idle) using vmConn.unBind
}
when(Running) {
case Event(data: ByteString, vmConn: VmConnection.Connected) =>
vmConn.connection ! Write(data)
stay()
case Event(Received(data), _: VmConnection.Connected) =>
listener ! data
stay()
case Event(_: ConnectionClosed, vmConn: VmConnection.Connected) =>
listener ! MainMessage.VmDisconnected
goto(Bound) using vmConn.disconnect
case Event(VmMessage.Disconnect, vmConn: VmConnection.Connected) =>
vmConn.connection ! Close
vmConn.server ! Unbind
goto(Idle) using vmConn.unBind
}
initialize()
}
|
cdow/sbt-debug-plugin
|
src/main/scala/com/github/cdow/actor/vm/VmActor.scala
|
Scala
|
isc
| 3,307 |
package slick.sql
import slick.basic.{BasicStreamingAction, BasicAction}
import slick.compiler.QueryCompiler
import slick.relational.{RelationalActionComponent, RelationalTableComponent, RelationalProfile}
import scala.language.higherKinds
import slick.dbio._
import slick.ast.{TableNode, Symbol, SymbolNamer, ColumnOption}
import slick.util.DumpInfo
/** Abstract profile for SQL-based databases. */
trait SqlProfile extends RelationalProfile with SqlTableComponent with SqlActionComponent
/* internal: */ with SqlUtilsComponent {
@deprecated("Use the Profile object directly instead of calling `.profile` on it", "3.2")
override val profile: SqlProfile = this
override protected def computeQueryCompiler = super.computeQueryCompiler ++ QueryCompiler.sqlPhases
override protected def computeCapabilities = super.computeCapabilities ++ SqlCapabilities.all
type SchemaDescription = DDL
trait DDL extends SchemaDescriptionDef { self =>
/** Statements to execute first for create(), e.g. creating tables and indexes. */
protected def createPhase1: Iterable[String]
/** Statements to execute after createPhase1, e.g. creating foreign keys. */
protected def createPhase2: Iterable[String]
protected def createIfNotExistsPhase: Iterable[String]
/** All statements to execute for create() */
def createStatements: Iterator[String] = createPhase1.iterator ++ createPhase2.iterator
/** All statements to execute for createIfNotExists() */
def createIfNotExistsStatements: Iterator[String] = createIfNotExistsPhase.iterator
/** Statements to execute first for drop(), e.g. removing connections from other entities. */
protected def dropPhase1: Iterable[String]
protected def dropIfExistsPhase: Iterable[String]
/** Statements to execute after dropPhase1, e.g. actually dropping a table. */
protected def dropPhase2: Iterable[String]
/** All statements to execute for drop() */
def dropStatements: Iterator[String] = dropPhase1.iterator ++ dropPhase2.iterator
/** All statements to execute for dropIfExists() */
def dropIfExistsStatements: Iterator[String] = dropIfExistsPhase.iterator
/** Statements to execute first for truncate() */
protected def truncatePhase: Iterable[String]
/** All statements to execute for truncate */
def truncateStatements: Iterator[String] = truncatePhase.iterator
/**
* Create a new DDL object which combines this and the other DDL object.
*
* Composition is such that given {{{A.ddl ++ B.ddl}}} the create phases will be
* run in FIFO order and the drop phases will be run in LIFO order.
*/
override def ++(other: DDL): DDL = new DDL {
protected lazy val createPhase1 = self.createPhase1 ++ other.createPhase1
protected lazy val createPhase2 = self.createPhase2 ++ other.createPhase2
protected lazy val createIfNotExistsPhase = self.createIfNotExistsPhase ++ other.createIfNotExistsPhase
protected lazy val dropPhase1 = other.dropPhase1 ++ self.dropPhase1
protected lazy val dropIfExistsPhase = other.dropIfExistsPhase ++ self.dropIfExistsPhase
protected lazy val dropPhase2 = other.dropPhase2 ++ self.dropPhase2
protected lazy val truncatePhase = other.truncatePhase ++ self.truncatePhase
}
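// Hedged example of the ordering contract documented above (`a` and `b` stand for two
// hypothetical DDL values, not defined here):
//   (a ++ b).createStatements // a.createPhase1, b.createPhase1, a.createPhase2, b.createPhase2 (FIFO)
//   (a ++ b).dropStatements   // b.dropPhase1, a.dropPhase1, b.dropPhase2, a.dropPhase2 (LIFO)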
override def hashCode() =
Vector(self.createPhase1, self.createPhase2, self.dropPhase1, self.dropPhase2 , self.truncatePhase).hashCode
override def equals(o: Any) = o match {
case ddl: DDL =>
self.createPhase1 == ddl.createPhase1 &&
self.createIfNotExistsPhase == ddl.createIfNotExistsPhase &&
self.createPhase2 == ddl.createPhase2 &&
self.dropPhase1 == ddl.dropPhase1 &&
self.dropIfExistsPhase == ddl.dropIfExistsPhase &&
self.dropPhase2 == ddl.dropPhase2 &&
self.truncatePhase == ddl.truncatePhase
case _ => false
}
}
object DDL {
def apply(create1: Iterable[String], createIfNotExists: Iterable[String], create2: Iterable[String], drop1: Iterable[String],
dropIfExists: Iterable[String], drop2: Iterable[String] , truncate: Iterable[String]): DDL = new DDL {
protected def createPhase1 = create1
protected def createIfNotExistsPhase = createIfNotExists
protected def createPhase2 = create2
protected def dropPhase1 = drop1
protected def dropIfExistsPhase = dropIfExists
protected def dropPhase2 = drop2
protected def truncatePhase = truncate
}
def apply(create1: Iterable[String], drop2: Iterable[String]): DDL = apply(create1, Nil, Nil, Nil, Nil, drop2 , Nil)
def apply(create1: String, drop2: String): DDL = apply(Iterable(create1), Iterable(drop2))
}
}
object SqlProfile {
/** Extra column options for SqlProfile */
object ColumnOption {
case object NotNull extends ColumnOption[Nothing]
case object Nullable extends ColumnOption[Nothing]
/** Type as expected by the DBMS, e.g. VARCHAR or VARCHAR(254). Note that Slick's model omits
* the optional length ascription for string columns here and carries the length in the
* separate ColumnOption Length instead. A length ascription for string columns is allowed,
* though, and can be used in a Slick Table subclass to pass it to the DBMS. As this is the
* type of the underlying DBMS it may not be portable to other DBMSs.
*
* Note that Slick uses VARCHAR or VARCHAR(254) in DDL for String columns if neither
* ColumnOption DBType nor Length is given. */
case class SqlType(typeName: String) extends ColumnOption[Nothing]
}
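// Hedged usage sketch (the column definition is hypothetical, not part of this file): in a
// profile's table DSL the option is normally applied through columnOptions, e.g.
//   def name = column[String]("NAME", O.SqlType("VARCHAR(254)"))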
}
trait SqlUtilsComponent { self: SqlProfile =>
/** quotes identifiers to avoid collisions with SQL keywords and other syntax issues */
def quoteIdentifier(id: String): String = {
val s = new StringBuilder(id.length + 4) append '"'
for(c <- id) if(c == '"') s append "\\"\\"" else s append c
(s append '"').toString
}
def quoteTableName(t: TableNode): String = t.schemaName match {
case Some(s) => quoteIdentifier(s) + "." + quoteIdentifier(t.tableName)
case None => quoteIdentifier(t.tableName)
}
def likeEncode(s: String) = {
val b = new StringBuilder
for(c <- s) c match {
case '%' | '_' | '^' => b append '^' append c
case _ => b append c
}
b.toString
}
class QuotingSymbolNamer(parent: Option[SymbolNamer]) extends SymbolNamer("x", "y", parent) {
override def namedSymbolName(s: Symbol) = quoteIdentifier(s.name)
}
}
trait SqlTableComponent extends RelationalTableComponent { this: SqlProfile =>
trait ColumnOptions extends super.ColumnOptions {
def SqlType(typeName: String) = SqlProfile.ColumnOption.SqlType(typeName)
}
override val columnOptions: ColumnOptions = new ColumnOptions {}
}
trait SqlActionComponent extends RelationalActionComponent { this: SqlProfile =>
type ProfileAction[+R, +S <: NoStream, -E <: Effect] <: SqlAction[R, S, E]
type StreamingProfileAction[+R, +T, -E <: Effect] <: SqlStreamingAction[R, T, E] with ProfileAction[R, Streaming[T], E]
}
trait SqlAction[+R, +S <: NoStream, -E <: Effect] extends BasicAction[R, S, E] {
type ResultAction[+R, +S <: NoStream, -E <: Effect] <: SqlAction[R, S, E]
/** Return the SQL statements that will be executed for this Action */
def statements: Iterable[String]
/** Create an Action that uses the specified SQL statement(s) but otherwise
* behaves the same as this Action. */
def overrideStatements(statements: Iterable[String]): ResultAction[R, S, E]
def getDumpInfo = DumpInfo(DumpInfo.simpleNameFor(getClass), mainInfo = statements.mkString("[", "; ", "]"))
}
trait SqlStreamingAction[+R, +T, -E <: Effect] extends BasicStreamingAction[R, T, E] with SqlAction[R, Streaming[T], E]
trait FixedSqlAction[+R, +S <: NoStream, -E <: Effect] extends SqlAction[R, S, E] {
type ResultAction[+R, +S <: NoStream, -E <: Effect] = SqlAction[R, S, E]
}
trait FixedSqlStreamingAction[+R, +T, -E <: Effect] extends SqlStreamingAction[R, T, E] with FixedSqlAction[R, Streaming[T], E]
|
slick/slick
|
slick/src/main/scala/slick/sql/SqlProfile.scala
|
Scala
|
bsd-2-clause
| 8,081 |
package com.atomist.project.generate
import com.atomist.param.ParameterValues
import com.atomist.project.ProjectOperation
import com.atomist.project.common.InvalidParametersException
import com.atomist.rug.kind.core.ProjectContext
import com.atomist.rug.runtime.js.LocalRugContext
import com.atomist.source.ArtifactSource
/**
* Implemented by classes that can generate projects
* from given parameter values, which should match the
* parameters specified in the parameters() method.
*/
trait ProjectGenerator
extends ProjectOperation {
@throws(classOf[InvalidParametersException])
def generate(projectName: String, pvs: ParameterValues, ProjectContext: ProjectContext = new ProjectContext(LocalRugContext)): ArtifactSource
}
|
atomist/rug
|
src/main/scala/com/atomist/project/generate/ProjectGenerator.scala
|
Scala
|
gpl-3.0
| 734 |
package io.getquill.norm
import io.getquill.Spec
import io.getquill.ast.AscNullsFirst
import io.getquill.ast.Constant
import io.getquill.ast.Ident
import io.getquill.ast.Map
import io.getquill.ast.SortBy
import io.getquill.testContext._
class AttachToEntitySpec extends Spec {
val attachToEntity = AttachToEntity(SortBy(_, _, Constant(1), AscNullsFirst)) _
"attaches clause to the root of the query (entity)" - {
"query is the entity" in {
val n = quote {
qr1.sortBy(x => 1)
}
attachToEntity(qr1.ast) mustEqual n.ast
}
"query is a composition" - {
"map" in {
val q = quote {
qr1.filter(t => t.i == 1).map(t => t.s)
}
val n = quote {
qr1.sortBy(t => 1).filter(t => t.i == 1).map(t => t.s)
}
attachToEntity(q.ast) mustEqual n.ast
}
"flatMap" in {
val q = quote {
qr1.filter(t => t.i == 1).flatMap(t => qr2)
}
val n = quote {
qr1.sortBy(t => 1).filter(t => t.i == 1).flatMap(t => qr2)
}
attachToEntity(q.ast) mustEqual n.ast
}
"concatMap" in {
val q = quote {
qr1.filter(t => t.i == 1).concatMap(t => t.s.split(" "))
}
val n = quote {
qr1.sortBy(t => 1).filter(t => t.i == 1).concatMap(t => t.s.split(" "))
}
attachToEntity(q.ast) mustEqual n.ast
}
"filter" in {
val q = quote {
qr1.filter(t => t.i == 1).filter(t => t.s == "s1")
}
val n = quote {
qr1.sortBy(t => 1).filter(t => t.i == 1).filter(t => t.s == "s1")
}
attachToEntity(q.ast) mustEqual n.ast
}
"sortBy" in {
val q = quote {
qr1.sortBy(t => t.s)
}
val n = quote {
qr1.sortBy(t => 1).sortBy(t => t.s)
}
attachToEntity(q.ast) mustEqual n.ast
}
"take" in {
val q = quote {
qr1.sortBy(b => b.s).take(1)
}
val n = quote {
qr1.sortBy(b => 1).sortBy(b => b.s).take(1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"drop" in {
val q = quote {
qr1.sortBy(b => b.s).drop(1)
}
val n = quote {
qr1.sortBy(b => 1).sortBy(b => b.s).drop(1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"distinct" in {
val q = quote {
qr1.sortBy(b => b.s).drop(1).distinct
}
val n = quote {
qr1.sortBy(b => 1).sortBy(b => b.s).drop(1).distinct
}
attachToEntity(q.ast) mustEqual n.ast
}
}
}
val iqr1 = quote {
infix"$qr1".as[Query[TestEntity]]
}
"attaches clause to the root of the query (infix)" - {
"query is the entity" in {
val n = quote {
iqr1.sortBy(x => 1)
}
attachToEntity(iqr1.ast) mustEqual n.ast
}
"query is a composition" - {
"map" in {
val q = quote {
iqr1.filter(t => t.i == 1).map(t => t.s)
}
val n = quote {
iqr1.sortBy(t => 1).filter(t => t.i == 1).map(t => t.s)
}
attachToEntity(q.ast) mustEqual n.ast
}
"flatMap" in {
val q = quote {
iqr1.filter(t => t.i == 1).flatMap(t => qr2)
}
val n = quote {
iqr1.sortBy(t => 1).filter(t => t.i == 1).flatMap(t => qr2)
}
attachToEntity(q.ast) mustEqual n.ast
}
"concatMap" in {
val q = quote {
iqr1.filter(t => t.i == 1).concatMap(t => t.s.split(" "))
}
val n = quote {
iqr1.sortBy(t => 1).filter(t => t.i == 1).concatMap(t => t.s.split(" "))
}
attachToEntity(q.ast) mustEqual n.ast
}
"filter" in {
val q = quote {
iqr1.filter(t => t.i == 1).filter(t => t.s == "s1")
}
val n = quote {
iqr1.sortBy(t => 1).filter(t => t.i == 1).filter(t => t.s == "s1")
}
attachToEntity(q.ast) mustEqual n.ast
}
"sortBy" in {
val q = quote {
iqr1.sortBy(t => t.s)
}
val n = quote {
iqr1.sortBy(t => 1).sortBy(t => t.s)
}
attachToEntity(q.ast) mustEqual n.ast
}
"take" in {
val q = quote {
iqr1.sortBy(b => b.s).take(1)
}
val n = quote {
iqr1.sortBy(b => 1).sortBy(b => b.s).take(1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"drop" in {
val q = quote {
iqr1.sortBy(b => b.s).drop(1)
}
val n = quote {
iqr1.sortBy(b => 1).sortBy(b => b.s).drop(1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"distinct" in {
val q = quote {
iqr1.sortBy(b => b.s).drop(1).distinct
}
val n = quote {
iqr1.sortBy(b => 1).sortBy(b => b.s).drop(1).distinct
}
attachToEntity(q.ast) mustEqual n.ast
}
}
}
"falls back to the query if it's not possible to flatten it" - {
"union" in {
val q = quote {
qr1.union(qr2)
}
val n = quote {
qr1.union(qr2).sortBy(x => 1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"unionAll" in {
val q = quote {
qr1.unionAll(qr2)
}
val n = quote {
qr1.unionAll(qr2).sortBy(x => 1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"outer join" in {
val q = quote {
qr1.leftJoin(qr2).on((a, b) => true)
}
val n = quote {
qr1.leftJoin(qr2).on((a, b) => true).sortBy(x => 1)
}
attachToEntity(q.ast) mustEqual n.ast
}
"groupBy.map" in {
val q = quote {
qr1.groupBy(a => a.i).map(a => 1)
}
val n = quote {
qr1.groupBy(a => a.i).map(a => 1).sortBy(x => 1)
}
attachToEntity(q.ast) mustEqual n.ast
}
}
"fails if the entity isn't found" in {
intercept[IllegalStateException] {
attachToEntity(Map(Ident("a"), Ident("b"), Ident("c")))
}
()
}
}
|
mentegy/quill
|
quill-core/src/test/scala/io/getquill/norm/AttachToEntitySpec.scala
|
Scala
|
apache-2.0
| 6,081 |
package view
import service.RequestCache
import twirl.api.Html
import util.StringUtil
trait AvatarImageProvider { self: RequestCache =>
/**
* Returns an <img> tag which displays the avatar icon.
* Falls back to Gravatar if no avatar icon has been configured in the user settings.
*/
protected def getAvatarImageHtml(userName: String, size: Int,
mailAddress: String = "", tooltip: Boolean = false)(implicit context: app.Context): Html = {
val src = if(mailAddress.isEmpty){
// by user name
getAccountByUserName(userName).map { account =>
if(account.image.isEmpty && getSystemSettings().gravatar){
s"""https://www.gravatar.com/avatar/${StringUtil.md5(account.mailAddress.toLowerCase)}?s=${size}"""
} else {
s"""${context.path}/${account.userName}/_avatar"""
}
} getOrElse {
s"""${context.path}/_unknown/_avatar"""
}
} else {
// by mail address
getAccountByMailAddress(mailAddress).map { account =>
if(account.image.isEmpty && getSystemSettings().gravatar){
s"""https://www.gravatar.com/avatar/${StringUtil.md5(account.mailAddress.toLowerCase)}?s=${size}"""
} else {
s"""${context.path}/${account.userName}/_avatar"""
}
} getOrElse {
if(getSystemSettings().gravatar){
s"""https://www.gravatar.com/avatar/${StringUtil.md5(mailAddress.toLowerCase)}?s=${size}"""
} else {
s"""${context.path}/_unknown/_avatar"""
}
}
}
if(tooltip){
Html(s"""<img src="${src}" class="avatar" style="width: ${size}px; height: ${size}px;" data-toggle="tooltip" title="${userName}"/>""")
} else {
Html(s"""<img src="${src}" class="avatar" style="width: ${size}px; height: ${size}px;" />""")
}
}
}
|
chu888chu888/gitbucket
|
src/main/scala/view/AvatarImageProvider.scala
|
Scala
|
apache-2.0
| 1,810 |
package controllers
import play.api.mvc._
import play.api.libs.json.Json
import play.api.libs.concurrent.Execution.Implicits._
import models._
import reactivemongo.bson.BSONObjectID
import scala.concurrent.Future
import org.joda.time.DateTime
import org.joda.time.DateTimeZone.UTC
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import reactivemongo.api._
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.bson.{BSONDateTime, BSONDocument, BSONDocumentReader, BSONDocumentWriter, BSONObjectID}
import reactivemongo.core.commands.LastError
object PhantomController extends Controller {
val mongoDriver = new MongoDriver
val mongoConnection = mongoDriver.connection(List("localhost"))
val mongoDb = mongoConnection("matt")
val collection : BSONCollection = mongoDb.collection("phantoms")
def index = Action {
Ok(views.html.index())
}
def show(example: String) = Action { implicit request =>
example match {
case "cat" => Ok(views.html.cat())
case "click" => Ok(views.html.click())
case "circle1" => Ok(views.html.circle1())
case "circle2" => Ok(views.html.circle2())
case _ => Ok(views.html.index())
}
}
def edit(id: String) = Action.async {
val selector = BSONDocument("_id" -> id)
val foundPhantom = collection.find(selector).one[Phantom]
foundPhantom.map { phantom =>
phantom match {
case Some(p) => Ok(views.html.edit(p))
case None => Redirect(routes.PhantomController.index)
}
}
}
def update (id: String) = Action { implicit request =>
val params = request.body.asFormUrlEncoded.get
val message = params("message")(0).toString
val selector = BSONDocument("_id" -> id)
val modifier = BSONDocument(
"$set" -> BSONDocument(
"message" -> message,
"whenUpdated" -> BSONDateTime(DateTime.now(UTC).getMillis)
)
)
val futureUpdate = collection.update(selector, modifier, multi = false)
Redirect(routes.PhantomController.index)
}
def delete (id: String) = Action {
val selector = BSONDocument("_id" -> BSONObjectID(id))
collection.remove(selector, firstMatchOnly = true)
Redirect(routes.PhantomController.index)
}
}
|
marinatedpork/pixi
|
app/controllers/PhantomController.scala
|
Scala
|
mit
| 2,212 |
/*
* Copyright 2013 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.component
import simx.core.entity.Entity
import simx.core.entity.component._
import simx.core.entity.typeconversion.ProvideConversionInfo
import simx.core.entity.description.{SValSet, EntityAspect}
import simx.core.ontology.{types, Symbols}
import simx.core.component.remote.{RemoteActor, Start}
import simx.core.svaractor.{TimedRingBuffer, SVarActor}
import simx.core.svaractor.TimedRingBuffer.{Unbuffered, BufferMode}
import collection.mutable
import java.util.UUID
protected[core] trait ComponentCreation extends SVarActor with EntityCreationHandling with StackableEntityConfigLayer {
private val openCreateRequests = mutable.Map[Entity, (SVarActor.Ref, List[(ProvideConversionInfo[_ , _], BufferMode)])]()
provideInitialValuesFor{
case (toProvide, aspect, e, given) if aspect.semanticsEqual(Symbols.component) =>
aspect match {
case InnerComponentAspect(c : RemoteComponentAspect[_]) =>
ask[SVarActor.Ref](RemoteActor.self, Start(c.nodeName.get, c)){ requestConfig(_, c, e) }
case InnerComponentAspect(c : ComponentAspect[_]) =>
requestConfig(createActor(c.props){ _ => {}}(_.printStackTrace()), c, e)
case _ => throw new Exception("invalid aspect " + aspect)
}
}
private def requestConfig(component : SVarActor.Ref, c : ComponentAspect[_], e : Entity){
ask[SValSet](component, GetInitialConfigValuesMsg(UUID.randomUUID(), c, e)){ set =>
val toCreate = set.values.flatMap( _.map( _.asProvide.wrapped -> TimedRingBuffer.defaultMode) )
if (toCreate.nonEmpty)
openCreateRequests.update(e, component -> toCreate.toList)
provideInitialValues(e, SValSet( types.Component(component), types.Name(c.cName.name) ) )
}
}
override protected def injectSVarCreation(entity : Entity) =
if (openCreateRequests contains entity) openCreateRequests(entity) :: Nil else Nil
protected def entityConfigComplete(e: Entity, aspect: EntityAspect){
openCreateRequests remove e
}
}
|
simulator-x/core
|
src/simx/core/component/ComponentCreation.scala
|
Scala
|
apache-2.0
| 2,867 |
package debop4s.data.orm.jpa.mysql.repository
import javax.persistence.QueryHint
import debop4s.data.orm.jpa.ScalaJpaEntity2
import org.springframework.data.jpa.repository.{JpaRepository, Query, QueryHints}
import org.springframework.data.repository.query.Param
import org.springframework.stereotype.Repository
import org.springframework.transaction.annotation.Transactional
@Repository
trait ScalaJpaEntity2Repository extends JpaRepository[ScalaJpaEntity2, java.lang.Long] {
@Transactional(readOnly = true)
@Query("select x from ScalaJpaEntity2 x where x.id = :id")
@QueryHints(value = Array(new QueryHint(name = "org.hibernate.readOnly", value = "true")), forCounting = false)
def findById(@Param("id") id: java.lang.Long): ScalaJpaEntity2
}
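// Hedged usage sketch (the Spring wiring below is illustrative, not part of this file): an
// injected repository executes the read-only JPQL declared above.
//   @Autowired var repository: ScalaJpaEntity2Repository = _
//   val entity = repository.findById(42L) // select x from ScalaJpaEntity2 x where x.id = 42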
|
debop/debop4s
|
debop4s-data-orm/src/test/scala/debop4s/data/orm/jpa/mysql/repository/ScalaJpaEntity2Repository.scala
|
Scala
|
apache-2.0
| 757 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.kafka010
import org.apache.kafka.common.security.auth.SecurityProtocol.{SASL_SSL, SSL}
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.{SparkConf, SparkFunSuite}
class KafkaTokenSparkConfSuite extends SparkFunSuite with BeforeAndAfterEach {
private val identifier1 = "cluster1"
private val identifier2 = "cluster2"
private val authBootStrapServers = "127.0.0.1:0"
private val targetServersRegex = "127.0.0.*:0"
private val securityProtocol = SSL.name
private val kerberosServiceName = "kafka1"
private val trustStoreType = "customTrustStoreType"
private val trustStoreLocation = "/path/to/trustStore"
private val trustStorePassword = "trustStoreSecret"
private val keyStoreType = "customKeyStoreType"
private val keyStoreLocation = "/path/to/keyStore"
private val keyStorePassword = "keyStoreSecret"
private val keyPassword = "keySecret"
private val tokenMechanism = "SCRAM-SHA-256"
private var sparkConf: SparkConf = null
override def beforeEach(): Unit = {
super.beforeEach()
sparkConf = new SparkConf()
}
test("getClusterConfig should trow exception when not exists") {
val thrown = intercept[NoSuchElementException] {
KafkaTokenSparkConf.getClusterConfig(sparkConf, "invalid")
}
assert(thrown.getMessage contains "spark.kafka.clusters.invalid.auth.bootstrap.servers")
}
test("getClusterConfig should return entry with defaults") {
sparkConf.set(s"spark.kafka.clusters.$identifier1.auth.bootstrap.servers", authBootStrapServers)
val clusterConfig = KafkaTokenSparkConf.getClusterConfig(sparkConf, identifier1)
assert(clusterConfig.identifier === identifier1)
assert(clusterConfig.authBootstrapServers === authBootStrapServers)
assert(clusterConfig.targetServersRegex === KafkaTokenSparkConf.DEFAULT_TARGET_SERVERS_REGEX)
assert(clusterConfig.securityProtocol === SASL_SSL.name)
assert(clusterConfig.kerberosServiceName ===
KafkaTokenSparkConf.DEFAULT_SASL_KERBEROS_SERVICE_NAME)
assert(clusterConfig.trustStoreType === None)
assert(clusterConfig.trustStoreLocation === None)
assert(clusterConfig.trustStorePassword === None)
assert(clusterConfig.keyStoreType === None)
assert(clusterConfig.keyStoreLocation === None)
assert(clusterConfig.keyStorePassword === None)
assert(clusterConfig.keyPassword === None)
assert(clusterConfig.tokenMechanism === KafkaTokenSparkConf.DEFAULT_SASL_TOKEN_MECHANISM)
}
test("getClusterConfig should return entry overwrite defaults") {
sparkConf.set(s"spark.kafka.clusters.$identifier1.auth.bootstrap.servers", authBootStrapServers)
sparkConf.set(s"spark.kafka.clusters.$identifier1.target.bootstrap.servers.regex",
targetServersRegex)
sparkConf.set(s"spark.kafka.clusters.$identifier1.security.protocol", securityProtocol)
sparkConf.set(s"spark.kafka.clusters.$identifier1.sasl.kerberos.service.name",
kerberosServiceName)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.truststore.type", trustStoreType)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.truststore.location", trustStoreLocation)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.truststore.password", trustStorePassword)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.keystore.type", keyStoreType)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.keystore.location", keyStoreLocation)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.keystore.password", keyStorePassword)
sparkConf.set(s"spark.kafka.clusters.$identifier1.ssl.key.password", keyPassword)
sparkConf.set(s"spark.kafka.clusters.$identifier1.sasl.token.mechanism", tokenMechanism)
val clusterConfig = KafkaTokenSparkConf.getClusterConfig(sparkConf, identifier1)
assert(clusterConfig.identifier === identifier1)
assert(clusterConfig.authBootstrapServers === authBootStrapServers)
assert(clusterConfig.targetServersRegex === targetServersRegex)
assert(clusterConfig.securityProtocol === securityProtocol)
assert(clusterConfig.kerberosServiceName === kerberosServiceName)
assert(clusterConfig.trustStoreType === Some(trustStoreType))
assert(clusterConfig.trustStoreLocation === Some(trustStoreLocation))
assert(clusterConfig.trustStorePassword === Some(trustStorePassword))
assert(clusterConfig.keyStoreType === Some(keyStoreType))
assert(clusterConfig.keyStoreLocation === Some(keyStoreLocation))
assert(clusterConfig.keyStorePassword === Some(keyStorePassword))
assert(clusterConfig.keyPassword === Some(keyPassword))
assert(clusterConfig.tokenMechanism === tokenMechanism)
}
test("getClusterConfig should return specified kafka params") {
sparkConf.set(s"spark.kafka.clusters.$identifier1.auth.bootstrap.servers", authBootStrapServers)
sparkConf.set(s"spark.kafka.clusters.$identifier1.kafka.customKey", "customValue")
val clusterConfig = KafkaTokenSparkConf.getClusterConfig(sparkConf, identifier1)
assert(clusterConfig.identifier === identifier1)
assert(clusterConfig.authBootstrapServers === authBootStrapServers)
assert(clusterConfig.specifiedKafkaParams === Map("customKey" -> "customValue"))
}
test("getAllClusterConfigs should return empty list when nothing configured") {
assert(KafkaTokenSparkConf.getAllClusterConfigs(sparkConf).isEmpty)
}
test("getAllClusterConfigs should return empty list with malformed configuration") {
sparkConf.set(s"spark.kafka.clusters.", authBootStrapServers)
assert(KafkaTokenSparkConf.getAllClusterConfigs(sparkConf).isEmpty)
}
test("getAllClusterConfigs should return multiple entries") {
sparkConf.set(s"spark.kafka.clusters.$identifier1.auth.bootstrap.servers", authBootStrapServers)
sparkConf.set(s"spark.kafka.clusters.$identifier2.auth.bootstrap.servers", authBootStrapServers)
val clusterConfigs = KafkaTokenSparkConf.getAllClusterConfigs(sparkConf)
assert(clusterConfigs.size === 2)
clusterConfigs.foreach { clusterConfig =>
assert(clusterConfig.authBootstrapServers === authBootStrapServers)
assert(clusterConfig.targetServersRegex === KafkaTokenSparkConf.DEFAULT_TARGET_SERVERS_REGEX)
assert(clusterConfig.securityProtocol === SASL_SSL.name)
assert(clusterConfig.kerberosServiceName ===
KafkaTokenSparkConf.DEFAULT_SASL_KERBEROS_SERVICE_NAME)
assert(clusterConfig.trustStoreType === None)
assert(clusterConfig.trustStoreLocation === None)
assert(clusterConfig.trustStorePassword === None)
assert(clusterConfig.keyStoreType === None)
assert(clusterConfig.keyStoreLocation === None)
assert(clusterConfig.keyStorePassword === None)
assert(clusterConfig.keyPassword === None)
assert(clusterConfig.tokenMechanism === KafkaTokenSparkConf.DEFAULT_SASL_TOKEN_MECHANISM)
}
}
}
|
maropu/spark
|
external/kafka-0-10-token-provider/src/test/scala/org/apache/spark/kafka010/KafkaTokenSparkConfSuite.scala
|
Scala
|
apache-2.0
| 7,681 |
package controllers.techsupport
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
/**
* Created by hooxin on 15-2-23.
*/
object Tracking extends Controller{
def add(id:Long) = TODO
def remove(id:Long) =TODO
def update(id:Long) = TODO
def get(id:Long) = TODO
def list = TODO
}
|
firefoxmmx2/techsupport_ext4_scala
|
app/controllers/techsupport/Tracking.scala
|
Scala
|
apache-2.0
| 313 |
package fpinscala
import org.scalatest.Matchers
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.FlatSpec
trait SpecBase extends FlatSpec with Matchers with GeneratorDrivenPropertyChecks {
}
|
castle8080/fpinscala
|
exercises/src/test/scala/fpinscala/SpecBase.scala
|
Scala
|
mit
| 223 |
package au.csiro.ict
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTimeZone, DateTime}
object Utils {
val TIMESTAMP_YYYYD_FORMAT = DateTimeFormat.forPattern("yyyyD").withZoneUTC()
val yyyyFormat = DateTimeFormat.forPattern("yyyy").withZoneUTC()
val yyyyWWFormat = DateTimeFormat.forPattern("yyyyww").withZoneUTC()
val yyyyMMFormat = DateTimeFormat.forPattern("yyyyMM").withZoneUTC()
val yyyyDDDFormat = DateTimeFormat.forPattern("yyyyDDD").withZoneUTC()
val yyyyDDDHHFormat = DateTimeFormat.forPattern("yyyyDDDHH").withZoneUTC()
val yyyyDDDHHMMFormat = DateTimeFormat.forPattern("yyyyDDDHHmm").withZoneUTC()
val ukDateTimeFormat = DateTimeFormat.forPattern("dd-MM-yyyy'T'HH:mm:ss").withZoneUTC()
val ukDateFormat = DateTimeFormat.forPattern("dd-MM-yyyy").withZoneUTC()
val TimeParser = DateTimeFormat.forPattern("HH:mm:ss").withZoneUTC()
val zoneUTC = DateTimeZone.UTC
val SEPARATOR = '$'
def uuid() = java.util.UUID.randomUUID().toString
DateTimeZone.setDefault(zoneUTC)
def generateRowKey(sensor:String, tsInSeconds:Int) = sensor+"$"+Utils.yyyyDDDFormat.print(tsInSeconds*1000L)
def parseRowKey(rowKey:String):(String,Int) = {
val Array(sid, dayInyyyyDDD) = rowKey.split('$') // split on the literal '$'; split("$") would treat it as a regex anchor and not split at all
sid -> (yyyyDDDFormat.parseDateTime(dayInyyyyDDD).getMillis/1000L).asInstanceOf[Int]
}
def generateNidStreamDayKey(nid:String, streamDayKey:String) = nid+"@"+streamDayKey
val TOKEN_LEN = Utils.uuid().length
val KeyPattern = ("[a-zA-Z0-9\\\\-]{"+TOKEN_LEN+"}").r.pattern
def keyPatternMatcher(s:String) = KeyPattern.matcher(s).matches
def inputQueueIdFor(nId:String,streamId:String)= "q@"+nId+"@"+streamId
def dateTimeToInt(ts:DateTime):Int=(ts.getMillis/1000L).asInstanceOf[Int]
}
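// Hedged round-trip example for the row-key helpers above (the sensor id and timestamp are
// made up; 1356998400 is 2013-01-01T00:00:00Z):
//   generateRowKey("s1", 1356998400)  // "s1$2013001" (yyyyDDD of that day in UTC)
//   parseRowKey("s1$2013001")         // ("s1", 1356998400), i.e. that day's midnight in seconds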
|
sensorgroup/sensordb
|
src/main/scala/au/csiro/ict/Helpers.scala
|
Scala
|
mpl-2.0
| 1,735 |
package xyz.hyperreal.typesetter
import scala.swing._
import java.awt.RenderingHints._
import java.awt.Color._
import java.awt.Font._
import java.awt.geom._
import java.io._
object TypesetterTest extends MainFrame with App
{
val t = new Typesetter
val fs = System.getProperties.getProperty( "file.separator" )
val home = System.getProperties.getProperty( "user.home" )
val fonts = home + fs + "Dropbox" + fs + "Typography" + fs + "Fonts" + fs
// println( t.dimen("2parindent") )
// t.font( Font(null, fonts + "GentiumPlus-1.510" + fs + "GentiumPlus-R.ttf", "plain", 30) )
// t.vbox
// t.hbox
// t text "line 1"
// t.end
// t.vskip( 5, 0, 0 )
// t.hrule( 1 )
// t.vskip( 5, 0, 0 )
// t.hbox
// t text "line 2"
// t.end
t.vertical
t text "framed"
t.space
t.frame( "text items", 3, 1 )
t.space
t shift ("is", -5)
t.hrule( 10, 0, 5 )
t.space
t.list
t text "/"
t.rlap( t.arg )
t text "="
t.space
t.underline( "cool" )
t.par
//
// t text "moved"
// t.space
// t.vbox
// t.hbox( To(100) )
// t text "text 1"
// t.end
// t.hbox
// t text "text 2"
// t.move( t.box, 30 )
// t.end
// t.space
// t text "wow"
// t.par
//
// t text "rounded"
// t.space
// t add new ShapeBox( new RoundRectangle2D.Double(0, 0, 50, 25, 20, 20), false, t.color, t.alpha, "Box", 0 )
// t.space
// t text "rectangle"
// t.par
//
// t vskip 5
// t hrule 1
// t vskip 5
t.raggedright
t text "This is a very very very very very very very very very very very very very very very very very very very very boring test."
t.par
//
// t.vbox
// t.variable( 'hsize, 600 )
// t.hbox
// t text "top line"
// t.end
// t text "This is a very very very very very very very very very very very very very very very very very very very boring test."
// t.par
// t.hbox
// t text "bottom line"
// t.end
// t.end
//
// t.list
// t text "1."
// t.item( t.arg )
// t text "first item ljksdfljk fdsjkl fd lkjfdsjkl fd jlkfsdl jkfdsl jkfa ljkfd ljkfds ljkfds jlkf jlkfds jfds"
//
// t.list
// t text "a)"
// t.itemitem( t.arg )
// t text "first sub-item ljksdfljk fdsjkl fd lkjfdsjkl fd jlkfsdl jkfdsl jkfa ljkfd ljkfds ljkfds jlkf jlkfds jfds"
//
// t.list
// t text "b)"
// t.itemitem( t.arg )
// t text "second sub-item ljksdfljk fdsjkl fd lkjfdsjkl fd jlkfsdl jkfdsl jkfa ljkfd ljkfds ljkfds jlkf jlkfds jfds"
//
// t.list
// t text "2."
// t.item( t.arg )
// t text "second item ljksdfljk fdsjkl fd lkjfdsjkl fd jlkfsdl jkfdsl jkfa ljkfd ljkfds ljkfds jlkf jlkfds jfds"
//
// t text """
// You don't know about me without you have read a book by the name of
// ``The Adventures of Tom Sawyer;'' but that ain't no matter. That book
// was made by Mr. Mark Twain, and he told the truth, mainly. There was
// things which he stretched, but mainly he told the truth. That is
// nothing. I never seen anybody but lied one time or another, without it
// was Aunt Polly, or the widow, or maybe Mary. Aunt Polly---Tom's Aunt
// Polly, she is---and Mary, and the Widow Douglas is all told about in
// that book, which is mostly a true book, with some stretchers, as I
// said before.
// """
t.par
t.vfil
// t.draw( None )
//
// val path = new Path2D.Double
//
// path moveTo (0, 0)
// path lineTo (10, 10)
// path lineTo (20, -10)
//
// t.string( "0" )
// t.string( "0" )
// t.string( "org" )
// t += new ShapeBox( path, false, t.color, t.alpha, t.stroke, "", 0 )
// t.end
//
// t.rectangle( 50, 50, false )
// t.text( "f ff \\ufb00" )
val p = t box
contents =
new Panel
{
background = WHITE
preferredSize = new Dimension( 1280, 600 )
override def paint( g: Graphics2D ) = {
super.paint( g )
g.setRenderingHint( KEY_ANTIALIASING, VALUE_ANTIALIAS_ON )
p.draw( g, 0, 0 )
// p.box( g, MARGIN, MARGIN, CYAN )
}
}
visible = true
}
|
edadma/typesetter
|
src/main/scala/TypesetterTest.scala
|
Scala
|
mit
| 3,819 |
// Copyright 2012 Twitter, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.cassie
import org.apache.cassandra.finagle.thrift
/**
* The level of consistency required for a read operation.
*/
sealed case class ReadConsistency(level: thrift.ConsistencyLevel) {
override def toString = "ReadConsistency." +
level.toString.toLowerCase.capitalize
}
object ReadConsistency {
/**
* Will return the record returned by the first node to respond. A consistency
* check is sometimes done in a background thread to fix any consistency issues
* when ReadConsistency.One is used (see read_repair_chance in cassandra). This
* means subsequent calls will eventually have correct data even if the initial read got
* an older value. (This is called read repair.)
*/
val One = ReadConsistency(thrift.ConsistencyLevel.ONE)
/**
* Will query all nodes and return the record with the most recent timestamp
* once at least a majority of replicas have reported. Again, the remaining
* replicas will be checked in the background.
*/
val Quorum = ReadConsistency(thrift.ConsistencyLevel.QUORUM)
/**
* Returns the record with the most recent timestamp once a majority of replicas within
* the local datacenter have replied. Requires NetworkTopologyStrategy on the server side.
*/
val LocalQuorum = ReadConsistency(thrift.ConsistencyLevel.LOCAL_QUORUM)
/**
* Returns the record with the most recent timestamp once a majority of replicas within
* each datacenter have replied.
*/
val EachQuorum = ReadConsistency(thrift.ConsistencyLevel.EACH_QUORUM)
/**
* Will query all nodes and return the record with the most recent timestamp
* once all nodes have replied. Any unresponsive nodes will fail the
* operation.
*/
val All = ReadConsistency(thrift.ConsistencyLevel.ALL)
}
|
travisbrown/zipkin
|
zipkin-cassandra/src/main/scala/com/twitter/cassie/ReadConsistency.scala
|
Scala
|
apache-2.0
| 2,363 |
package benchmarks.philosophers
import benchmarks.philosophers.PhilosopherTable._
import rescala.core.{Engine, Struct}
import rescala.reactives.{Signals, Var}
class DynamicPhilosopherTable[S <: Struct](philosopherCount: Int, work: Long)(override implicit val engine: Engine[S]) extends PhilosopherTable(philosopherCount, work)(engine) {
import engine.Signal
override def createTable(tableSize: Int): Seq[Seating[S]] = {
def mod(n: Int): Int = (n + tableSize) % tableSize
val phils = for (i <- 0 until tableSize) yield Var[Philosopher, S](Thinking)
val forks = for (i <- 0 until tableSize) yield {
val nextCircularIndex = mod(i + 1)
Signal {
phils(i)() match {
case Eating => Taken(i.toString)
case Thinking =>
phils(nextCircularIndex)() match {
case Eating => Taken(nextCircularIndex.toString)
case Thinking => Free
}
}
}
}
for (i <- 0 until tableSize) yield {
val ownName = i.toString
val vision = Signal {
forks(i)() match {
case Taken(name) if name != ownName => BlockedBy(name)
case Taken(`ownName`) => Done
case Free => forks(mod(i - 1))() match {
case Free => Ready
case Taken(name) => BlockedBy(name)
}
}
}
Seating(i, phils(i), forks(i), forks(mod(i - 1)), vision)
}
}
}
class HalfDynamicPhilosopherTable[S <: Struct](philosopherCount: Int, work: Long)(override implicit val engine: Engine[S]) extends PhilosopherTable(philosopherCount, work)(engine) {
import engine.Signal
override def createTable(tableSize: Int): Seq[Seating[S]] = {
def mod(n: Int): Int = (n + tableSize) % tableSize
val phils = for (i <- 0 until tableSize) yield Var[Philosopher, S](Thinking)
val forks = for (i <- 0 until tableSize) yield {
val nextCircularIndex = mod(i + 1)
Signals.lift(phils(i), phils(nextCircularIndex))(calcFork(i.toString, nextCircularIndex.toString))
}
for (i <- 0 until tableSize) yield {
val ownName = i.toString
val vision = Signal {
forks(i)() match {
case Taken(name) if name != ownName => BlockedBy(name)
case Taken(`ownName`) => Done
case Free => forks(mod(i - 1))() match {
case Free => Ready
case Taken(name) => BlockedBy(name)
}
}
}
Seating(i, phils(i), forks(i), forks(mod(i - 1)), vision)
}
}
}
class OtherHalfDynamicPhilosopherTable[S <: Struct](philosopherCount: Int, work: Long)(override implicit val engine: Engine[S]) extends PhilosopherTable(philosopherCount, work)(engine) {
import engine.Signal
override def createTable(tableSize: Int): Seq[Seating[S]] = {
def mod(n: Int): Int = (n + tableSize) % tableSize
val phils = for (i <- 0 until tableSize) yield Var[Philosopher, S](Thinking)
val forks = for (i <- 0 until tableSize) yield {
val nextCircularIndex = mod(i + 1)
Signal {
phils(i)() match {
case Eating => Taken(i.toString)
case Thinking =>
phils(nextCircularIndex)() match {
case Eating => Taken(nextCircularIndex.toString)
case Thinking => Free
}
}
}
}
for (i <- 0 until tableSize) yield {
val vision = Signals.lift(forks(i), forks(mod(i - 1)))(calcVision(i.toString))
Seating(i, phils(i), forks(i), forks(mod(i - 1)), vision)
}
}
}
|
volkc/REScala
|
Research/Microbenchmarks/src/main/scala/benchmarks/philosophers/DynamicPhilosopherTable.scala
|
Scala
|
apache-2.0
| 3,523 |
package com.twitter.finagle.http
import com.twitter.conversions.StorageUnitOps._
import com.twitter.conversions.DurationOps._
import com.twitter.finagle
import com.twitter.finagle.{Service, ServiceFactory}
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.http2.RstException
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.service.ServiceFactoryRef
import com.twitter.finagle.util.DefaultTimer
import com.twitter.io.Buf
import com.twitter.util._
import io.netty.handler.codec.http2.Http2CodecUtil
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.ArrayBuffer
abstract class AbstractH2CEndToEndTest extends AbstractHttp2EndToEndTest {
def clientImpl(): finagle.Http.Client =
finagle.Http.client.withHttp2
.withStatsReceiver(statsRecv)
def serverImpl(): finagle.Http.Server = finagle.Http.server.withHttp2
// Stats test requires examining the upgrade itself.
private[this] val ShouldUpgrade = Contexts.local.newKey[Boolean]()
/**
* The client and server start with the plain-text upgrade so the first request
* is actually an HTTP/1.x request and all subsequent requests are genuine HTTP/2, so we
* fire a throw-away request first to ensure we are testing a real HTTP/2 connection.
*/
override def initClient(client: HttpService): Unit = {
if (Contexts.local.get(ShouldUpgrade).getOrElse(true)) {
val request = Request("/")
await(client(request))
eventually {
assert(statsRecv.counters(Seq("client", "requests")) == 1L)
}
statsRecv.clear()
}
}
override def initService: HttpService = Service.mk { _: Request => Future.value(Response()) }
def featureImplemented(feature: Feature): Boolean = true
test("Upgrade stats are properly recorded") {
Contexts.local.let(ShouldUpgrade, false) {
val client = nonStreamingConnect(Service.mk { _: Request => Future.value(Response()) })
await(client(Request("/"))) // Should be an upgrade request
assert(statsRecv.counters(Seq("client", "upgrade", "success")) == 1)
assert(statsRecv.counters(Seq("server", "upgrade", "success")) == 1)
await(client.close())
}
}
test("Upgrade ignored") {
val req = Request(Method.Post, "/")
req.contentString = "body"
Contexts.local.let(ShouldUpgrade, false) {
val client = nonStreamingConnect(Service.mk { _: Request => Future.value(Response()) })
await(client(req))
// Should have been ignored by upgrade mechanisms since the request has a body
assert(statsRecv.counters(Seq("client", "upgrade", "ignored")) == 1)
// Should still be zero since the client didn't attempt the upgrade at all
assert(statsRecv.counters(Seq("server", "upgrade", "ignored")) == 0)
await(client.close())
}
Contexts.local.let(ShouldUpgrade, false) {
val client = nonStreamingConnect(Service.mk { _: Request => Future.value(Response()) })
// Spoof the upgrade: the client won't attempt it but the Upgrade header should
// still cause the server to consider it an upgrade request and tick the counter.
req.headerMap.set(Fields.Upgrade, "h2c")
await(client(req))
assert(statsRecv.counters(Seq("client", "upgrade", "ignored")) == 2)
assert(statsRecv.counters(Seq("server", "upgrade", "ignored")) == 1)
await(client.close())
}
}
// TODO: Consolidate behavior between h1 and h2
// note that this behavior is implementation-dependent
// the spec says MaxHeaderListSize is advisory
test("Server sets & enforces MaxHeaderSize") {
val server = serverImpl()
.withMaxHeaderSize(1.kilobyte)
.serve("localhost:*", initService)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
initClient(client)
val req = Request("/")
req.headerMap.set("foo", "*" * 2.kilobytes.bytes.toInt)
assert(await(client(req)).status == Status.RequestHeaderFieldsTooLarge)
await(client.close())
await(server.close())
}
test("H1 related connection headers are stripped") {
val connectionHeaders = Seq(
"Keep-Alive",
"Proxy-Connection",
"Upgrade",
"Transfer-Encoding",
"TE",
"custom1",
"custom2"
)
val client = nonStreamingConnect(Service.mk { req: Request =>
val res = Response()
connectionHeaders.foreach(res.headerMap.add(_, "bad"))
res.headerMap.add("Connection", "custom1")
res.headerMap.add("Connection", "custom2")
res.headerMap.add("ok-header", ":)")
Future.value(res)
})
val rh = await(client(Request("/"))).headerMap
connectionHeaders.foreach { header => assert(rh.get(header).isEmpty) }
assert(rh.get("ok-header").get == ":)")
}
test("The TE header is allowed if its value is trailers") {
val client = nonStreamingConnect(Service.mk { _: Request =>
val res = Response()
res.headerMap.add("TE", "trailers")
Future.value(res)
})
val rh = await(client(Request("/"))).headerMap
assert(rh.get("TE").get == "trailers")
}
if (!sys.props.contains("SKIP_FLAKY_TRAVIS"))
test("The upgrade request is ineligible for flow control") {
val server = serverImpl()
.withMaxHeaderSize(1.kilobyte)
.serve(
"localhost:*",
Service.mk[Request, Response] { _ =>
// we need to make this slow or else it'll race the window updating
Future.sleep(50.milliseconds)(DefaultTimer).map(_ => Response())
}
)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
val request = Request(Method.Post, "/")
// send a request that the client *should* have fragmented if it was
// sending an http/2 message
request.content = Buf.Utf8("*" * 70000)
// check that this doesn't throw an exception
val rep = await(client(request))
assert(rep.status == Status.Ok)
}
test("Upgrades to HTTP/2 only if both have the toggle on, and it's H2C, not H2") {
for {
clientUseHttp2 <- Seq(true, false)
serverUseHttp2 <- Seq(true, false)
} {
val sr = new InMemoryStatsReceiver()
val server = {
val srv = finagle.Http.server
.withStatsReceiver(sr)
.withLabel("server")
(if (serverUseHttp2) srv else srv.withNoHttp2).serve("localhost:*", initService)
}
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = {
val clnt = finagle.Http.client
.withStatsReceiver(sr)
(if (clientUseHttp2) clnt else clnt.withNoHttp2)
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
}
val rep = client(Request("/"))
await(rep)
if (clientUseHttp2 && serverUseHttp2) {
assert(
sr.counters.get(Seq("client", "upgrade", "success")) == Some(1),
"Failed to upgrade when both parties were toggled on"
)
assert(
sr.counters.get(Seq("server", "upgrade", "success")) == Some(1),
"Failed to upgrade when both parties were toggled on"
)
} else {
val clientStatus = if (clientUseHttp2) "on" else "off"
val serverStatus = if (serverUseHttp2) "on" else "off"
val errorMsg = s"Upgraded when the client was $clientStatus, the server was " +
s"$serverStatus"
val clientSuccess = sr.counters.get(Seq("client", "upgrade", "success"))
assert(clientSuccess.isEmpty || clientSuccess.contains(0), errorMsg)
val serverSuccess = sr.counters.get(Seq("server", "upgrade", "success"))
assert(serverSuccess.isEmpty || serverSuccess.contains(0), errorMsg)
}
await(Closable.all(client, server).close())
}
}
test("Configuration params take precedence over the defaults for the client") {
for {
clientUseHttp2 <- Seq(true, false)
} {
val sr = new InMemoryStatsReceiver()
val server = serverImpl
.withStatsReceiver(sr)
.withLabel("server")
.serve("localhost:*", initService)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = {
val c = finagle.Http.client
.withStatsReceiver(sr)
(if (clientUseHttp2) c.withHttp2
else c.withNoHttp2)
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
}
val rep = client(Request("/"))
await(rep)
if (clientUseHttp2) {
assert(
sr.counters.get(Seq("client", "upgrade", "success")) == Some(1),
"Failed to upgrade when both parties were on"
)
assert(
sr.counters.get(Seq("server", "upgrade", "success")) == Some(1),
"Failed to upgrade when both parties were on"
)
} else {
assert(!sr.counters.contains(Seq("client", "upgrade", "success")))
val serverSuccess = sr.counters.get(Seq("server", "upgrade", "success"))
assert(serverSuccess.isEmpty || serverSuccess.contains(0L))
}
await(Closable.all(client, server).close())
}
}
test("Configuration params take precedence over the defaults for the server") {
for {
serverUseHttp2 <- Seq(true, false)
} {
val sr = new InMemoryStatsReceiver()
val server = {
val s = finagle.Http.server
.withStatsReceiver(sr)
.withLabel("server")
(if (serverUseHttp2) s.withHttp2
else s.withNoHttp2)
.serve("localhost:*", initService)
}
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.withStatsReceiver(sr)
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
val rep = client(Request("/"))
await(rep)
if (serverUseHttp2) {
assert(
sr.counters.get(Seq("client", "upgrade", "success")) == Some(1),
"Failed to upgrade when both parties were on"
)
assert(
sr.counters.get(Seq("server", "upgrade", "success")) == Some(1),
"Failed to upgrade when both parties were on"
)
} else {
assert(sr.counters(Seq("client", "upgrade", "success")) == 0)
assert(!sr.counters.contains(Seq("server", "upgrade", "success")))
}
await(Closable.all(client, server).close())
}
}
test("We delete the HTTP2-SETTINGS header properly") {
@volatile var headers: HeaderMap = null
val server = serverImpl().serve(
"localhost:*",
Service.mk { req: Request =>
headers = req.headerMap
Future.value(Response())
})
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl().newService("%s:%d".format(addr.getHostName, addr.getPort), "client")
await(client(Request("/")))
assert(!headers.contains(Http2CodecUtil.HTTP_UPGRADE_SETTINGS_HEADER.toString))
}
private final class Srv extends Closable {
val responses = new ArrayBuffer[Promise[Response]]
private[this] val ref = new ServiceFactoryRef(ServiceFactory.const(initService))
private[this] val service = Service.mk[Request, Response] { _ =>
_pending.incrementAndGet()
val p = new Promise[Response]
responses.append(p)
p.ensure(_pending.decrementAndGet())
}
private[this] val _pending = new AtomicInteger
private[this] val _ls = finagle.Http.server.withHttp2
.withLabel("server")
.serve("localhost:*", ref)
def pending(): Int = _pending.get()
def startProcessing(idx: Int) = responses(idx).setValue(Response())
def boundAddr = _ls.boundAddress.asInstanceOf[InetSocketAddress]
def close(deadline: Time): Future[Unit] = _ls.close(deadline)
def upgrade(svc: Service[Request, Response]): Unit = {
initClient(svc)
ref() = ServiceFactory.const(service)
}
}
test("draining servers process pending requests") {
val srv = new Srv
val dest = s"${srv.boundAddr.getHostName}:${srv.boundAddr.getPort}"
val client =
finagle.Http.client.withHttp2
.withStatsReceiver(statsRecv)
.newService(dest, "client")
srv.upgrade(client)
// dispatch a request that will be pending when the
// server shuts down.
val pendingReply = client(Request("/"))
while (srv.pending() != 1) { Thread.sleep(100) }
// shutdown server w/ grace period
srv.close(10.minutes)
// new connection attempt fails
val rep2 = client(Request("/"))
Await.ready(rep2, 30.seconds)
assert(rep2.poll.get.isThrow)
srv.startProcessing(0)
// pending request is finally successfully processed
Await.ready(pendingReply, 30.seconds)
pendingReply.poll.get match {
case Return(resp) => assert(resp.status == Status.Ok)
case Throw(t) => fail("didn't expect pendingReply to fail", t)
}
}
test("illegal headers produce a non-zero error code on the client") {
val srv = serverImpl()
.serve("localhost:*", Service.mk[Request, Response](_ => Future.value(Response())))
val bound = srv.boundAddress.asInstanceOf[InetSocketAddress]
val dest = s"${bound.getHostName}:${bound.getPort}"
val client = clientImpl().newService(dest, "client")
val req = new Request.Proxy {
val underlying = Request("/")
def request: Request = underlying
}
initClient(client)
// this sends illegal pseudo headers to the server, it should reject them with a non-zero
// error code.
req.headerMap.setUnsafe(":invalid", "foo")
val rep = client(req)
val error = intercept[RstException] {
Await.result(rep, 5.seconds)
}
assert(error.errorCode != 0) // assert that this was not an error-free rejection
}
}
|
twitter/finagle
|
finagle-http/src/test/scala/com/twitter/finagle/http/AbstractH2CEndToEndTest.scala
|
Scala
|
apache-2.0
| 13,967 |
/*
* Copyright 2015 Goldman Sachs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl
import java.util.Comparator
import com.gs.collections.api.block.function.primitive.{DoubleFunction, FloatFunction, IntFunction, LongFunction}
import com.gs.collections.api.block.function.{Function, Function0, Function2, Function3}
import com.gs.collections.api.block.predicate.{Predicate, Predicate2}
import com.gs.collections.api.block.procedure.primitive.ObjectIntProcedure
import com.gs.collections.api.block.procedure.{Procedure, Procedure2}
object Prelude
{
// A "singleton" object to hold the implicit conversion methods
/*
     * These implicit conversion methods each take a closure and return an anonymous instance
* of the corresponding GS Collections interface
*/
implicit def closure2Procedure[T](closure: (T) => Unit): Procedure[T] =
new Procedure[T]
{
def value(each: T) = closure(each)
}
implicit def closure2Procedure2[T1, T2](closure: (T1, T2) => Unit): Procedure2[T1, T2] =
new Procedure2[T1, T2]
{
def value(t1: T1, t2: T2) = closure(t1, t2)
}
implicit def closure2Function[T, V](closure: (T) => V): Function[T, V] =
new Function[T, V]
{
def valueOf(t: T) = closure(t)
}
implicit def closure2Function2[T1, T2, V](closure: (T1, T2) => V): Function2[T1, T2, V] =
new Function2[T1, T2, V]
{
def value(t1: T1, t2: T2) = closure(t1, t2)
}
implicit def closure2Function3[T1, T2, T3, V](closure: (T1, T2, T3) => V): Function3[T1, T2, T3, V] =
new Function3[T1, T2, T3, V]
{
def value(t1: T1, t2: T2, t3: T3) = closure(t1, t2, t3)
}
implicit def closure2Predicate[T](closure: (T) => Boolean): Predicate[T] =
new Predicate[T]
{
def accept(each: T) = closure(each)
}
implicit def closure2Predicate2[T1, T2](closure: (T1, T2) => Boolean): Predicate2[T1, T2] =
new Predicate2[T1, T2]
{
def accept(t1: T1, t2: T2) = closure(t1, t2)
}
implicit def closure2ObjectIntProcedure[T](closure: (T, Int) => Unit): ObjectIntProcedure[T] =
new ObjectIntProcedure[T]
{
def value(each: T, index: Int) = closure(each, index)
}
implicit def closure2Runnable(closure: () => Unit): Runnable =
new Runnable
{
def run() = closure()
}
implicit def closure2CodeBlock[T](closure: () => T): Function0[T] =
new Function0[T]
{
def value = closure()
}
implicit def closure2Comparator[T](closure: (T, T) => Int): Comparator[T] =
new Comparator[T]
{
def compare(o1: T, o2: T) = closure(o1, o2)
}
implicit def closure2IntFunction[T](closure: (T) => Int): IntFunction[T] =
new IntFunction[T]
{
def intValueOf(each: T) = closure(each)
}
implicit def closure2LongFunction[T](closure: (T) => Long): LongFunction[T] =
new LongFunction[T]
{
def longValueOf(each: T) = closure(each)
}
implicit def closure2DoubleFunction[T](closure: (T) => Double): DoubleFunction[T] =
new DoubleFunction[T]
{
def doubleValueOf(each: T) = closure(each)
}
implicit def closure2FloatFunction[T](closure: (T) => Float): FloatFunction[T] =
new FloatFunction[T]
{
def floatValueOf(each: T) = closure(each)
}
}
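// Illustrative usage sketch (editor addition, not part of the original file): it shows how the
// implicit conversions above let ordinary Scala closures stand in for GS Collections' functional
// interfaces. The object and value names below are hypothetical.
object PreludeUsageExample
{
    import Prelude._

    // The expected type Predicate[String] lets the compiler adapt the closure via closure2Predicate.
    val nonEmpty: Predicate[String] = (s: String) => !s.isEmpty

    // Likewise, closure2Procedure turns a (String) => Unit closure into a Procedure[String].
    val printer: Procedure[String] = (s: String) => println(s)

    def demo(): Unit =
    {
        println(nonEmpty.accept("abc")) // prints "true"
        printer.value("hello") // prints "hello"
    }
}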
|
Pelumi/gs-collections
|
scala-unit-tests/src/test/scala/com/gs/collections/impl/Prelude.scala
|
Scala
|
apache-2.0
| 4,107 |
package de.ljfa.advbackport.asm
import de.ljfa.advbackport.Config
import de.ljfa.advbackport.logic.ItemLogic
import net.minecraft.block.Block
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack
import de.ljfa.advbackport.logic.PlayerLogic
class AdventureHooks
object AdventureHooks {
def isToolAdventureModeExempt(player: EntityPlayer, x: Int, y: Int, z: Int): Boolean = {
if(player.capabilities.allowEdit)
true
else
PlayerLogic.canDestroy(player, player.worldObj.getBlock(x, y, z))
}
}
|
ljfa-ag/Adventure-Backport
|
src/main/scala/de/ljfa/advbackport/asm/AdventureHooks.scala
|
Scala
|
mit
| 570 |
package com.nthportal.euler
package h0.t1
import com.nthportal.euler.maths.NumericFormat
import scala.util.{Success, Try}
object Problem11 extends ProjectEulerProblem {
private val numAdjacent = 4
private val numStr =
"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08\\n" +
"49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00\\n" +
"81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65\\n" +
"52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91\\n" +
"22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80\\n" +
"24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50\\n" +
"32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70\\n" +
"67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21\\n" +
"24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72\\n" +
"21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95\\n" +
"78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92\\n" +
"16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57\\n" +
"86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58\\n" +
"19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40\\n" +
"04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66\\n" +
"88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69\\n" +
"04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36\\n" +
"20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16\\n" +
"20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54\\n" +
"01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"
private val numbers =
numStr.lines.toStream
.map(_.split(' ').toList)
.map(_.map(NumericFormat.twoDigitStringAsNum))
.toList
override def apply(): Long = {
for {
i <- numbers.indices
j <- numbers(i).indices
} yield productsAt(i, j)
}.flatten.max
private def productsAt(i: Int, j: Int): List[Long] = List(
productFrom(i, j, (t: (Int, Int)) => (t._1, t._2 + 1)), // Horizontal
productFrom(i, j, (t: (Int, Int)) => (t._1 + 1, t._2)), // Vertical
productFrom(i, j, (t: (Int, Int)) => (t._1 - 1, t._2 + 1)), // Diagonal up
productFrom(i, j, (t: (Int, Int)) => (t._1 + 1, t._2 + 1)) // Diagonal down
).flatten
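  // Walks up to `numAdjacent` cells from (i, j) in the direction given by `f`; any index that
  // falls outside the grid fails its Try, which collapses the whole product to None.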
private def productFrom(i: Int, j: Int, f: (((Int, Int)) => (Int, Int))): Option[Long] = {
Stream.iterate((i, j))(f)
.take(numAdjacent)
.map { case (a, b) => Try(numbers(a)(b).toLong) }
.fold[Try[Long]](Success(1))((t1, t2) => for (l1 <- t1; l2 <- t2) yield l1 * l2)
.toOption
}
}
|
NthPortal/euler-n-scala
|
src/main/scala/com/nthportal/euler/h0/t1/Problem11.scala
|
Scala
|
mit
| 2,666 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package lib.graphs
import api._
import lib.linalg._
import test.util._
import resource._
import java.io.File
import java.io.PrintWriter
trait TransitiveClosureSpec extends lib.BaseLibSpec {
val path = "/graphs/trans-closure"
val temp = tempPath(path)
override def tempPaths: Seq[String] = Seq(path)
override def resources = Seq(
() => generateInput(s"$temp/edges.tsv"): Unit
)
"transitiveClosure" should "compute correct results" in {
val act = run(s"$temp/edges.tsv", CSV())
val exp = expectedClosure()
act should contain theSameElementsAs exp
}
def run(input: String, csv: CSV): Set[Edge[Long]]
lazy val paths = {
val S = 3415434314L
val P = 5
val ws = shuffle(P)(util.RanHash(S, 0)).map(_.toLong)
val xs = shuffle(P)(util.RanHash(S, 1)).map(_.toLong + P)
val ys = shuffle(P)(util.RanHash(S, 2)).map(_.toLong + P * 2)
val zs = shuffle(P)(util.RanHash(S, 3)).map(_.toLong + P * 3)
ws zip xs zip ys zip zs
}
private def generateInput(path: String): Unit = {
val edges = {
for {
(((w, x), y), z) <- paths
e <- Seq(Edge(w, x), Edge(x, y), Edge(y, z))
} yield e
}.distinct
for (pw <- managed(new PrintWriter(new File(path))))
      yield for (e <- edges.sortBy(_.src)) pw.write(s"${e.src}\t${e.dst}\n")
}.acquireAndGet(_ => ())
private def expectedClosure(): Set[Edge[Long]] = {
for {
(((w, x), y), z) <- paths
e <- Seq(Edge(w, x), Edge(x, y), Edge(y, z), Edge(w, y), Edge(x, z), Edge(w, z))
} yield e
}.toSet
}
|
emmalanguage/emma
|
emma-lib/src/test/scala/org/emmalanguage/lib/graphs/TransitiveClosureSpec.scala
|
Scala
|
apache-2.0
| 2,203 |
package org.http4s
package server
package middleware
import cats.ApplicativeError
import cats.data.Kleisli
import fs2._
import scala.util.control.NoStackTrace
object EntityLimiter {
final case class EntityTooLarge(limit: Long) extends Exception with NoStackTrace
val DefaultMaxEntitySize: Long = 2L * 1024L * 1024L // 2 MB default
def apply[F[_], G[_], B](http: Kleisli[F, Request[G], B], limit: Long = DefaultMaxEntitySize)(
implicit G: ApplicativeError[G, Throwable]): Kleisli[F, Request[G], B] =
Kleisli { req =>
http(req.withBodyStream(req.body.through(takeLimited(limit))))
}
private def takeLimited[F[_]](n: Long)(
implicit F: ApplicativeError[F, Throwable]): Pipe[F, Byte, Byte] =
_.pull
.take(n)
.flatMap {
case Some(_) => Pull.raiseError[F](EntityTooLarge(n))
case None => Pull.done
}
.stream
}
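// Illustrative usage sketch (editor addition, not part of the original file): a minimal app
// wrapped so that request bodies larger than 1 KiB fail their body stream with EntityTooLarge.
// `exampleApp` and the 1024-byte limit are hypothetical; only EntityLimiter comes from above.
object EntityLimiterExample {
  import cats.effect.IO

  private val exampleApp: HttpApp[IO] =
    Kleisli((_: Request[IO]) => IO.pure(Response[IO](Status.Ok)))

  // Reading a body longer than 1024 bytes raises EntityTooLarge(1024) in the wrapped service.
  val limited: HttpApp[IO] = EntityLimiter(exampleApp, limit = 1024L)
}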
|
aeons/http4s
|
server/src/main/scala/org/http4s/server/middleware/EntityLimiter.scala
|
Scala
|
apache-2.0
| 889 |
package almond.channels
import java.nio.charset.StandardCharsets
import java.{util => ju}
import scala.util.Try
final case class Message(
idents: Seq[Seq[Byte]],
header: Array[Byte],
parentHeader: Array[Byte],
metadata: Array[Byte],
content: Array[Byte]
) {
override def toString: String = {
val b = new StringBuilder("Message(")
b.append(idents.toString)
def byteArray(bytes: Array[Byte]): Unit = {
b.append(", ")
val s = Try(new String(bytes, StandardCharsets.UTF_8)).getOrElse(b.toString)
b.append(s)
}
byteArray(header)
byteArray(parentHeader)
byteArray(metadata)
byteArray(content)
b.append(')')
b.toString
}
override def equals(obj: Any): Boolean =
obj match {
case other: Message =>
idents == other.idents &&
ju.Arrays.equals(header, other.header) &&
ju.Arrays.equals(parentHeader, other.parentHeader) &&
ju.Arrays.equals(metadata, other.metadata) &&
ju.Arrays.equals(content, other.content)
case _ => false
}
override def hashCode: Int = {
var code = 17 + "Message".##
code = 37 * code + idents.##
code = 37 * code + ju.Arrays.hashCode(header)
code = 37 * code + ju.Arrays.hashCode(parentHeader)
code = 37 * code + ju.Arrays.hashCode(metadata)
code = 37 * code + ju.Arrays.hashCode(content)
37 * code
}
}
|
alexarchambault/jupyter-scala
|
modules/shared/channels/src/main/scala/almond/channels/Message.scala
|
Scala
|
apache-2.0
| 1,395 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.registration
import iht.testhelpers.viewSpecshelper.registration.RegistrationChecklistMessages
import iht.views.ViewTestHelper
import iht.views.html.registration.registration_checklist
import org.jsoup.Jsoup
class RegistrationChecklistViewTest extends ViewTestHelper with RegistrationChecklistMessages {
lazy val registrationChecklistView: registration_checklist = app.injector.instanceOf[registration_checklist]
"RegistrationChecklistView" should {
lazy val view = registrationChecklistView()(createFakeRequest(), messages)
lazy val doc = Jsoup.parse(view.body)
"have the correct title" in {
doc.title() mustBe pageIhtRegistrationChecklistTitle
}
"have h1 tage with page title" in {
doc.select("h1").text() mustBe pageIhtRegistrationChecklistTitle
}
"have introduction paragraphs" in {
doc.select("p").get(2).text() mustBe pageIhtRegistrationChecklistLabel1
doc.select("p").get(3).text() mustBe pageIhtRegistrationChecklistLabel2
}
"have bullet points for user details required" in {
doc.select("li").get(0).text() mustBe ihtRegistrationChecklistYourNino
doc.select("li").get(1).text() mustBe ihtRegistrationChecklist2FA
doc.select("li").get(2).text() mustBe ihtRegistrationChecklistPassport
doc.select("li").get(3).text() mustBe ihtRegistrationChecklistPayslip
doc.select("li").get(4).text() mustBe ihtRegistrationChecklistTaxCredit
}
"have a h2 tag" in {
doc.select("div#applicant-details-list h2").text() mustBe ihtRegistrationDetailsNeededTitle
}
"have a details needed paragraphs" in {
doc.select("p").get(4).text() mustBe ihtRegistrationDetailsNeededLabel1
doc.select("p").get(5).text() mustBe ihtRegistrationDetailsNeededLabel2
}
"have bullet points for deceased details required" in {
doc.select("li").get(5).text() mustBe ihtRegistrationDetailsNeededOname
doc.select("li").get(6).text() mustBe ihtRegistrationChecklistDateOfBirth
doc.select("li").get(7).text() mustBe pageIhtRegistrationChecklistDeceasedLabel3
doc.select("li").get(8).text() mustBe ihtNationalInsuranceNo
doc.select("li").get(9).text() mustBe pageIhtRegistrationChecklistDeceasedLabel5
doc.select("li").get(10).text() mustBe pageIhtRegistrationChecklistDeceasedLabel7
}
"have a progressive disclosure relating to deceased details" should {
"have a reveal text in" in {
doc.getElementById("application-details-reveal").select("summary").text() mustBe pageIhtRegistrationChecklistRevealTextDied
}
"have information relating to deceased details required" in {
doc.getElementById("application-details-reveal").select("p").get(0).text() mustBe ihtRegistrationDetailsNeededLabel3
doc.getElementById("application-details-reveal").select("p").get(1).text() mustBe ihtRegistrationDetailsNeededLabel4
doc.getElementById("application-details-reveal").select("p").get(2).text() mustBe ihtRegistrationDetailsNeededLabel5
}
}
"executor details section" should {
"have a introduction text" in {
doc.getElementById("co-execs-details-list").select("p").get(0).text() mustBe ihtRegistrationExecutorLabel1
}
"have a list of bullet points" in {
doc.getElementById("co-execs-details-list").select("li").get(0).text() mustBe ihtRegistrationDetailsNeededOname
doc.getElementById("co-execs-details-list").select("li").get(1).text() mustBe ihtNationalInsuranceNo
doc.getElementById("co-execs-details-list").select("li").get(2).text() mustBe ihtRegistrationChecklistDateOfBirth
doc.getElementById("co-execs-details-list").select("li").get(3).text() mustBe ihtRegistrationExecutorAddress
doc.getElementById("co-execs-details-list").select("li").get(4).text() mustBe ihtRegistrationChecklistPhoneNoLowerCaseInitial
}
"have a progressive disclosure relating to executor details" should {
"have a reveal text in" in {
doc.getElementById("co-execs-details-reveal").select("summary").text() mustBe pageIhtRegistrationChecklistRevealTextExecutors
}
"have information relating to executor details required" in {
doc.getElementById("co-execs-details-reveal").select("p").get(0).text() mustBe ihtRegistrationExecutorLabel2
doc.getElementById("co-execs-details-reveal").select("p").get(1).text() mustBe ihtRegistrationExecutorLabel3
}
}
}
"have a continue button" in {
doc.getElementById("start-registration").text() mustBe pageIhtRegistrationChecklistContinueButton
doc.getElementById("start-registration").attr("href") mustBe iht.controllers.registration.deceased.routes.DeceasedDateOfDeathController.onPageLoad().url
}
"have a leave this page text link" in {
doc.getElementById("leave-page").text() mustBe pageIhtRegistrationChecklistLeaveLink
doc.getElementById("leave-page").attr("href") mustBe iht.controllers.filter.routes.FilterController.onPageLoad().url
}
"have a save link text" in {
doc.select("p").get(12).text() mustBe pageIhtRegistrationChecklistSaveLink
}
}
}
|
hmrc/iht-frontend
|
test/iht/views/registration/RegistrationChecklistViewTest.scala
|
Scala
|
apache-2.0
| 5,811 |
package controllers
import play.api.i18n.Lang
import play.api.mvc.RequestHeader
import play.api.templates.{Html, Txt}
import securesocial.controllers.MailTemplates
import securesocial.core.BasicProfile
/**
* @author Joseph Dessens
* @since 2014-09-03
*/
object AngularMailTemplates extends MailTemplates {
override def getSignUpEmail(token: String)(implicit request: RequestHeader, lang: Lang): (Option[Txt], Option[Html]) = {
(None, Option(Html("Go to http://" + request.host + "/#/signup/" + token)))
}
override def getSendPasswordResetEmail(user: BasicProfile, token: String)(implicit request: RequestHeader, lang: Lang): (Option[Txt], Option[Html]) = {
(None, Option(Html("Go to http://" + request.host + "/#/password/reset/" + token)))
}
override def getWelcomeEmail(user: BasicProfile)(implicit request: RequestHeader, lang: Lang): (Option[Txt], Option[Html]) = {
(None, Option(Html("Go to http://" + request.host)))
}
override def getAlreadyRegisteredEmail(user: BasicProfile)(implicit request: RequestHeader, lang: Lang): (Option[Txt], Option[Html]) = {
(None, None)
}
override def getUnknownEmailNotice()(implicit request: RequestHeader, lang: Lang): (Option[Txt], Option[Html]) = {
(None, None)
}
override def getPasswordChangedNoticeEmail(user: BasicProfile)(implicit request: RequestHeader, lang: Lang): (Option[Txt], Option[Html]) = {
(None, None)
}
}
|
vega113/emotracker
|
app/controllers/AngularMailTemplates.scala
|
Scala
|
apache-2.0
| 1,423 |
package name.abhijitsarkar.akka
import java.nio.file.{Files, Paths}
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* @author Abhijit Sarkar
*/
class TransformerSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike with Matchers with BeforeAndAfterAll {
def this() = this(ActorSystem("TransformerSpec"))
implicit val materializer = ActorMaterializer()
/* Use the system's dispatcher as ExecutionContext */
import system.dispatcher
override def afterAll {
TestKit.shutdownActorSystem(system)
}
/* https://github.com/matyjas/testing-akka/blob/master/src/test/scala/events/EventActorSpec.scala */
"Transformer" should "extract temperature" in {
val path = NoaaCurrentConditionsClient.currentConditionsPath(false)
val files = Files.newDirectoryStream(Paths.get(path), "*.xml")
val transformer = system.actorOf(Transformer.props, "transformer")
system.scheduler.scheduleOnce(50.milliseconds, transformer, Message(files, "temp_f"))
within(10.second) {
val flow = expectMsgClass(classOf[Source[(String, Seq[String]), Unit]])
val future = flow.runWith(Sink.foreach(e => println(s"${e._1} -> ${e._2}")))
Await.result(future, 10.seconds)
}
files.close
}
}
|
asarkar/akka
|
akka-streams-learning/weather-streaming/src/test/scala/name/abhijitsarkar/akka/TransformerSpec.scala
|
Scala
|
gpl-3.0
| 1,501 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.math.{BigDecimal => JavaBigDecimal}
import java.time.ZoneId
import java.util.Locale
import java.util.concurrent.TimeUnit._
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.{InternalRow, WalkedTypePath}
import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, TypeCoercion}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.DateTimeUtils._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.UTF8StringBuilder
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
import org.apache.spark.unsafe.types.UTF8String.{IntWrapper, LongWrapper}
object Cast {
/**
* Returns true iff we can cast `from` type to `to` type.
*/
def canCast(from: DataType, to: DataType): Boolean = (from, to) match {
case (fromType, toType) if fromType == toType => true
case (NullType, _) => true
case (_, StringType) => true
case (StringType, BinaryType) => true
case (_: IntegralType, BinaryType) => true
case (StringType, BooleanType) => true
case (DateType, BooleanType) => true
case (TimestampType, BooleanType) => true
case (_: NumericType, BooleanType) => true
case (StringType, TimestampType) => true
case (BooleanType, TimestampType) => true
case (DateType, TimestampType) => true
case (_: NumericType, TimestampType) => true
case (StringType, DateType) => true
case (TimestampType, DateType) => true
case (StringType, CalendarIntervalType) => true
case (StringType, _: NumericType) => true
case (BooleanType, _: NumericType) => true
case (DateType, _: NumericType) => true
case (TimestampType, _: NumericType) => true
case (_: NumericType, _: NumericType) => true
case (ArrayType(fromType, fn), ArrayType(toType, tn)) =>
canCast(fromType, toType) &&
resolvableNullability(fn || forceNullable(fromType, toType), tn)
case (MapType(fromKey, fromValue, fn), MapType(toKey, toValue, tn)) =>
canCast(fromKey, toKey) &&
(!forceNullable(fromKey, toKey)) &&
canCast(fromValue, toValue) &&
resolvableNullability(fn || forceNullable(fromValue, toValue), tn)
case (StructType(fromFields), StructType(toFields)) =>
fromFields.length == toFields.length &&
fromFields.zip(toFields).forall {
case (fromField, toField) =>
canCast(fromField.dataType, toField.dataType) &&
resolvableNullability(
fromField.nullable || forceNullable(fromField.dataType, toField.dataType),
toField.nullable)
}
case (udt1: UserDefinedType[_], udt2: UserDefinedType[_]) if udt1.userClass == udt2.userClass =>
true
case _ => false
}
/**
* Return true if we need to use the `timeZone` information casting `from` type to `to` type.
* The patterns matched reflect the current implementation in the Cast node.
* c.f. usage of `timeZone` in:
* * Cast.castToString
* * Cast.castToDate
* * Cast.castToTimestamp
*/
def needsTimeZone(from: DataType, to: DataType): Boolean = (from, to) match {
case (StringType, TimestampType | DateType) => true
case (DateType, TimestampType) => true
case (TimestampType, StringType) => true
case (TimestampType, DateType) => true
case (ArrayType(fromType, _), ArrayType(toType, _)) => needsTimeZone(fromType, toType)
case (MapType(fromKey, fromValue, _), MapType(toKey, toValue, _)) =>
needsTimeZone(fromKey, toKey) || needsTimeZone(fromValue, toValue)
case (StructType(fromFields), StructType(toFields)) =>
fromFields.length == toFields.length &&
fromFields.zip(toFields).exists {
case (fromField, toField) =>
needsTimeZone(fromField.dataType, toField.dataType)
}
case _ => false
}
/**
* Returns true iff we can safely up-cast the `from` type to `to` type without any truncating or
   * precision loss or possible runtime failures. For example, long -> int, string -> int are not
* up-cast.
*/
def canUpCast(from: DataType, to: DataType): Boolean = (from, to) match {
case _ if from == to => true
case (from: NumericType, to: DecimalType) if to.isWiderThan(from) => true
case (from: DecimalType, to: NumericType) if from.isTighterThan(to) => true
case (f, t) if legalNumericPrecedence(f, t) => true
case (DateType, TimestampType) => true
case (_: AtomicType, StringType) => true
case (_: CalendarIntervalType, StringType) => true
case (NullType, _) => true
// Spark supports casting between long and timestamp, please see `longToTimestamp` and
// `timestampToLong` for details.
case (TimestampType, LongType) => true
case (LongType, TimestampType) => true
case (ArrayType(fromType, fn), ArrayType(toType, tn)) =>
resolvableNullability(fn, tn) && canUpCast(fromType, toType)
case (MapType(fromKey, fromValue, fn), MapType(toKey, toValue, tn)) =>
resolvableNullability(fn, tn) && canUpCast(fromKey, toKey) && canUpCast(fromValue, toValue)
case (StructType(fromFields), StructType(toFields)) =>
fromFields.length == toFields.length &&
fromFields.zip(toFields).forall {
case (f1, f2) =>
resolvableNullability(f1.nullable, f2.nullable) && canUpCast(f1.dataType, f2.dataType)
}
case _ => false
}
/**
* Returns true iff we can cast the `from` type to `to` type as per the ANSI SQL.
* In practice, the behavior is mostly the same as PostgreSQL. It disallows certain unreasonable
* type conversions such as converting `string` to `int` or `double` to `boolean`.
*/
def canANSIStoreAssign(from: DataType, to: DataType): Boolean = (from, to) match {
case _ if from == to => true
case (_: NumericType, _: NumericType) => true
case (_: AtomicType, StringType) => true
case (_: CalendarIntervalType, StringType) => true
case (DateType, TimestampType) => true
case (TimestampType, DateType) => true
case (ArrayType(fromType, fn), ArrayType(toType, tn)) =>
resolvableNullability(fn, tn) && canANSIStoreAssign(fromType, toType)
case (MapType(fromKey, fromValue, fn), MapType(toKey, toValue, tn)) =>
resolvableNullability(fn, tn) && canANSIStoreAssign(fromKey, toKey) &&
canANSIStoreAssign(fromValue, toValue)
case (StructType(fromFields), StructType(toFields)) =>
fromFields.length == toFields.length &&
fromFields.zip(toFields).forall {
case (f1, f2) =>
resolvableNullability(f1.nullable, f2.nullable) &&
canANSIStoreAssign(f1.dataType, f2.dataType)
}
case _ => false
}
private def legalNumericPrecedence(from: DataType, to: DataType): Boolean = {
val fromPrecedence = TypeCoercion.numericPrecedence.indexOf(from)
val toPrecedence = TypeCoercion.numericPrecedence.indexOf(to)
fromPrecedence >= 0 && fromPrecedence < toPrecedence
}
def canNullSafeCastToDecimal(from: DataType, to: DecimalType): Boolean = from match {
case from: BooleanType if to.isWiderThan(DecimalType.BooleanDecimal) => true
case from: NumericType if to.isWiderThan(from) => true
case from: DecimalType =>
      // truncating or precision loss
(to.precision - to.scale) > (from.precision - from.scale)
case _ => false // overflow
}
def forceNullable(from: DataType, to: DataType): Boolean = (from, to) match {
case (NullType, _) => true
case (_, _) if from == to => false
case (StringType, BinaryType) => false
case (StringType, _) => true
case (_, StringType) => false
case (FloatType | DoubleType, TimestampType) => true
case (TimestampType, DateType) => false
case (_, DateType) => true
case (DateType, TimestampType) => false
case (DateType, _) => true
case (_, CalendarIntervalType) => true
case (_, to: DecimalType) if !canNullSafeCastToDecimal(from, to) => true
case (_: FractionalType, _: IntegralType) => true // NaN, infinity
case _ => false
}
def resolvableNullability(from: Boolean, to: Boolean): Boolean = !from || to
/**
   * We process literals such as 'Infinity', 'Inf', '-Infinity' and 'NaN' etc. in a
   * case-insensitive manner to be compatible with other database systems such as PostgreSQL and DB2.
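   * For example, "Infinity", "+inf" and "INF" all map to positive infinity, "NaN" maps to NaN,
   * and any other unrecognised value yields null.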
*/
def processFloatingPointSpecialLiterals(v: String, isFloat: Boolean): Any = {
v.trim.toLowerCase(Locale.ROOT) match {
case "inf" | "+inf" | "infinity" | "+infinity" =>
if (isFloat) Float.PositiveInfinity else Double.PositiveInfinity
case "-inf" | "-infinity" =>
if (isFloat) Float.NegativeInfinity else Double.NegativeInfinity
case "nan" =>
if (isFloat) Float.NaN else Double.NaN
case _ => null
}
}
}
/**
* Cast the child expression to the target data type.
*
* When cast from/to timezone related types, we need timeZoneId, which will be resolved with
* session local timezone by an analyzer [[ResolveTimeZone]].
*/
@ExpressionDescription(
usage = "_FUNC_(expr AS type) - Casts the value `expr` to the target data type `type`.",
examples = """
Examples:
> SELECT _FUNC_('10' as int);
10
""")
case class Cast(child: Expression, dataType: DataType, timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with NullIntolerant {
def this(child: Expression, dataType: DataType) = this(child, dataType, None)
override def toString: String = s"cast($child as ${dataType.simpleString})"
override def checkInputDataTypes(): TypeCheckResult = {
if (Cast.canCast(child.dataType, dataType)) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(
s"cannot cast ${child.dataType.catalogString} to ${dataType.catalogString}")
}
}
override def nullable: Boolean = Cast.forceNullable(child.dataType, dataType) || child.nullable
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
// When this cast involves TimeZone, it's only resolved if the timeZoneId is set;
// Otherwise behave like Expression.resolved.
override lazy val resolved: Boolean =
childrenResolved && checkInputDataTypes().isSuccess && (!needsTimeZone || timeZoneId.isDefined)
private[this] def needsTimeZone: Boolean = Cast.needsTimeZone(child.dataType, dataType)
// [[func]] assumes the input is no longer null because eval already does the null check.
@inline private[this] def buildCast[T](a: Any, func: T => Any): Any = func(a.asInstanceOf[T])
private lazy val dateFormatter = DateFormatter(zoneId)
private lazy val timestampFormatter = TimestampFormatter.getFractionFormatter(zoneId)
private val failOnIntegralTypeOverflow = SQLConf.get.ansiEnabled
// UDFToString
private[this] def castToString(from: DataType): Any => Any = from match {
case BinaryType => buildCast[Array[Byte]](_, UTF8String.fromBytes)
case DateType => buildCast[Int](_, d => UTF8String.fromString(dateFormatter.format(d)))
case TimestampType => buildCast[Long](_,
t => UTF8String.fromString(DateTimeUtils.timestampToString(timestampFormatter, t)))
case ArrayType(et, _) =>
buildCast[ArrayData](_, array => {
val builder = new UTF8StringBuilder
builder.append("[")
if (array.numElements > 0) {
val toUTF8String = castToString(et)
if (!array.isNullAt(0)) {
builder.append(toUTF8String(array.get(0, et)).asInstanceOf[UTF8String])
}
var i = 1
while (i < array.numElements) {
builder.append(",")
if (!array.isNullAt(i)) {
builder.append(" ")
builder.append(toUTF8String(array.get(i, et)).asInstanceOf[UTF8String])
}
i += 1
}
}
builder.append("]")
builder.build()
})
case MapType(kt, vt, _) =>
buildCast[MapData](_, map => {
val builder = new UTF8StringBuilder
builder.append("[")
if (map.numElements > 0) {
val keyArray = map.keyArray()
val valueArray = map.valueArray()
val keyToUTF8String = castToString(kt)
val valueToUTF8String = castToString(vt)
builder.append(keyToUTF8String(keyArray.get(0, kt)).asInstanceOf[UTF8String])
builder.append(" ->")
if (!valueArray.isNullAt(0)) {
builder.append(" ")
builder.append(valueToUTF8String(valueArray.get(0, vt)).asInstanceOf[UTF8String])
}
var i = 1
while (i < map.numElements) {
builder.append(", ")
builder.append(keyToUTF8String(keyArray.get(i, kt)).asInstanceOf[UTF8String])
builder.append(" ->")
if (!valueArray.isNullAt(i)) {
builder.append(" ")
builder.append(valueToUTF8String(valueArray.get(i, vt))
.asInstanceOf[UTF8String])
}
i += 1
}
}
builder.append("]")
builder.build()
})
case StructType(fields) =>
buildCast[InternalRow](_, row => {
val builder = new UTF8StringBuilder
builder.append("[")
if (row.numFields > 0) {
val st = fields.map(_.dataType)
val toUTF8StringFuncs = st.map(castToString)
if (!row.isNullAt(0)) {
builder.append(toUTF8StringFuncs(0)(row.get(0, st(0))).asInstanceOf[UTF8String])
}
var i = 1
while (i < row.numFields) {
builder.append(",")
if (!row.isNullAt(i)) {
builder.append(" ")
builder.append(toUTF8StringFuncs(i)(row.get(i, st(i))).asInstanceOf[UTF8String])
}
i += 1
}
}
builder.append("]")
builder.build()
})
case pudt: PythonUserDefinedType => castToString(pudt.sqlType)
case udt: UserDefinedType[_] =>
buildCast[Any](_, o => UTF8String.fromString(udt.deserialize(o).toString))
case _ => buildCast[Any](_, o => UTF8String.fromString(o.toString))
}
// BinaryConverter
private[this] def castToBinary(from: DataType): Any => Any = from match {
case StringType => buildCast[UTF8String](_, _.getBytes)
case ByteType => buildCast[Byte](_, NumberConverter.toBinary)
case ShortType => buildCast[Short](_, NumberConverter.toBinary)
case IntegerType => buildCast[Int](_, NumberConverter.toBinary)
case LongType => buildCast[Long](_, NumberConverter.toBinary)
}
// UDFToBoolean
private[this] def castToBoolean(from: DataType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, s => {
if (StringUtils.isTrueString(s)) {
true
} else if (StringUtils.isFalseString(s)) {
false
} else {
null
}
})
case TimestampType =>
buildCast[Long](_, t => t != 0)
case DateType =>
// Hive would return null when cast from date to boolean
buildCast[Int](_, d => null)
case LongType =>
buildCast[Long](_, _ != 0)
case IntegerType =>
buildCast[Int](_, _ != 0)
case ShortType =>
buildCast[Short](_, _ != 0)
case ByteType =>
buildCast[Byte](_, _ != 0)
case DecimalType() =>
buildCast[Decimal](_, !_.isZero)
case DoubleType =>
buildCast[Double](_, _ != 0)
case FloatType =>
buildCast[Float](_, _ != 0)
}
// TimestampConverter
private[this] def castToTimestamp(from: DataType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, utfs => DateTimeUtils.stringToTimestamp(utfs, zoneId).orNull)
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1L else 0)
case LongType =>
buildCast[Long](_, l => longToTimestamp(l))
case IntegerType =>
buildCast[Int](_, i => longToTimestamp(i.toLong))
case ShortType =>
buildCast[Short](_, s => longToTimestamp(s.toLong))
case ByteType =>
buildCast[Byte](_, b => longToTimestamp(b.toLong))
case DateType =>
buildCast[Int](_, d => epochDaysToMicros(d, zoneId))
// TimestampWritable.decimalToTimestamp
case DecimalType() =>
buildCast[Decimal](_, d => decimalToTimestamp(d))
// TimestampWritable.doubleToTimestamp
case DoubleType =>
buildCast[Double](_, d => doubleToTimestamp(d))
// TimestampWritable.floatToTimestamp
case FloatType =>
buildCast[Float](_, f => doubleToTimestamp(f.toDouble))
}
private[this] def decimalToTimestamp(d: Decimal): Long = {
(d.toBigDecimal * MICROS_PER_SECOND).longValue()
}
private[this] def doubleToTimestamp(d: Double): Any = {
if (d.isNaN || d.isInfinite) null else (d * MICROS_PER_SECOND).toLong
}
// converting seconds to us
private[this] def longToTimestamp(t: Long): Long = SECONDS.toMicros(t)
// converting us to seconds
private[this] def timestampToLong(ts: Long): Long = {
Math.floorDiv(ts, MICROS_PER_SECOND)
}
// converting us to seconds in double
private[this] def timestampToDouble(ts: Long): Double = {
ts / MICROS_PER_SECOND.toDouble
}
// DateConverter
private[this] def castToDate(from: DataType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, s => DateTimeUtils.stringToDate(s, zoneId).orNull)
case TimestampType =>
      // Throw away precision finer than a second, according to Hive.
// Timestamp.nanos is in 0 to 999,999,999, no more than a second.
buildCast[Long](_, t => microsToEpochDays(t, zoneId))
}
// IntervalConverter
private[this] def castToInterval(from: DataType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, s => CalendarInterval.fromString(s.toString))
}
// LongConverter
private[this] def castToLong(from: DataType): Any => Any = from match {
case StringType =>
val result = new LongWrapper()
buildCast[UTF8String](_, s => if (s.toLong(result)) result.value else null)
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1L else 0L)
case DateType =>
buildCast[Int](_, d => null)
case TimestampType =>
buildCast[Long](_, t => timestampToLong(t))
case x: NumericType if failOnIntegralTypeOverflow =>
b => x.exactNumeric.asInstanceOf[Numeric[Any]].toLong(b)
case x: NumericType =>
b => x.numeric.asInstanceOf[Numeric[Any]].toLong(b)
}
// IntConverter
private[this] def castToInt(from: DataType): Any => Any = from match {
case StringType =>
val result = new IntWrapper()
buildCast[UTF8String](_, s => if (s.toInt(result)) result.value else null)
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1 else 0)
case DateType =>
buildCast[Int](_, d => null)
case TimestampType if failOnIntegralTypeOverflow =>
buildCast[Long](_, t => LongExactNumeric.toInt(timestampToLong(t)))
case TimestampType =>
buildCast[Long](_, t => timestampToLong(t).toInt)
case x: NumericType if failOnIntegralTypeOverflow =>
b => x.exactNumeric.asInstanceOf[Numeric[Any]].toInt(b)
case x: NumericType =>
b => x.numeric.asInstanceOf[Numeric[Any]].toInt(b)
}
// ShortConverter
private[this] def castToShort(from: DataType): Any => Any = from match {
case StringType =>
val result = new IntWrapper()
buildCast[UTF8String](_, s => if (s.toShort(result)) {
result.value.toShort
} else {
null
})
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1.toShort else 0.toShort)
case DateType =>
buildCast[Int](_, d => null)
case TimestampType if failOnIntegralTypeOverflow =>
buildCast[Long](_, t => {
val longValue = timestampToLong(t)
if (longValue == longValue.toShort) {
longValue.toShort
} else {
throw new ArithmeticException(s"Casting $t to short causes overflow")
}
})
case TimestampType =>
buildCast[Long](_, t => timestampToLong(t).toShort)
case x: NumericType if failOnIntegralTypeOverflow =>
b =>
val intValue = try {
x.exactNumeric.asInstanceOf[Numeric[Any]].toInt(b)
} catch {
case _: ArithmeticException =>
throw new ArithmeticException(s"Casting $b to short causes overflow")
}
if (intValue == intValue.toShort) {
intValue.toShort
} else {
throw new ArithmeticException(s"Casting $b to short causes overflow")
}
case x: NumericType =>
b => x.numeric.asInstanceOf[Numeric[Any]].toInt(b).toShort
}
// ByteConverter
private[this] def castToByte(from: DataType): Any => Any = from match {
case StringType =>
val result = new IntWrapper()
buildCast[UTF8String](_, s => if (s.toByte(result)) {
result.value.toByte
} else {
null
})
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1.toByte else 0.toByte)
case DateType =>
buildCast[Int](_, d => null)
case TimestampType if failOnIntegralTypeOverflow =>
buildCast[Long](_, t => {
val longValue = timestampToLong(t)
if (longValue == longValue.toByte) {
longValue.toByte
} else {
throw new ArithmeticException(s"Casting $t to byte causes overflow")
}
})
case TimestampType =>
buildCast[Long](_, t => timestampToLong(t).toByte)
case x: NumericType if failOnIntegralTypeOverflow =>
b =>
val intValue = try {
x.exactNumeric.asInstanceOf[Numeric[Any]].toInt(b)
} catch {
case _: ArithmeticException =>
throw new ArithmeticException(s"Casting $b to byte causes overflow")
}
if (intValue == intValue.toByte) {
intValue.toByte
} else {
throw new ArithmeticException(s"Casting $b to byte causes overflow")
}
case x: NumericType =>
b => x.numeric.asInstanceOf[Numeric[Any]].toInt(b).toByte
}
private val nullOnOverflow = !SQLConf.get.ansiEnabled
/**
* Change the precision / scale in a given decimal to those set in `decimalType` (if any),
* modifying `value` in-place and returning it if successful. If an overflow occurs, it
* either returns null or throws an exception according to the value set for
* `spark.sql.ansi.enabled`.
*
* NOTE: this modifies `value` in-place, so don't call it on external data.
*/
private[this] def changePrecision(value: Decimal, decimalType: DecimalType): Decimal = {
if (value.changePrecision(decimalType.precision, decimalType.scale)) {
value
} else {
if (nullOnOverflow) {
null
} else {
throw new ArithmeticException(s"${value.toDebugString} cannot be represented as " +
s"Decimal(${decimalType.precision}, ${decimalType.scale}).")
}
}
}
/**
* Create new `Decimal` with precision and scale given in `decimalType` (if any).
* If overflow occurs, if `spark.sql.ansi.enabled` is false, null is returned;
* otherwise, an `ArithmeticException` is thrown.
*/
private[this] def toPrecision(value: Decimal, decimalType: DecimalType): Decimal =
value.toPrecision(
decimalType.precision, decimalType.scale, Decimal.ROUND_HALF_UP, nullOnOverflow)
private[this] def castToDecimal(from: DataType, target: DecimalType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, s => try {
changePrecision(Decimal(new JavaBigDecimal(s.toString)), target)
} catch {
case _: NumberFormatException => null
})
case BooleanType =>
buildCast[Boolean](_, b => toPrecision(if (b) Decimal.ONE else Decimal.ZERO, target))
case DateType =>
buildCast[Int](_, d => null) // date can't cast to decimal in Hive
case TimestampType =>
// Note that we lose precision here.
buildCast[Long](_, t => changePrecision(Decimal(timestampToDouble(t)), target))
case dt: DecimalType =>
b => toPrecision(b.asInstanceOf[Decimal], target)
case t: IntegralType =>
b => changePrecision(Decimal(t.integral.asInstanceOf[Integral[Any]].toLong(b)), target)
case x: FractionalType =>
b => try {
changePrecision(Decimal(x.fractional.asInstanceOf[Fractional[Any]].toDouble(b)), target)
} catch {
case _: NumberFormatException => null
}
}
// DoubleConverter
private[this] def castToDouble(from: DataType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, s => {
val doubleStr = s.toString
try doubleStr.toDouble catch {
case _: NumberFormatException =>
Cast.processFloatingPointSpecialLiterals(doubleStr, false)
}
})
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1d else 0d)
case DateType =>
buildCast[Int](_, d => null)
case TimestampType =>
buildCast[Long](_, t => timestampToDouble(t))
case x: NumericType =>
b => x.numeric.asInstanceOf[Numeric[Any]].toDouble(b)
}
// FloatConverter
private[this] def castToFloat(from: DataType): Any => Any = from match {
case StringType =>
buildCast[UTF8String](_, s => {
val floatStr = s.toString
try floatStr.toFloat catch {
case _: NumberFormatException =>
Cast.processFloatingPointSpecialLiterals(floatStr, true)
}
})
case BooleanType =>
buildCast[Boolean](_, b => if (b) 1f else 0f)
case DateType =>
buildCast[Int](_, d => null)
case TimestampType =>
buildCast[Long](_, t => timestampToDouble(t).toFloat)
case x: NumericType =>
b => x.numeric.asInstanceOf[Numeric[Any]].toFloat(b)
}
private[this] def castArray(fromType: DataType, toType: DataType): Any => Any = {
val elementCast = cast(fromType, toType)
// TODO: Could be faster?
buildCast[ArrayData](_, array => {
val values = new Array[Any](array.numElements())
array.foreach(fromType, (i, e) => {
if (e == null) {
values(i) = null
} else {
values(i) = elementCast(e)
}
})
new GenericArrayData(values)
})
}
private[this] def castMap(from: MapType, to: MapType): Any => Any = {
val keyCast = castArray(from.keyType, to.keyType)
val valueCast = castArray(from.valueType, to.valueType)
buildCast[MapData](_, map => {
val keys = keyCast(map.keyArray()).asInstanceOf[ArrayData]
val values = valueCast(map.valueArray()).asInstanceOf[ArrayData]
new ArrayBasedMapData(keys, values)
})
}
private[this] def castStruct(from: StructType, to: StructType): Any => Any = {
val castFuncs: Array[(Any) => Any] = from.fields.zip(to.fields).map {
case (fromField, toField) => cast(fromField.dataType, toField.dataType)
}
// TODO: Could be faster?
buildCast[InternalRow](_, row => {
val newRow = new GenericInternalRow(from.fields.length)
var i = 0
while (i < row.numFields) {
newRow.update(i,
if (row.isNullAt(i)) null else castFuncs(i)(row.get(i, from.apply(i).dataType)))
i += 1
}
newRow
})
}
private[this] def cast(from: DataType, to: DataType): Any => Any = {
// If the cast does not change the structure, then we don't really need to cast anything.
// We can return what the children return. Same thing should happen in the codegen path.
if (DataType.equalsStructurally(from, to)) {
identity
} else if (from == NullType) {
// According to `canCast`, NullType can be casted to any type.
// For primitive types, we don't reach here because the guard of `nullSafeEval`.
// But for nested types like struct, we might reach here for nested null type field.
// We won't call the returned function actually, but returns a placeholder.
_ => throw new SparkException(s"should not directly cast from NullType to $to.")
} else {
to match {
case dt if dt == from => identity[Any]
case StringType => castToString(from)
case BinaryType => castToBinary(from)
case DateType => castToDate(from)
case decimal: DecimalType => castToDecimal(from, decimal)
case TimestampType => castToTimestamp(from)
case CalendarIntervalType => castToInterval(from)
case BooleanType => castToBoolean(from)
case ByteType => castToByte(from)
case ShortType => castToShort(from)
case IntegerType => castToInt(from)
case FloatType => castToFloat(from)
case LongType => castToLong(from)
case DoubleType => castToDouble(from)
case array: ArrayType =>
castArray(from.asInstanceOf[ArrayType].elementType, array.elementType)
case map: MapType => castMap(from.asInstanceOf[MapType], map)
case struct: StructType => castStruct(from.asInstanceOf[StructType], struct)
case udt: UserDefinedType[_]
if udt.userClass == from.asInstanceOf[UserDefinedType[_]].userClass =>
identity[Any]
case _: UserDefinedType[_] =>
throw new SparkException(s"Cannot cast $from to $to.")
}
}
}
private[this] lazy val cast: Any => Any = cast(child.dataType, dataType)
protected override def nullSafeEval(input: Any): Any = cast(input)
override def genCode(ctx: CodegenContext): ExprCode = {
// If the cast does not change the structure, then we don't really need to cast anything.
// We can return what the children return. Same thing should happen in the interpreted path.
if (DataType.equalsStructurally(child.dataType, dataType)) {
child.genCode(ctx)
} else {
super.genCode(ctx)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval = child.genCode(ctx)
val nullSafeCast = nullSafeCastFunction(child.dataType, dataType, ctx)
ev.copy(code = eval.code +
castCode(ctx, eval.value, eval.isNull, ev.value, ev.isNull, dataType, nullSafeCast))
}
// The function arguments are: `input`, `result` and `resultIsNull`. We don't need `inputIsNull`
// in parameter list, because the returned code will be put in null safe evaluation region.
private[this] type CastFunction = (ExprValue, ExprValue, ExprValue) => Block
private[this] def nullSafeCastFunction(
from: DataType,
to: DataType,
ctx: CodegenContext): CastFunction = to match {
case _ if from == NullType => (c, evPrim, evNull) => code"$evNull = true;"
case _ if to == from => (c, evPrim, evNull) => code"$evPrim = $c;"
case StringType => castToStringCode(from, ctx)
case BinaryType => castToBinaryCode(from)
case DateType => castToDateCode(from, ctx)
case decimal: DecimalType => castToDecimalCode(from, decimal, ctx)
case TimestampType => castToTimestampCode(from, ctx)
case CalendarIntervalType => castToIntervalCode(from)
case BooleanType => castToBooleanCode(from)
case ByteType => castToByteCode(from, ctx)
case ShortType => castToShortCode(from, ctx)
case IntegerType => castToIntCode(from, ctx)
case FloatType => castToFloatCode(from, ctx)
case LongType => castToLongCode(from, ctx)
case DoubleType => castToDoubleCode(from, ctx)
case array: ArrayType =>
castArrayCode(from.asInstanceOf[ArrayType].elementType, array.elementType, ctx)
case map: MapType => castMapCode(from.asInstanceOf[MapType], map, ctx)
case struct: StructType => castStructCode(from.asInstanceOf[StructType], struct, ctx)
case udt: UserDefinedType[_]
if udt.userClass == from.asInstanceOf[UserDefinedType[_]].userClass =>
(c, evPrim, evNull) => code"$evPrim = $c;"
case _: UserDefinedType[_] =>
throw new SparkException(s"Cannot cast $from to $to.")
}
// Since we need to cast input expressions recursively inside ComplexTypes, such as Map's
// Key and Value, Struct's field, we need to name out all the variable names involved in a cast.
private[this] def castCode(ctx: CodegenContext, input: ExprValue, inputIsNull: ExprValue,
result: ExprValue, resultIsNull: ExprValue, resultType: DataType, cast: CastFunction): Block = {
val javaType = JavaCode.javaType(resultType)
code"""
boolean $resultIsNull = $inputIsNull;
$javaType $result = ${CodeGenerator.defaultValue(resultType)};
if (!$inputIsNull) {
${cast(input, result, resultIsNull)}
}
"""
}
private def writeArrayToStringBuilder(
et: DataType,
array: ExprValue,
buffer: ExprValue,
ctx: CodegenContext): Block = {
val elementToStringCode = castToStringCode(et, ctx)
val funcName = ctx.freshName("elementToString")
val element = JavaCode.variable("element", et)
val elementStr = JavaCode.variable("elementStr", StringType)
val elementToStringFunc = inline"${ctx.addNewFunction(funcName,
s"""
|private UTF8String $funcName(${CodeGenerator.javaType(et)} $element) {
| UTF8String $elementStr = null;
| ${elementToStringCode(element, elementStr, null /* resultIsNull won't be used */)}
| return elementStr;
|}
""".stripMargin)}"
val loopIndex = ctx.freshVariable("loopIndex", IntegerType)
code"""
|$buffer.append("[");
|if ($array.numElements() > 0) {
| if (!$array.isNullAt(0)) {
| $buffer.append($elementToStringFunc(${CodeGenerator.getValue(array, et, "0")}));
| }
| for (int $loopIndex = 1; $loopIndex < $array.numElements(); $loopIndex++) {
| $buffer.append(",");
| if (!$array.isNullAt($loopIndex)) {
| $buffer.append(" ");
| $buffer.append($elementToStringFunc(${CodeGenerator.getValue(array, et, loopIndex)}));
| }
| }
|}
|$buffer.append("]");
""".stripMargin
}
private def writeMapToStringBuilder(
kt: DataType,
vt: DataType,
map: ExprValue,
buffer: ExprValue,
ctx: CodegenContext): Block = {
def dataToStringFunc(func: String, dataType: DataType) = {
val funcName = ctx.freshName(func)
val dataToStringCode = castToStringCode(dataType, ctx)
val data = JavaCode.variable("data", dataType)
val dataStr = JavaCode.variable("dataStr", StringType)
val functionCall = ctx.addNewFunction(funcName,
s"""
|private UTF8String $funcName(${CodeGenerator.javaType(dataType)} $data) {
| UTF8String $dataStr = null;
| ${dataToStringCode(data, dataStr, null /* resultIsNull won't be used */)}
| return dataStr;
|}
""".stripMargin)
inline"$functionCall"
}
val keyToStringFunc = dataToStringFunc("keyToString", kt)
val valueToStringFunc = dataToStringFunc("valueToString", vt)
val loopIndex = ctx.freshVariable("loopIndex", IntegerType)
val mapKeyArray = JavaCode.expression(s"$map.keyArray()", classOf[ArrayData])
val mapValueArray = JavaCode.expression(s"$map.valueArray()", classOf[ArrayData])
val getMapFirstKey = CodeGenerator.getValue(mapKeyArray, kt, JavaCode.literal("0", IntegerType))
val getMapFirstValue = CodeGenerator.getValue(mapValueArray, vt,
JavaCode.literal("0", IntegerType))
val getMapKeyArray = CodeGenerator.getValue(mapKeyArray, kt, loopIndex)
val getMapValueArray = CodeGenerator.getValue(mapValueArray, vt, loopIndex)
code"""
|$buffer.append("[");
|if ($map.numElements() > 0) {
| $buffer.append($keyToStringFunc($getMapFirstKey));
| $buffer.append(" ->");
| if (!$map.valueArray().isNullAt(0)) {
| $buffer.append(" ");
| $buffer.append($valueToStringFunc($getMapFirstValue));
| }
| for (int $loopIndex = 1; $loopIndex < $map.numElements(); $loopIndex++) {
| $buffer.append(", ");
| $buffer.append($keyToStringFunc($getMapKeyArray));
| $buffer.append(" ->");
| if (!$map.valueArray().isNullAt($loopIndex)) {
| $buffer.append(" ");
| $buffer.append($valueToStringFunc($getMapValueArray));
| }
| }
|}
|$buffer.append("]");
""".stripMargin
}
private def writeStructToStringBuilder(
st: Seq[DataType],
row: ExprValue,
buffer: ExprValue,
ctx: CodegenContext): Block = {
val structToStringCode = st.zipWithIndex.map { case (ft, i) =>
val fieldToStringCode = castToStringCode(ft, ctx)
val field = ctx.freshVariable("field", ft)
val fieldStr = ctx.freshVariable("fieldStr", StringType)
val javaType = JavaCode.javaType(ft)
code"""
|${if (i != 0) code"""$buffer.append(",");""" else EmptyBlock}
|if (!$row.isNullAt($i)) {
| ${if (i != 0) code"""$buffer.append(" ");""" else EmptyBlock}
|
| // Append $i field into the string buffer
| $javaType $field = ${CodeGenerator.getValue(row, ft, s"$i")};
| UTF8String $fieldStr = null;
| ${fieldToStringCode(field, fieldStr, null /* resultIsNull won't be used */)}
| $buffer.append($fieldStr);
|}
""".stripMargin
}
val writeStructCode = ctx.splitExpressions(
expressions = structToStringCode.map(_.code),
funcName = "fieldToString",
arguments = ("InternalRow", row.code) ::
(classOf[UTF8StringBuilder].getName, buffer.code) :: Nil)
code"""
|$buffer.append("[");
|$writeStructCode
|$buffer.append("]");
""".stripMargin
}
private[this] def castToStringCode(from: DataType, ctx: CodegenContext): CastFunction = {
from match {
case BinaryType =>
(c, evPrim, evNull) => code"$evPrim = UTF8String.fromBytes($c);"
case DateType =>
val df = JavaCode.global(
ctx.addReferenceObj("dateFormatter", dateFormatter),
dateFormatter.getClass)
(c, evPrim, evNull) => code"""$evPrim = UTF8String.fromString(${df}.format($c));"""
case TimestampType =>
val tf = JavaCode.global(
ctx.addReferenceObj("timestampFormatter", timestampFormatter),
timestampFormatter.getClass)
(c, evPrim, evNull) => code"""$evPrim = UTF8String.fromString(
org.apache.spark.sql.catalyst.util.DateTimeUtils.timestampToString($tf, $c));"""
case ArrayType(et, _) =>
(c, evPrim, evNull) => {
val buffer = ctx.freshVariable("buffer", classOf[UTF8StringBuilder])
val bufferClass = JavaCode.javaType(classOf[UTF8StringBuilder])
val writeArrayElemCode = writeArrayToStringBuilder(et, c, buffer, ctx)
code"""
|$bufferClass $buffer = new $bufferClass();
|$writeArrayElemCode;
|$evPrim = $buffer.build();
""".stripMargin
}
case MapType(kt, vt, _) =>
(c, evPrim, evNull) => {
val buffer = ctx.freshVariable("buffer", classOf[UTF8StringBuilder])
val bufferClass = JavaCode.javaType(classOf[UTF8StringBuilder])
val writeMapElemCode = writeMapToStringBuilder(kt, vt, c, buffer, ctx)
code"""
|$bufferClass $buffer = new $bufferClass();
|$writeMapElemCode;
|$evPrim = $buffer.build();
""".stripMargin
}
case StructType(fields) =>
(c, evPrim, evNull) => {
val row = ctx.freshVariable("row", classOf[InternalRow])
val buffer = ctx.freshVariable("buffer", classOf[UTF8StringBuilder])
val bufferClass = JavaCode.javaType(classOf[UTF8StringBuilder])
val writeStructCode = writeStructToStringBuilder(fields.map(_.dataType), row, buffer, ctx)
code"""
|InternalRow $row = $c;
|$bufferClass $buffer = new $bufferClass();
|$writeStructCode
|$evPrim = $buffer.build();
""".stripMargin
}
case pudt: PythonUserDefinedType => castToStringCode(pudt.sqlType, ctx)
case udt: UserDefinedType[_] =>
val udtRef = JavaCode.global(ctx.addReferenceObj("udt", udt), udt.sqlType)
(c, evPrim, evNull) => {
code"$evPrim = UTF8String.fromString($udtRef.deserialize($c).toString());"
}
case _ =>
(c, evPrim, evNull) => code"$evPrim = UTF8String.fromString(String.valueOf($c));"
}
}
private[this] def castToBinaryCode(from: DataType): CastFunction = from match {
case StringType =>
(c, evPrim, evNull) =>
code"$evPrim = $c.getBytes();"
case _: IntegralType =>
(c, evPrim, evNull) =>
code"$evPrim = ${NumberConverter.getClass.getName.stripSuffix("$")}.toBinary($c);"
}
private[this] def castToDateCode(
from: DataType,
ctx: CodegenContext): CastFunction = {
def getZoneId() = {
val zoneIdClass = classOf[ZoneId]
JavaCode.global(
ctx.addReferenceObj("zoneId", zoneId, zoneIdClass.getName),
zoneIdClass)
}
from match {
case StringType =>
val intOpt = ctx.freshVariable("intOpt", classOf[Option[Integer]])
val zid = getZoneId()
(c, evPrim, evNull) =>
code"""
scala.Option<Integer> $intOpt =
org.apache.spark.sql.catalyst.util.DateTimeUtils.stringToDate($c, $zid);
if ($intOpt.isDefined()) {
$evPrim = ((Integer) $intOpt.get()).intValue();
} else {
$evNull = true;
}
"""
case TimestampType =>
val zid = getZoneId()
(c, evPrim, evNull) =>
code"""$evPrim =
org.apache.spark.sql.catalyst.util.DateTimeUtils.microsToEpochDays($c, $zid);"""
case _ =>
(c, evPrim, evNull) => code"$evNull = true;"
}
}
private[this] def changePrecision(d: ExprValue, decimalType: DecimalType,
evPrim: ExprValue, evNull: ExprValue, canNullSafeCast: Boolean): Block = {
if (canNullSafeCast) {
code"""
|$d.changePrecision(${decimalType.precision}, ${decimalType.scale});
|$evPrim = $d;
""".stripMargin
} else {
val overflowCode = if (nullOnOverflow) {
s"$evNull = true;"
} else {
s"""
|throw new ArithmeticException($d.toDebugString() + " cannot be represented as " +
| "Decimal(${decimalType.precision}, ${decimalType.scale}).");
""".stripMargin
}
code"""
|if ($d.changePrecision(${decimalType.precision}, ${decimalType.scale})) {
| $evPrim = $d;
|} else {
| $overflowCode
|}
""".stripMargin
}
}
private[this] def castToDecimalCode(
from: DataType,
target: DecimalType,
ctx: CodegenContext): CastFunction = {
val tmp = ctx.freshVariable("tmpDecimal", classOf[Decimal])
val canNullSafeCast = Cast.canNullSafeCastToDecimal(from, target)
from match {
case StringType =>
(c, evPrim, evNull) =>
code"""
try {
Decimal $tmp = Decimal.apply(new java.math.BigDecimal($c.toString()));
${changePrecision(tmp, target, evPrim, evNull, canNullSafeCast)}
} catch (java.lang.NumberFormatException e) {
$evNull = true;
}
"""
case BooleanType =>
(c, evPrim, evNull) =>
code"""
Decimal $tmp = $c ? Decimal.apply(1) : Decimal.apply(0);
${changePrecision(tmp, target, evPrim, evNull, canNullSafeCast)}
"""
case DateType =>
// date can't cast to decimal in Hive
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType =>
// Note that we lose precision here.
(c, evPrim, evNull) =>
code"""
Decimal $tmp = Decimal.apply(
scala.math.BigDecimal.valueOf(${timestampToDoubleCode(c)}));
${changePrecision(tmp, target, evPrim, evNull, canNullSafeCast)}
"""
case DecimalType() =>
(c, evPrim, evNull) =>
code"""
Decimal $tmp = $c.clone();
${changePrecision(tmp, target, evPrim, evNull, canNullSafeCast)}
"""
case x: IntegralType =>
(c, evPrim, evNull) =>
code"""
Decimal $tmp = Decimal.apply((long) $c);
${changePrecision(tmp, target, evPrim, evNull, canNullSafeCast)}
"""
case x: FractionalType =>
// All other numeric types can be represented precisely as Doubles
(c, evPrim, evNull) =>
code"""
try {
Decimal $tmp = Decimal.apply(scala.math.BigDecimal.valueOf((double) $c));
${changePrecision(tmp, target, evPrim, evNull, canNullSafeCast)}
} catch (java.lang.NumberFormatException e) {
$evNull = true;
}
"""
}
}
private[this] def castToTimestampCode(
from: DataType,
ctx: CodegenContext): CastFunction = from match {
case StringType =>
val zoneIdClass = classOf[ZoneId]
val zid = JavaCode.global(
ctx.addReferenceObj("zoneId", zoneId, zoneIdClass.getName),
zoneIdClass)
val longOpt = ctx.freshVariable("longOpt", classOf[Option[Long]])
(c, evPrim, evNull) =>
code"""
scala.Option<Long> $longOpt =
org.apache.spark.sql.catalyst.util.DateTimeUtils.stringToTimestamp($c, $zid);
if ($longOpt.isDefined()) {
$evPrim = ((Long) $longOpt.get()).longValue();
} else {
$evNull = true;
}
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? 1L : 0L;"
case _: IntegralType =>
(c, evPrim, evNull) => code"$evPrim = ${longToTimeStampCode(c)};"
case DateType =>
val zoneIdClass = classOf[ZoneId]
val zid = JavaCode.global(
ctx.addReferenceObj("zoneId", zoneId, zoneIdClass.getName),
zoneIdClass)
(c, evPrim, evNull) =>
code"""$evPrim =
org.apache.spark.sql.catalyst.util.DateTimeUtils.epochDaysToMicros($c, $zid);"""
case DecimalType() =>
(c, evPrim, evNull) => code"$evPrim = ${decimalToTimestampCode(c)};"
case DoubleType =>
(c, evPrim, evNull) =>
code"""
if (Double.isNaN($c) || Double.isInfinite($c)) {
$evNull = true;
} else {
$evPrim = (long)($c * $MICROS_PER_SECOND);
}
"""
case FloatType =>
(c, evPrim, evNull) =>
code"""
if (Float.isNaN($c) || Float.isInfinite($c)) {
$evNull = true;
} else {
$evPrim = (long)($c * $MICROS_PER_SECOND);
}
"""
}
private[this] def castToIntervalCode(from: DataType): CastFunction = from match {
case StringType =>
(c, evPrim, evNull) =>
code"""$evPrim = CalendarInterval.fromString($c.toString());
if(${evPrim} == null) {
${evNull} = true;
}
""".stripMargin
}
private[this] def decimalToTimestampCode(d: ExprValue): Block = {
val block = inline"new java.math.BigDecimal($MICROS_PER_SECOND)"
code"($d.toBigDecimal().bigDecimal().multiply($block)).longValue()"
}
private[this] def longToTimeStampCode(l: ExprValue): Block = code"$l * (long)$MICROS_PER_SECOND"
private[this] def timestampToLongCode(ts: ExprValue): Block =
code"java.lang.Math.floorDiv($ts, $MICROS_PER_SECOND)"
private[this] def timestampToDoubleCode(ts: ExprValue): Block =
code"$ts / (double)$MICROS_PER_SECOND"
private[this] def castToBooleanCode(from: DataType): CastFunction = from match {
case StringType =>
val stringUtils = inline"${StringUtils.getClass.getName.stripSuffix("$")}"
(c, evPrim, evNull) =>
code"""
if ($stringUtils.isTrueString($c)) {
$evPrim = true;
} else if ($stringUtils.isFalseString($c)) {
$evPrim = false;
} else {
$evNull = true;
}
"""
case TimestampType =>
(c, evPrim, evNull) => code"$evPrim = $c != 0;"
case DateType =>
// Hive would return null when cast from date to boolean
(c, evPrim, evNull) => code"$evNull = true;"
case DecimalType() =>
(c, evPrim, evNull) => code"$evPrim = !$c.isZero();"
case n: NumericType =>
(c, evPrim, evNull) => code"$evPrim = $c != 0;"
}
private[this] def castTimestampToIntegralTypeCode(
ctx: CodegenContext,
integralType: String): CastFunction = {
if (failOnIntegralTypeOverflow) {
val longValue = ctx.freshName("longValue")
(c, evPrim, evNull) =>
code"""
long $longValue = ${timestampToLongCode(c)};
if ($longValue == ($integralType) $longValue) {
$evPrim = ($integralType) $longValue;
} else {
throw new ArithmeticException("Casting " + $c + " to $integralType causes overflow");
}
"""
} else {
(c, evPrim, evNull) => code"$evPrim = ($integralType) ${timestampToLongCode(c)};"
}
}
private[this] def castDecimalToIntegralTypeCode(
ctx: CodegenContext,
integralType: String): CastFunction = {
if (failOnIntegralTypeOverflow) {
(c, evPrim, evNull) => code"$evPrim = $c.roundTo${integralType.capitalize}();"
} else {
(c, evPrim, evNull) => code"$evPrim = $c.to${integralType.capitalize}();"
}
}
private[this] def castIntegralTypeToIntegralTypeExactCode(integralType: String): CastFunction = {
assert(failOnIntegralTypeOverflow)
(c, evPrim, evNull) =>
code"""
if ($c == ($integralType) $c) {
$evPrim = ($integralType) $c;
} else {
throw new ArithmeticException("Casting " + $c + " to $integralType causes overflow");
}
"""
}
private[this] def lowerAndUpperBound(
fractionType: String,
integralType: String): (String, String) = {
assert(fractionType == "float" || fractionType == "double")
val typeIndicator = fractionType.charAt(0)
val (min, max) = integralType.toLowerCase(Locale.ROOT) match {
case "long" => (Long.MinValue, Long.MaxValue)
case "int" => (Int.MinValue, Int.MaxValue)
case "short" => (Short.MinValue, Short.MaxValue)
case "byte" => (Byte.MinValue, Byte.MaxValue)
}
(min.toString + typeIndicator, max.toString + typeIndicator)
}
private[this] def castFractionToIntegralTypeCode(
fractionType: String,
integralType: String): CastFunction = {
assert(failOnIntegralTypeOverflow)
val (min, max) = lowerAndUpperBound(fractionType, integralType)
val mathClass = classOf[Math].getName
    // When casting floating values to integral types, Spark uses the method `Numeric.toInt`
    // or `Numeric.toLong` directly. For positive floating values, it is equivalent to `Math.floor`;
// for negative floating values, it is equivalent to `Math.ceil`.
// So, we can use the condition `Math.floor(x) <= upperBound && Math.ceil(x) >= lowerBound`
// to check if the floating value x is in the range of an integral type after rounding.
(c, evPrim, evNull) =>
code"""
if ($mathClass.floor($c) <= $max && $mathClass.ceil($c) >= $min) {
$evPrim = ($integralType) $c;
} else {
throw new ArithmeticException("Casting " + $c + " to $integralType causes overflow");
}
"""
}
private[this] def castToByteCode(from: DataType, ctx: CodegenContext): CastFunction = from match {
case StringType =>
val wrapper = ctx.freshVariable("intWrapper", classOf[UTF8String.IntWrapper])
(c, evPrim, evNull) =>
code"""
UTF8String.IntWrapper $wrapper = new UTF8String.IntWrapper();
if ($c.toByte($wrapper)) {
$evPrim = (byte) $wrapper.value;
} else {
$evNull = true;
}
$wrapper = null;
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? (byte) 1 : (byte) 0;"
case DateType =>
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType => castTimestampToIntegralTypeCode(ctx, "byte")
case DecimalType() => castDecimalToIntegralTypeCode(ctx, "byte")
case _: ShortType | _: IntegerType | _: LongType if failOnIntegralTypeOverflow =>
castIntegralTypeToIntegralTypeExactCode("byte")
case _: FloatType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("float", "byte")
case _: DoubleType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("double", "byte")
case x: NumericType =>
(c, evPrim, evNull) => code"$evPrim = (byte) $c;"
}
private[this] def castToShortCode(
from: DataType,
ctx: CodegenContext): CastFunction = from match {
case StringType =>
val wrapper = ctx.freshVariable("intWrapper", classOf[UTF8String.IntWrapper])
(c, evPrim, evNull) =>
code"""
UTF8String.IntWrapper $wrapper = new UTF8String.IntWrapper();
if ($c.toShort($wrapper)) {
$evPrim = (short) $wrapper.value;
} else {
$evNull = true;
}
$wrapper = null;
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? (short) 1 : (short) 0;"
case DateType =>
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType => castTimestampToIntegralTypeCode(ctx, "short")
case DecimalType() => castDecimalToIntegralTypeCode(ctx, "short")
case _: IntegerType | _: LongType if failOnIntegralTypeOverflow =>
castIntegralTypeToIntegralTypeExactCode("short")
case _: FloatType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("float", "short")
case _: DoubleType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("double", "short")
case x: NumericType =>
(c, evPrim, evNull) => code"$evPrim = (short) $c;"
}
private[this] def castToIntCode(from: DataType, ctx: CodegenContext): CastFunction = from match {
case StringType =>
val wrapper = ctx.freshVariable("intWrapper", classOf[UTF8String.IntWrapper])
(c, evPrim, evNull) =>
code"""
UTF8String.IntWrapper $wrapper = new UTF8String.IntWrapper();
if ($c.toInt($wrapper)) {
$evPrim = $wrapper.value;
} else {
$evNull = true;
}
$wrapper = null;
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? 1 : 0;"
case DateType =>
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType => castTimestampToIntegralTypeCode(ctx, "int")
case DecimalType() => castDecimalToIntegralTypeCode(ctx, "int")
case _: LongType if failOnIntegralTypeOverflow => castIntegralTypeToIntegralTypeExactCode("int")
case _: FloatType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("float", "int")
case _: DoubleType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("double", "int")
case x: NumericType =>
(c, evPrim, evNull) => code"$evPrim = (int) $c;"
}
private[this] def castToLongCode(from: DataType, ctx: CodegenContext): CastFunction = from match {
case StringType =>
val wrapper = ctx.freshVariable("longWrapper", classOf[UTF8String.LongWrapper])
(c, evPrim, evNull) =>
code"""
UTF8String.LongWrapper $wrapper = new UTF8String.LongWrapper();
if ($c.toLong($wrapper)) {
$evPrim = $wrapper.value;
} else {
$evNull = true;
}
$wrapper = null;
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? 1L : 0L;"
case DateType =>
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType =>
(c, evPrim, evNull) => code"$evPrim = (long) ${timestampToLongCode(c)};"
case DecimalType() => castDecimalToIntegralTypeCode(ctx, "long")
case _: FloatType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("float", "long")
case _: DoubleType if failOnIntegralTypeOverflow =>
castFractionToIntegralTypeCode("double", "long")
case x: NumericType =>
(c, evPrim, evNull) => code"$evPrim = (long) $c;"
}
private[this] def castToFloatCode(from: DataType, ctx: CodegenContext): CastFunction = {
from match {
case StringType =>
val floatStr = ctx.freshVariable("floatStr", StringType)
(c, evPrim, evNull) =>
code"""
final String $floatStr = $c.toString();
try {
$evPrim = Float.valueOf($floatStr);
} catch (java.lang.NumberFormatException e) {
final Float f = (Float) Cast.processFloatingPointSpecialLiterals($floatStr, true);
if (f == null) {
$evNull = true;
} else {
$evPrim = f.floatValue();
}
}
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? 1.0f : 0.0f;"
case DateType =>
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType =>
(c, evPrim, evNull) => code"$evPrim = (float) (${timestampToDoubleCode(c)});"
case DecimalType() =>
(c, evPrim, evNull) => code"$evPrim = $c.toFloat();"
case x: NumericType =>
(c, evPrim, evNull) => code"$evPrim = (float) $c;"
}
}
private[this] def castToDoubleCode(from: DataType, ctx: CodegenContext): CastFunction = {
from match {
case StringType =>
val doubleStr = ctx.freshVariable("doubleStr", StringType)
(c, evPrim, evNull) =>
code"""
final String $doubleStr = $c.toString();
try {
$evPrim = Double.valueOf($doubleStr);
} catch (java.lang.NumberFormatException e) {
final Double d = (Double) Cast.processFloatingPointSpecialLiterals($doubleStr, false);
if (d == null) {
$evNull = true;
} else {
$evPrim = d.doubleValue();
}
}
"""
case BooleanType =>
(c, evPrim, evNull) => code"$evPrim = $c ? 1.0d : 0.0d;"
case DateType =>
(c, evPrim, evNull) => code"$evNull = true;"
case TimestampType =>
(c, evPrim, evNull) => code"$evPrim = ${timestampToDoubleCode(c)};"
case DecimalType() =>
(c, evPrim, evNull) => code"$evPrim = $c.toDouble();"
case x: NumericType =>
(c, evPrim, evNull) => code"$evPrim = (double) $c;"
}
}
private[this] def castArrayCode(
fromType: DataType, toType: DataType, ctx: CodegenContext): CastFunction = {
val elementCast = nullSafeCastFunction(fromType, toType, ctx)
val arrayClass = JavaCode.javaType(classOf[GenericArrayData])
val fromElementNull = ctx.freshVariable("feNull", BooleanType)
val fromElementPrim = ctx.freshVariable("fePrim", fromType)
val toElementNull = ctx.freshVariable("teNull", BooleanType)
val toElementPrim = ctx.freshVariable("tePrim", toType)
val size = ctx.freshVariable("n", IntegerType)
val j = ctx.freshVariable("j", IntegerType)
val values = ctx.freshVariable("values", classOf[Array[Object]])
val javaType = JavaCode.javaType(fromType)
(c, evPrim, evNull) =>
code"""
final int $size = $c.numElements();
final Object[] $values = new Object[$size];
for (int $j = 0; $j < $size; $j ++) {
if ($c.isNullAt($j)) {
$values[$j] = null;
} else {
boolean $fromElementNull = false;
$javaType $fromElementPrim =
${CodeGenerator.getValue(c, fromType, j)};
${castCode(ctx, fromElementPrim,
fromElementNull, toElementPrim, toElementNull, toType, elementCast)}
if ($toElementNull) {
$values[$j] = null;
} else {
$values[$j] = $toElementPrim;
}
}
}
$evPrim = new $arrayClass($values);
"""
}
private[this] def castMapCode(from: MapType, to: MapType, ctx: CodegenContext): CastFunction = {
val keysCast = castArrayCode(from.keyType, to.keyType, ctx)
val valuesCast = castArrayCode(from.valueType, to.valueType, ctx)
val mapClass = JavaCode.javaType(classOf[ArrayBasedMapData])
val keys = ctx.freshVariable("keys", ArrayType(from.keyType))
val convertedKeys = ctx.freshVariable("convertedKeys", ArrayType(to.keyType))
val convertedKeysNull = ctx.freshVariable("convertedKeysNull", BooleanType)
val values = ctx.freshVariable("values", ArrayType(from.valueType))
val convertedValues = ctx.freshVariable("convertedValues", ArrayType(to.valueType))
val convertedValuesNull = ctx.freshVariable("convertedValuesNull", BooleanType)
(c, evPrim, evNull) =>
code"""
final ArrayData $keys = $c.keyArray();
final ArrayData $values = $c.valueArray();
${castCode(ctx, keys, FalseLiteral,
convertedKeys, convertedKeysNull, ArrayType(to.keyType), keysCast)}
${castCode(ctx, values, FalseLiteral,
convertedValues, convertedValuesNull, ArrayType(to.valueType), valuesCast)}
$evPrim = new $mapClass($convertedKeys, $convertedValues);
"""
}
private[this] def castStructCode(
from: StructType, to: StructType, ctx: CodegenContext): CastFunction = {
val fieldsCasts = from.fields.zip(to.fields).map {
case (fromField, toField) => nullSafeCastFunction(fromField.dataType, toField.dataType, ctx)
}
val tmpResult = ctx.freshVariable("tmpResult", classOf[GenericInternalRow])
val rowClass = JavaCode.javaType(classOf[GenericInternalRow])
val tmpInput = ctx.freshVariable("tmpInput", classOf[InternalRow])
val fieldsEvalCode = fieldsCasts.zipWithIndex.map { case (cast, i) =>
val fromFieldPrim = ctx.freshVariable("ffp", from.fields(i).dataType)
val fromFieldNull = ctx.freshVariable("ffn", BooleanType)
val toFieldPrim = ctx.freshVariable("tfp", to.fields(i).dataType)
val toFieldNull = ctx.freshVariable("tfn", BooleanType)
val fromType = JavaCode.javaType(from.fields(i).dataType)
val setColumn = CodeGenerator.setColumn(tmpResult, to.fields(i).dataType, i, toFieldPrim)
code"""
boolean $fromFieldNull = $tmpInput.isNullAt($i);
if ($fromFieldNull) {
$tmpResult.setNullAt($i);
} else {
$fromType $fromFieldPrim =
${CodeGenerator.getValue(tmpInput, from.fields(i).dataType, i.toString)};
${castCode(ctx, fromFieldPrim,
fromFieldNull, toFieldPrim, toFieldNull, to.fields(i).dataType, cast)}
if ($toFieldNull) {
$tmpResult.setNullAt($i);
} else {
$setColumn;
}
}
"""
}
val fieldsEvalCodes = ctx.splitExpressions(
expressions = fieldsEvalCode.map(_.code),
funcName = "castStruct",
arguments = ("InternalRow", tmpInput.code) :: (rowClass.code, tmpResult.code) :: Nil)
(input, result, resultIsNull) =>
code"""
final $rowClass $tmpResult = new $rowClass(${fieldsCasts.length});
final InternalRow $tmpInput = $input;
$fieldsEvalCodes
$result = $tmpResult;
"""
}
override def sql: String = dataType match {
// HiveQL doesn't allow casting to complex types. For logical plans translated from HiveQL, this
// type of casting can only be introduced by the analyzer, and can be omitted when converting
// back to SQL query string.
case _: ArrayType | _: MapType | _: StructType => child.sql
case _ => s"CAST(${child.sql} AS ${dataType.sql})"
}
}
/**
 * Cast the child expression to the target data type, but will throw an error if the cast might
 * truncate, e.g. long -> int, timestamp -> date.
*/
case class UpCast(child: Expression, dataType: DataType, walkedTypePath: Seq[String] = Nil)
extends UnaryExpression with Unevaluable {
override lazy val resolved = false
}
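// Illustrative note (not from the original file): each CastFunction above receives the
// generated-code names of the input value, the result variable and the result null flag, and
// returns a Block of Java source that the codegen framework splices into the projection.
// Assuming hypothetical variable names `value_0`, `project_value_1` and `project_isNull_1`,
// the BooleanType branch of castToTimestampCode would emit roughly:
//
//   project_value_1 = value_0 ? 1L : 0L;
//
// while the StringType branch of castToIntCode emits an IntWrapper-based parse that sets
// `project_isNull_1 = true` when the string is not a valid integer. Names are assumptions only.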
|
bdrillard/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
|
Scala
|
apache-2.0
| 65,206 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.disk.stubs
import org.scalatest.Assertions, Assertions.fail
/** Tracks the number of scheduler steps to use for each effect introduced into a scenario. */
private [disk] class Counter [E] {
/** A scenario. */
type Effects = Seq [E]
case class Phase (effects: Effects, crashed: Boolean) {
assert (!effects.isEmpty)
override def toString: String =
s"(${effects mkString ", "}${if (crashed) ", CRASH" else ""})"
}
type Phases = Seq [Phase]
/** The count of steps used for each effect in the scenario. */
type Counts = Seq [(E, Int)]
  /** A map from a two phase scenario to the counts of steps for each effect in each phase. For
* example, if
*```
* (Seq (e1, e2), Seq (e3, e4), true) -> ((1, 2), (3, 4))
*```
   * is in the map, then we
   * 1. run effect e1 for 1 step in phase 1
   * 2. run effect e2 for 2 steps in phase 1
   * 3. crash phase 1
   * 4. run effect e3 for 3 steps in phase 2
   * 5. run effect e4 for 4 steps in phase 2
*/
private var counts = Map.empty [Phases, Counts]
counts += Seq.empty -> Seq.empty
private def _get (ps: Phases): Counts =
counts get (ps) match {
case Some (cs) => cs
case None => fail (s"Need to test ${ps mkString ", "}")
}
private def _add (ps: Phases, cs: Counts) {
if (counts contains ps)
fail (s"Already tested (${ps mkString ", "})")
counts += ps -> cs
}
/** The name of a one phase scenario. */
private def _phases (es: Effects): Seq [Phase] =
Seq (Phase (es, false))
/** The name of a two phase scenario. */
private def _phases (es1: Effects, c1: Boolean, es2: Effects): Seq [Phase] =
Seq (Phase (es1, c1), Phase (es2, false))
/** The name of a three phase scenario. */
private def _phases (es1: Effects, c1: Boolean, es2: Effects, c2: Boolean, es3: Effects): Seq [Phase] =
Seq (Phase (es1, c1), Phase (es2, c2), Phase (es3, false))
/** Get the counts for a one phase scenario.
* @param es The effects of the scenario, which we use as a name for the scenario.
*/
def get (es: Effects): Counts =
if (es.isEmpty)
Seq.empty
else
_get (_phases (es))
/** Add the counts for a one phase scenario.
* @param es The effects of the scenario, which we use as a name for the scenario.
* @param cs The counts for each effect of the scenario.
*/
def add (es: Effects, cs: Counts): Unit =
_add (_phases (es), cs)
/** Get the counts for a two phase scenario.
* @param es1 The effects of the first phase of the scenario.
   * @param c1 Whether or not phase one crashed in this scenario.
* @param es2 The effects of the second phase of the scenario.
*/
def get (es1: Effects, c1: Boolean, es2: Effects): Counts =
if (es2.isEmpty)
Seq.empty
else
_get (_phases (es1, c1, es2))
/** Add the counts for a two phase scenario.
* @param es1 The effects of the first phase of the scenario.
   * @param c1 Whether or not phase one crashed in this scenario.
* @param es2 The effects of the second phase of the scenario.
* @param cs2 The counts for each effect of phase two of the scenario.
*/
def add (es1: Effects, c1: Boolean, es2: Effects, cs2: Counts): Unit =
_add (_phases (es1, c1, es2), cs2)
/** Get the counts for a three phase scenario.
* @param es1 The effects of the first phase of the scenario.
   * @param c1 Whether or not phase one crashed in this scenario.
* @param es2 The effects of the second phase of the scenario.
   * @param c2 Whether or not phase two crashed in this scenario.
* @param es3 The effects of the third phase of the scenario.
*/
def get (es1: Effects, c1: Boolean, es2: Effects, c2: Boolean, es3: Effects): Counts =
if (es3.isEmpty)
Seq.empty
else
_get (_phases (es1, c1, es2, c2, es3))
/** Add the counts for a three phase scenario.
* @param es1 The effects of the first phase of the scenario.
   * @param c1 Whether or not phase one crashed in this scenario.
* @param es2 The effects of the second phase of the scenario.
   * @param c2 Whether or not phase two crashed in this scenario.
* @param es3 The effects of the third phase of the scenario.
   * @param cs3 The counts for each effect of phase three of the scenario.
*/
def add (es1: Effects, c1: Boolean, es2: Effects, c2: Boolean, es3: Effects, cs3: Counts): Unit =
_add (_phases (es1, c1, es2, c2, es3), cs3)
}
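// Minimal usage sketch (not from the original file); the effect names and step counts below are
// assumptions for illustration only:
//
//   val counter = new Counter[String]
//   counter.add(Seq("checkpoint", "compact"), Seq("checkpoint" -> 3, "compact" -> 5))
//   counter.get(Seq("checkpoint", "compact"))               // Seq(("checkpoint", 3), ("compact", 5))
//   counter.get(Seq("compact"))                             // fails: that scenario was not tested yet
//   counter.add(Seq("checkpoint", "compact"), Seq.empty)    // fails: scenario already tested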
|
Treode/store
|
disk/stub/com/treode/disk/stubs/Counter.scala
|
Scala
|
apache-2.0
| 5,053 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.collection
/**
 * Similar to java.util.IdentityHashMap, but uses chained buckets.
 * Does not support null keys or null values.
 * (not thread safe)
* @since 4.2.3
*/
final class IdentityMap[K <: AnyRef, V](capacity: Int = 1024) {
assert(capacity % 2 == 0)
private val table = new Array[Entry[K, V]](capacity)
private val mask = capacity - 1
final def get(key: K): V = {
val bucket = System.identityHashCode(key) & mask
var entry = table(bucket)
while (null != entry) {
if (key eq entry.key) return entry.value
entry = entry.next
}
null.asInstanceOf[V]
}
def clear(): Unit = {
var i = 0
val tab = table
while (i < tab.length) {
tab(i) = null
i += 1
}
}
def contains(key: K): Boolean =
null != get(key)
def put(key: K, value: V): Boolean = {
val hash = System.identityHashCode(key) & mask
val tab = table
var entry = tab(hash)
while (null != entry) {
if (key eq entry.key) {
entry.value = value
return true
}
entry = entry.next
}
tab(hash) = new Entry(key, value, tab(hash))
false
}
def remove(key: K): V = {
val tab = table
val hash = System.identityHashCode(key) & mask
var e = tab(hash)
var prev: Entry[K, V] = null
while (null != e) {
if (key eq e.key) {
if (prev != null) prev.next = e.next
else tab(hash) = e.next
val oldValue = e.value
e.value = null.asInstanceOf[V]
return oldValue
}
prev = e
e = e.next
}
null.asInstanceOf[V]
}
def size(): Int = {
var size = 0
(0 until table.length) foreach { bucket =>
var entry = table(bucket)
while (null != entry) {
size += 1
entry = entry.next
}
}
size
}
def keysIterator: Iterator[K] =
new KeyIterator(table)
class Entry[K, V](val key: K, var value: V, var next: Entry[K, V])
class EntryIterator[K, V](table: Array[Entry[K, V]]) {
var entry: Entry[K, V] = _
var hasNext = false
var index = -1
def move(): Unit = {
if (index < table.length)
if (null != entry && null != entry.next)
entry = entry.next
else {
entry = null
index += 1
while (null == entry && index < table.length) {
entry = table(index)
index += 1
}
}
else
entry = null
hasNext = (entry != null)
}
}
class KeyIterator[K](table: Array[Entry[K, V]]) extends EntryIterator(table) with Iterator[K] {
move()
override def next(): K = {
val key = entry.key
move()
key
}
}
}
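// Minimal usage sketch (not from the original file):
//
//   val cache = new IdentityMap[String, String](capacity = 16)
//   val k1 = new String("key")
//   val k2 = new String("key")   // equal to k1, but a different object
//   cache.put(k1, "v")
//   cache.get(k1)                // "v"
//   cache.get(k2)                // null -- keys are compared by reference (eq), not equals
//   cache.contains(k2)           // false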
|
beangle/commons
|
core/src/main/scala/org/beangle/commons/collection/IdentityMap.scala
|
Scala
|
lgpl-3.0
| 3,430 |
package feh.tec.cvis.common.cv
import feh.tec.cvis.common.cv.Helper._
import org.opencv.core.{Core, Mat, Point}
trait Clustering {
sealed abstract class CentersPolicy(val value: Int)
object CentersPolicy{
/** Select random initial centers in each attempt */
object Random extends CentersPolicy(Core.KMEANS_RANDOM_CENTERS){ override def toString = "Random" }
/** Use <code>kmeans++</code> center initialization by Arthur and Vassilvitskii [Arthur2007]. */
object PP extends CentersPolicy(Core.KMEANS_PP_CENTERS) { override def toString = "PP" }
/**
* During the first (and possibly the only)
* attempt, use the user-supplied labels instead of computing them from the
* initial centers. For the second and further attempts, use the random or
* semi-random centers. Use one of <code>KMEANS_*_CENTERS</code> flag to specify
* the exact method.
*/
object InitialLabels extends CentersPolicy(Core.KMEANS_USE_INITIAL_LABELS)
}
// C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int flags, Mat& centers = Mat())
/**
* <p>Finds centers of clusters and groups input samples around the clusters.</p>
*
* <p>The function <code>kmeans</code> implements a k-means algorithm that finds
* the centers of <code>cluster_count</code> clusters and groups the input
* samples around the clusters. As an output, <em>labels_i</em> contains a
* 0-based cluster index for the sample stored in the <em>i^(th)</em> row of the
* <code>samples</code> matrix.</p>
*
   * <p>The function returns the compactness measure that is computed as</p>
*
* <p><em>sum _i|samples _i - centers _(labels _i)| ^2</em></p>
*
* <p>after every attempt. The best (minimum) value is chosen and the corresponding
* labels and the compactness value are returned by the function.
* Basically, you can use only the core of the function, set the number of
* attempts to 1, initialize labels each time using a custom algorithm, pass
* them with the (<code>flags</code> = <code>KMEANS_USE_INITIAL_LABELS</code>)
* flag, and then choose the best (most-compact) clustering.</p>
*
* <p>Note:</p>
* <ul>
* <li> An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp
* <li> (Python) An example on K-means clustering can be found at
* opencv_source_code/samples/python2/kmeans.py
* </ul>
*
* @param data Data for clustering. An array of N-Dimensional points with float
* coordinates is needed. Examples of this array can be:
* <ul>
* <li> <code>Mat points(count, 2, CV_32F);</code>
* <li> <code>Mat points(count, 1, CV_32FC2);</code>
* <li> <code>Mat points(1, count, CV_32FC2);</code>
* <li> <code>std.vector<cv.Point2f> points(sampleCount);</code>
* </ul>
* @param k Number of clusters to split the set by.
* @param criteria The algorithm termination criteria, that is, the maximum
* number of iterations and/or the desired accuracy. The accuracy is specified
* as <code>criteria.epsilon</code>. As soon as each of the cluster centers
* moves by less than <code>criteria.epsilon</code> on some iteration, the
* algorithm stops.
* @param attempts Flag to specify the number of times the algorithm is executed
* using different initial labellings. The algorithm returns the labels that
* yield the best compactness (see the last function parameter).
* @param centersPolicy CentersPolicy
* @return KMeansResult: Cluster centers, best labels and compactness measure.
*
* @see <a href="http://docs.opencv.org/modules/core/doc/clustering.html#kmeans">org.opencv.core.Core.kmeans</a>
*/
def kmeans(data: Mat,
k: Int,
criteria: TerminationCriteria,
attempts: Int,
centersPolicy: CentersPolicy,
labels: Mat = null): KMeansResult = {
val centers = new Mat() // Output matrix of the cluster centers, one row per each cluster center.
val bestLabels = Option(labels).getOrElse(new Mat())
val compactness = Core.kmeans(data, k, bestLabels, criteria, attempts, centersPolicy.value, centers)
val stream = centers.byRow(row => _.toArray[Float]: Point )
KMeansResult(stream.toList, bestLabels, compactness)
}
case class KMeansResult(centers: List[Point], bestLabels: Mat, compactness: Double){
def isEmpty = bestLabels == null && centers.isEmpty
}
object KMeansResult{
def empty = KMeansResult(Nil, null, 0)
}
}
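// Minimal usage sketch (not from the original file). `samples` is assumed to be a Mat whose rows
// are float-valued points, and `criteria` an already constructed TerminationCriteria; both names
// are placeholders:
//
//   object KMeansExample extends Clustering {
//     def cluster(samples: Mat, criteria: TerminationCriteria): KMeansResult =
//       kmeans(samples, 3, criteria, 5, CentersPolicy.PP)   // k = 3 clusters, 5 attempts
//   }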
|
fehu/comp-vis
|
common/src/main/scala/feh/tec/cvis/common/cv/Clustering.scala
|
Scala
|
mit
| 4,529 |
import math.ceil
object Problem1 {
def arithmetic_sum(x: Int) : Int = x*(x+1)/2
def special_sum(n: Int): Int = {
var a: Int = ceil(n/3.toFloat).toInt - 1
var b: Int = ceil(n/5.toFloat).toInt - 1
var c: Int = ceil(n/15.toFloat).toInt - 1
3*arithmetic_sum(a) + 5*arithmetic_sum(b) - 15*arithmetic_sum(c)
}
def main (args: Array[String]) = {
println("The sum of multiples of 3 or 5 below 1000 is: " + special_sum(1000))
}
}
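// Explanatory note (not from the original file): special_sum uses inclusion-exclusion. The
// multiples of 3 below n sum to 3 * (1 + 2 + ... + a) with a = ceil(n/3) - 1, likewise for 5,
// and the multiples of 15 are counted twice, so they are subtracted once. For n = 10:
//   3*(1+2+3) + 5*(1) - 15*0 = 23, which matches 3 + 5 + 6 + 9.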
|
LuqmanSahaf/Solve-Project-Euler
|
Problem1/scala/Problem1.scala
|
Scala
|
mit
| 457 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.jwt.jose4j
import java.time.Clock
import java.time.temporal.ChronoUnit
import io.circe.{ Json, JsonObject }
import org.jose4j.jwa.AlgorithmConstraints
import org.jose4j.jws.AlgorithmIdentifiers._
import org.jose4j.jws.JsonWebSignature
import org.jose4j.jwt.JwtClaims
import org.jose4j.jwt.consumer.JwtConsumerBuilder
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import silhouette.jwt.jose4j.Jose4jReads._
import silhouette.jwt.jose4j.Jose4jWrites._
import silhouette.jwt.{ Claims, JwtException, ReservedClaims }
import silhouette.specs2.WithBouncyCastle
import scala.util.Try
/**
* Test case for the [[Jose4jReads]] and [[Jose4jWrites]] classes.
*/
class FormatSpec extends Specification with WithBouncyCastle {
"The `transformer`" should {
"transform a JWT with an `iss` claim" in new Context {
transform(Claims(issuer = Some("test")))
}
"transform a JWT with a `sub` claim" in new Context {
transform(Claims(subject = Some("test")))
}
"transform a JWT with an `aud` claim" in new Context {
transform(Claims(audience = Some(List("test1", "test2"))))
}
"transform a JWT with an `exp` claim" in new Context {
transform(Claims(expirationTime = Some(Clock.systemUTC().instant())))
}
"transform a JWT with a `nbf` claim" in new Context {
transform(Claims(notBefore = Some(Clock.systemUTC().instant())))
}
"transform a JWT with an `iat` claim" in new Context {
transform(Claims(issuedAt = Some(Clock.systemUTC().instant())))
}
"transform a JWT with a `jti` claim" in new Context {
transform(Claims(jwtID = Some("test")))
}
"transform a JWT with custom claims" in new Context {
transform(Claims(custom = customClaims))
}
"transform a complex JWT" in new Context {
transform(Claims(
issuer = Some("test"),
subject = Some("test"),
audience = Some(List("test1", "test2")),
expirationTime = Some(Clock.systemUTC().instant()),
notBefore = Some(Clock.systemUTC().instant()),
issuedAt = Some(Clock.systemUTC().instant()),
jwtID = Some("test"),
custom = customClaims
))
}
}
"The `write` method" should {
"throw a JwtException if a custom claim tries to override the reserved claim `iss`" in new Context {
reserved("iss")
}
"throw a JwtException if a custom claim tries to override the reserved claim `sub`" in new Context {
reserved("sub")
}
"throw a JwtException if a custom claim tries to override the reserved claim `aud`" in new Context {
reserved("aud")
}
"throw a JwtException if a custom claim tries to override the reserved claim `exp`" in new Context {
reserved("exp")
}
"throw a JwtException if a custom claim tries to override the reserved claim `nbf`" in new Context {
reserved("nbf")
}
"throw a JwtException if a custom claim tries to override the reserved claim `iat`" in new Context {
reserved("iat")
}
"throw a JwtException if a custom claim tries to override the reserved claim `jti`" in new Context {
reserved("jti")
}
}
"The `read` method" should {
"throw a JwtException if an error occurred during decoding" in new Context {
reads.read("invalid.token") must beFailedTry.like {
case e: JwtException => e.getMessage must be equalTo FraudulentJwtToken.format("invalid.token")
}
}
}
/**
* The context.
*/
trait Context extends Scope {
/**
* A simple producer for testing.
*/
val producer = new Jose4jProducer {
override def produce(claims: JwtClaims): String = {
val jws = new JsonWebSignature()
jws.setAlgorithmConstraints(AlgorithmConstraints.NO_CONSTRAINTS)
jws.setPayload(claims.toJson)
jws.setAlgorithmHeaderValue(NONE)
jws.getCompactSerialization
}
}
/**
* A simple consumer for testing.
*/
val consumer = new Jose4jConsumer {
override def consume(jwt: String): Try[JwtClaims] = {
Try(new JwtConsumerBuilder())
.map(builder => builder.setJwsAlgorithmConstraints(AlgorithmConstraints.NO_CONSTRAINTS))
.map(builder => builder.setDisableRequireSignature())
.map(builder => builder.setSkipAllValidators())
.map(builder => builder.setSkipAllDefaultValidators())
.map(_.build().processToClaims(jwt))
}
}
/**
* The reads to test.
*/
val reads = new Jose4jReads(consumer)
/**
* The writes to test.
*/
val writes = new Jose4jWrites(producer)
/**
* Some custom claims.
*/
val customClaims = JsonObject(
"boolean" -> Json.True,
"string" -> Json.fromString("string"),
"int" -> Json.fromInt(1234567890),
"long" -> Json.fromLong(1234567890L),
"float" -> Json.fromFloatOrNull(1.2F),
"double" -> Json.fromDoubleOrNull(1.2D),
"bigInt" -> Json.fromBigInt(new java.math.BigInteger("10000000000000000000000000000000")),
"bigDecimal" -> Json.fromBigDecimal(new java.math.BigDecimal("100000000000000000000000000000.00")),
"null" -> Json.Null,
"array" -> Json.arr(Json.fromInt(1), Json.fromInt(2)),
"object" -> Json.obj(
"array" -> Json.arr(Json.fromString("string1"), Json.fromString("string2")),
"object" -> Json.obj(
"array" -> Json.arr(
Json.fromString("string"),
Json.False,
Json.obj("number" -> Json.fromInt(1))
)
)
)
)
/**
* A helper method which transforms claims into a JWT and vice versa to check if the same
* claims were transformed.
*
* @param claims The claims to check for.
* @return A Specs2 match result.
*/
protected def transform(claims: Claims): MatchResult[Any] = {
writes.write(claims) must beSuccessfulTry.like {
case jwt =>
reads.read(jwt) must beSuccessfulTry.withValue(claims.copy(
expirationTime = claims.expirationTime.map(_.truncatedTo(ChronoUnit.SECONDS)),
notBefore = claims.notBefore.map(_.truncatedTo(ChronoUnit.SECONDS)),
issuedAt = claims.issuedAt.map(_.truncatedTo(ChronoUnit.SECONDS))
))
}
}
/**
* A helper method which overrides reserved claims and checks for an exception.
*
* @param claim The claim to override.
* @return A Specs2 match result.
*/
protected def reserved(claim: String): MatchResult[Any] = {
val message = OverrideReservedClaim.format(claim, ReservedClaims.mkString(", "))
writes.write(Claims(custom = JsonObject(claim -> Json.fromString("test")))) must beFailedTry.like {
case e: JwtException => e.getMessage must be equalTo message
}
}
}
}
|
mohiva/silhouette
|
modules/jwt-jose4j/src/test/scala/silhouette/jwt/jose4j/FormatSpec.scala
|
Scala
|
apache-2.0
| 7,672 |
#!/bin/bash
exec scala \\
-Djava.library.path="../lib/linux_x86" \\
-classpath "../bin:../lib:../lib/linux_x86/com.google.ortools.jar" \\
"$0" "$@"
!#
/** USAGE: ./coeff.scala DIMA DIMB - [u1 u2 ..] / [v1 v2 ...] / [w1 w2 ...]
*
* DIMA and DIMB are the size of the cubicles
* ui, vi and wi are permutations of 1 2 3 ...
* length of ui AND number of letters of permutation
* must be at most DIMA
* length of vi AND --- must be at most DIMB
* length of wi AND --- must be at most DIMA*DIMB
*/
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.HashMap
import polytope._
/** For every cubicle, print the product flag coefficient associated
* with the given permutations and cubicle
*/
object productFlagCoeff {
def main(args: Array[String]): Unit = {
if (args.count(_ == "/") != 2) {
println((args.count(_ == "/") + 1).toString +
" permutations were supplied, but exactly 3 are required.")
return
}
if (args.count(_ == "-") != 1) {
println("Two dimensions must be supplied and separated from the permutations by a -")
return
}
if (args.length < 5) {
println("Not enough arguments")
return
}
val dims = List(args(0).toInt, args(1).toInt)
val perms = args.drop(args.indexOf("-") + 1)
val u = perms.takeWhile(_ != "/").map(_.toInt)
val v = perms.drop(u.length + 1).takeWhile(_ != "/").map(_.toInt)
val w = perms.drop(u.length + v.length + 2).map(_.toInt)
println("dims = (" + dims.mkString(", ") + ")")
println("u: " + u.mkString(" "))
println("v: " + v.mkString(" "))
println("w: " + w.mkString(" "))
if (reducedWord(u).length > dims(0) || u.length > dims(0) ||
reducedWord(v).length > dims(1) || v.length > dims(1) ||
reducedWord(w).length > dims(0)*dims(1) || w.length > dims(0)*dims(1)) {
println("The number of letters of the permutations must be bounded by the dimensions")
println("The length of the permutations must also be bounded by the dimensions")
return
}
val cubicles = InequalityFactory.cubiclesDM(dims)
for (T <- cubicles) {
val c = InequalityFactory.c(u, v, w, T)
println("For T=" + T.toCSV() + ", c(u,v,w,T)=" + hashMapToString(c))
}
}
}
// Run the code
//productFlagCoeff.main(args)
|
expz/polytope
|
scripts/productFlagCoeffs.scala
|
Scala
|
gpl-3.0
| 2,339 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.util.{ HashMap => JHashMap }
import scala.collection.immutable.HashSet
import scala.collection.mutable
import scala.collection.JavaConversions._
import scala.concurrent.{ ExecutionContext, Future }
import org.apache.spark.rpc.{ RpcEndpointRef, RpcEnv, RpcCallContext, ThreadSafeRpcEndpoint }
import org.apache.spark.{ Logging, SparkConf }
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.scheduler._
import org.apache.spark.storage.BlockManagerMessages._
import org.apache.spark.util.{ ThreadUtils, Utils }
/**
* BlockManagerMasterEndpoint is an [[ThreadSafeRpcEndpoint]] on the master node to track statuses
* of all slaves' block managers.
 *
 * It lives on the driver and is responsible for tracking the block management information
 * reported by all slave nodes.
*/
private[spark] class BlockManagerMasterEndpoint(
override val rpcEnv: RpcEnv,
val isLocal: Boolean,
conf: SparkConf,
listenerBus: LiveListenerBus)
extends ThreadSafeRpcEndpoint with Logging {
// Mapping from block manager id to the block manager's information.
  // BlockManagerInfo also holds the slave's RpcEndpointRef; through it the BlockManagerMaster
  // can send command requests to the slave.
private val blockManagerInfo = new mutable.HashMap[BlockManagerId, BlockManagerInfo]
// Mapping from executor ID to block manager ID.
  // The BlockManagerMaster can look up a BlockManagerId from an executor ID.
private val blockManagerIdByExecutor = new mutable.HashMap[String, BlockManagerId]
  // HashMap recording which BlockManagers hold each block. A block may be replicated on
  // several slaves, so the value is a HashSet; querying blockLocations yields the block's
  // physical locations.
// Mapping from block id to the set of block managers that have the block.
  // Given a BlockId, this yields the set of BlockManagerIds, i.e. the block's physical locations.
private val blockLocations = new JHashMap[BlockId, mutable.HashSet[BlockManagerId]]
  // Thread pool (a cached daemon ThreadPoolExecutor) used to answer ask requests.
private val askThreadPool = ThreadUtils.newDaemonCachedThreadPool("block-manager-ask-thread-pool")
  // ExecutionContextExecutorService backing the ask futures; fromExecutorService creates an
  // ExecutionContext from the given ExecutorService with the default reporter.
private implicit val askExecutionContext = ExecutionContext.fromExecutorService(askThreadPool)
  // Handles messages sent with [[RpcEndpointRef.ask]]. If an unmatched message is received,
  // a [[SparkException]] is thrown and sent to onError.
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    // After a BlockManager is created it registers itself by sending a RegisterBlockManager
    // message; the master endpoint records the blocks and other information of that BlockManager.
case RegisterBlockManager(blockManagerId, maxMemSize, slaveEndpoint) =>
register(blockManagerId, maxMemSize, slaveEndpoint)
      // Reply to the sender.
context.reply(true)
    // Reports block information to the master; the master records it and serves queries from slaves.
    // UpdateBlockInfo(blockManagerId, blockId, storageLevel, memSize, diskSize, externalBlockStoreSize)
    // Note the `_updateBlockInfo @ UpdateBlockInfo(...)` binder pattern used below.
case _updateBlockInfo @ UpdateBlockInfo(
blockManagerId, blockId, storageLevel, deserializedSize, size, externalBlockStoreSize) =>
context.reply(updateBlockInfo(
blockManagerId, blockId, storageLevel, deserializedSize, size, externalBlockStoreSize))
listenerBus.post(SparkListenerBlockUpdated(BlockUpdatedInfo(_updateBlockInfo)))
    // Returns the locations of a block as a list of BlockManagerIds; the block may be
    // replicated on several nodes.
case GetLocations(blockId) =>
      // Reply to the sender.
context.reply(getLocations(blockId))
    // Same as above, but fetches the locations of several blocks at once.
case GetLocationsMultipleBlockIds(blockIds) =>
      // Reply to the sender.
context.reply(getLocationsMultipleBlockIds(blockIds))
    // getPeers returns the other BlockManagerIds; used when replicating a block across the cluster.
case GetPeers(blockManagerId) =>
      // Reply to the sender.
context.reply(getPeers(blockManagerId))
    // Looks up an executor's RPC host and port by executorId (e.g. for requesting its thread
    // dump); with them, requests can be sent directly to the executor.
case GetRpcHostPortForExecutor(executorId) =>
      // Reply to the sender.
context.reply(getRpcHostPortForExecutor(executorId))
    // Returns the memory status of every executor: the maximum memory and the remaining memory.
case GetMemoryStatus =>
      // Reply to the sender.
context.reply(memoryStatus)
    // Returns the storage status of every executor, including its maximum available memory
    // and its blocks.
case GetStorageStatus =>
      // Reply to the sender.
context.reply(storageStatus)
    // Returns the status of the block identified by blockId, optionally asking the slaves.
case GetBlockStatus(blockId, askSlaves) =>
      // Reply to the sender.
context.reply(blockStatus(blockId, askSlaves))
    // Returns the ids of the blocks matching the filter; if askSlaves is true, every slave is queried.
case GetMatchingBlockIds(filter, askSlaves) =>
context.reply(getMatchingBlockIds(filter, askSlaves))
    // Removes all blocks associated with the given RDD id.
case RemoveRdd(rddId) =>
context.reply(removeRdd(rddId))
    // Removes all blocks related to the given shuffle id.
case RemoveShuffle(shuffleId) =>
context.reply(removeShuffle(shuffleId))
    // Removes all blocks related to the given broadcast variable.
case RemoveBroadcast(broadcastId, removeFromDriver) =>
context.reply(removeBroadcast(broadcastId, removeFromDriver))
    // Removes the given block from the slaves that hold it.
case RemoveBlock(blockId) =>
removeBlockFromWorkers(blockId)
context.reply(true)
    // Removes the information the master keeps about the BlockManager of the executor
    // identified by execId.
case RemoveExecutor(execId) =>
removeExecutor(execId)
      // Reply to the sender.
context.reply(true)
case StopBlockManagerMaster =>
      // Reply to the sender.
context.reply(true)
stop()
    // Handles the heartbeat forwarded from DAGScheduler.executorHeartbeatReceived.
case BlockManagerHeartbeat(blockManagerId) =>
      // Reply to the sender.
context.reply(heartbeatReceived(blockManagerId))
case HasCachedBlocks(executorId) =>
blockManagerIdByExecutor.get(executorId) match {
case Some(bm) =>
if (blockManagerInfo.contains(bm)) {
val bmInfo = blockManagerInfo(bm)
context.reply(bmInfo.cachedBlocks.nonEmpty)
} else {
context.reply(false)
}
case None => context.reply(false)
}
}
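  // Illustrative note (not from the original file): the driver-side BlockManagerMaster wraps
  // these messages in ask calls on this endpoint's RpcEndpointRef, for example (names and the
  // exact ask variant are assumptions):
  //
  //   driverEndpoint.askWithRetry[Seq[BlockManagerId]](GetLocations(blockId))
  //   driverEndpoint.askWithRetry[Boolean](RemoveExecutor(execId))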
private def removeRdd(rddId: Int): Future[Seq[Int]] = {
// First remove the metadata for the given RDD, and then asynchronously remove the blocks
// from the slaves.
//首先删除给定RDD的元数据,然后从从站异步移除块。
// Find all blocks for the given RDD, remove the block from both blockLocations and
// the blockManagerInfo that is tracking the blocks.
    // First drop the RDD-related metadata kept on the master.
val blocks = blockLocations.keys.flatMap(_.asRDDId).filter(_.rddId == rddId)
blocks.foreach { blockId =>
val bms: mutable.HashSet[BlockManagerId] = blockLocations.get(blockId)
bms.foreach(bm => blockManagerInfo.get(bm).foreach(_.removeBlock(blockId)))
blockLocations.remove(blockId)
}
// Ask the slaves to remove the RDD, and put the result in a sequence of Futures.
// The dispatcher is used as an implicit argument into the Future sequence construction.
    // Then remove the RDD's blocks on the slaves.
val removeMsg = RemoveRdd(rddId)
    // Future.sequence turns a Seq[Future[Int]] into a Future[Seq[Int]].
Future.sequence(
blockManagerInfo.values.map { bm =>
        // Send the RemoveRdd message to each BlockManagerSlaveEndpoint.
bm.slaveEndpoint.ask[Int](removeMsg)
}.toSeq)
}
private def removeShuffle(shuffleId: Int): Future[Seq[Boolean]] = {
// Nothing to do in the BlockManagerMasterEndpoint data structures
val removeMsg = RemoveShuffle(shuffleId)
    // Future.sequence turns a Seq[Future[Boolean]] into a Future[Seq[Boolean]].
Future.sequence(
blockManagerInfo.values.map { bm =>
bm.slaveEndpoint.ask[Boolean](removeMsg)
}.toSeq)
}
/**
   * Delegate RemoveBroadcast messages to each BlockManager because the master may not be notified
   * of all broadcast blocks. If removeFromDriver is false, broadcast blocks are only removed
   * from the executors, but not from the driver.
*/
private def removeBroadcast(broadcastId: Long, removeFromDriver: Boolean): Future[Seq[Int]] = {
val removeMsg = RemoveBroadcast(broadcastId, removeFromDriver)
val requiredBlockManagers = blockManagerInfo.values.filter { info =>
removeFromDriver || !info.blockManagerId.isDriver
}
    // Future.sequence turns a Seq[Future[Int]] into a Future[Seq[Int]].
Future.sequence(
requiredBlockManagers.map { bm =>
bm.slaveEndpoint.ask[Int](removeMsg)
}.toSeq)
}
/**
   * Removes the block manager identified by blockManagerId and all of its blocks.
*/
private def removeBlockManager(blockManagerId: BlockManagerId) {
val info = blockManagerInfo(blockManagerId)
// Remove the block manager from blockManagerIdByExecutor.
blockManagerIdByExecutor -= blockManagerId.executorId
// Remove it from blockManagerInfo and remove all the blocks.
blockManagerInfo.remove(blockManagerId)
val iterator = info.blocks.keySet.iterator
while (iterator.hasNext) {
val blockId = iterator.next
val locations = blockLocations.get(blockId)
locations -= blockManagerId
if (locations.size == 0) {
blockLocations.remove(blockId)
}
}
listenerBus.post(SparkListenerBlockManagerRemoved(System.currentTimeMillis(), blockManagerId))
logInfo(s"Removing block manager $blockManagerId")
}
  // Removes the executor's block manager, looked up by execId.
private def removeExecutor(execId: String) {
logInfo("Trying to remove executor " + execId + " from BlockManagerMaster.")
blockManagerIdByExecutor.get(execId).foreach(removeBlockManager)
}
/**
* Return true if the driver knows about the given block manager. Otherwise, return false,
* indicating that the block manager should re-register.
*/
private def heartbeatReceived(blockManagerId: BlockManagerId): Boolean = {
if (!blockManagerInfo.contains(blockManagerId)) {
blockManagerId.isDriver && !isLocal
} else {
      // Update the last time the master saw this BlockManager (i.e. _lastSeenMs in the
      // corresponding BlockManagerInfo).
blockManagerInfo(blockManagerId).updateLastSeenMs()
true
}
}
// Remove a block from the slaves that have it. This can only be used to remove
// blocks that the master knows about.
private def removeBlockFromWorkers(blockId: BlockId) {
val locations = blockLocations.get(blockId)
if (locations != null) {
locations.foreach { blockManagerId: BlockManagerId =>
val blockManager = blockManagerInfo.get(blockManagerId)
if (blockManager.isDefined) {
// Remove the block from the slave's BlockManager.
// Doesn't actually wait for a confirmation and the message might get lost.
// If message loss becomes frequent, we should add retry logic here.
blockManager.get.slaveEndpoint.ask[Boolean](RemoveBlock(blockId))
}
}
}
}
// Return a map from the block manager id to max memory and remaining memory.
private def memoryStatus: Map[BlockManagerId, (Long, Long)] = {
blockManagerInfo.map {
case (blockManagerId, info) =>
(blockManagerId, (info.maxMem, info.remainingMem))
}.toMap
}
private def storageStatus: Array[StorageStatus] = {
blockManagerInfo.map {
case (blockManagerId, info) =>
new StorageStatus(blockManagerId, info.maxMem, info.blocks)
}.toArray
}
/**
* Return the block's status for all block managers, if any. NOTE: This is a
* potentially expensive operation and should only be used for testing.
* If askSlaves is true, the master queries each block manager for the most updated block
* statuses. This is useful when the master is not informed of the given block by all block
* managers.
*/
private def blockStatus(
blockId: BlockId,
askSlaves: Boolean): Map[BlockManagerId, Future[Option[BlockStatus]]] = {
val getBlockStatus = GetBlockStatus(blockId)
/*
* Rather than blocking on the block status query, master endpoint should simply return
* Futures to avoid potential deadlocks. This can arise if there exists a block manager
* that is also waiting for this master endpoint's response to a previous message.
*/
blockManagerInfo.values.map { info =>
val blockStatusFuture =
if (askSlaves) {
info.slaveEndpoint.ask[Option[BlockStatus]](getBlockStatus)
} else {
Future { info.getStatus(blockId) }
}
(info.blockManagerId, blockStatusFuture)
}.toMap
}
/**
* Return the ids of blocks present in all the block managers that match the given filter.
* NOTE: This is a potentially expensive operation and should only be used for testing.
* If askSlaves is true, the master queries each block manager for the most updated block
* statuses. This is useful when the master is not informed of the given block by all block
* managers.
*/
private def getMatchingBlockIds(
filter: BlockId => Boolean,
askSlaves: Boolean): Future[Seq[BlockId]] = {
val getMatchingBlockIds = GetMatchingBlockIds(filter)
Future.sequence(
blockManagerInfo.values.map { info =>
val future =
if (askSlaves) {
info.slaveEndpoint.ask[Seq[BlockId]](getMatchingBlockIds)
} else {
Future { info.blocks.keys.filter(filter).toSeq }
}
future
}).map(_.flatten.toSeq)
}
/**
   * On a registration request, stores the slave's information on the master side
   * (here the master is the driver).
*/
private def register(id: BlockManagerId, maxMemSize: Long, slaveEndpoint: RpcEndpointRef) {
    val time = System.currentTimeMillis() // current system time
if (!blockManagerInfo.contains(id)) {
      // Look up the old BlockManagerId for this executorId; if one exists, remove it first.
blockManagerIdByExecutor.get(id.executorId) match {
case Some(oldId) =>
// A block manager of the same executor already exists, so remove it (assumed dead)
logError("Got two different block manager registrations on same executor - "
+ s" will replace old one $oldId with new one $id")
removeExecutor(id.executorId)
case None =>
}
logInfo("Registering block manager %s with %s RAM, %s".format(
id.hostPort, Utils.bytesToString(maxMemSize), id))
      // Record the mapping executorId -> BlockManagerId.
blockManagerIdByExecutor(id.executorId) = id
      // Record the mapping BlockManagerId -> BlockManagerInfo.
blockManagerInfo(id) = new BlockManagerInfo(
id, System.currentTimeMillis(), maxMemSize, slaveEndpoint)
}
    // Finally post a SparkListenerBlockManagerAdded event (with the maximum memory size)
    // to the listenerBus.
listenerBus.post(SparkListenerBlockManagerAdded(time, id, maxMemSize))
}
/**
   * Updates block information kept on the master.
*/
private def updateBlockInfo(
blockManagerId: BlockManagerId,
blockId: BlockId,
storageLevel: StorageLevel,
memSize: Long,
diskSize: Long,
externalBlockStoreSize: Long): Boolean = {
if (!blockManagerInfo.contains(blockManagerId)) {
if (blockManagerId.isDriver && !isLocal) {
// We intentionally do not register the master (except in local mode),
// so we should not indicate failure.
return true
} else {
return false
}
}
if (blockId == null) {
      // Just update the last-seen time.
blockManagerInfo(blockManagerId).updateLastSeenMs()
return true
}
    // Update blockManagerInfo.
blockManagerInfo(blockManagerId).updateBlockInfo(
blockId, storageLevel, memSize, diskSize, externalBlockStoreSize)
var locations: mutable.HashSet[BlockManagerId] = null
if (blockLocations.containsKey(blockId)) {
      // The block already has an entry: an update, or an additional replica.
locations = blockLocations.get(blockId)
} else {
      // A newly added block.
locations = new mutable.HashSet[BlockManagerId]
blockLocations.put(blockId, locations)
}
if (storageLevel.isValid) {
      locations.add(blockManagerId) // add a new location for this block
    } else {
      locations.remove(blockManagerId) // drop the now-invalid location of this block
}
// Remove the block from master tracking if it has been removed on all slaves.
if (locations.size == 0) {
blockLocations.remove(blockId)
}
true
}
private def getLocations(blockId: BlockId): Seq[BlockManagerId] = {
if (blockLocations.containsKey(blockId)) blockLocations.get(blockId).toSeq else Seq.empty
}
private def getLocationsMultipleBlockIds(
blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
blockIds.map(blockId => getLocations(blockId))
}
  /**
   * Get the list of the peers of the given block manager, i.e. the ids of the other
   * BlockManagers. Used when choosing locations for distributed block replication.
   */
private def getPeers(blockManagerId: BlockManagerId): Seq[BlockManagerId] = {
val blockManagerIds = blockManagerInfo.keySet
if (blockManagerIds.contains(blockManagerId)) {
      // Filter out the driver's BlockManager and the current BlockManager; return all the others.
      // isDriver identifies the master (driver) BlockManager.
blockManagerIds.filterNot { _.isDriver }.filterNot { _ == blockManagerId }.toSeq
} else {
Seq.empty
}
}
/**
* Returns the hostname and port of an executor, based on the [[RpcEnv]] address of its
* [[BlockManagerSlaveEndpoint]].
*/
private def getRpcHostPortForExecutor(executorId: String): Option[(String, Int)] = {
for (
blockManagerId <- blockManagerIdByExecutor.get(executorId);
info <- blockManagerInfo.get(blockManagerId)
) yield {
(info.slaveEndpoint.address.host, info.slaveEndpoint.address.port)
}
}
override def onStop(): Unit = {
askThreadPool.shutdownNow()
}
}
@DeveloperApi
case class BlockStatus(
    storageLevel: StorageLevel, // storage level
    memSize: Long, // size in memory
    diskSize: Long, // size on disk
    externalBlockStoreSize: Long) { // size in the external block store
def isCached: Boolean = memSize + diskSize + externalBlockStoreSize > 0
}
@DeveloperApi
object BlockStatus {
def empty: BlockStatus = BlockStatus(StorageLevel.NONE, 0L, 0L, 0L)
}
private[spark] class BlockManagerInfo(
val blockManagerId: BlockManagerId,
    timeMs: Long, // last seen time
    val maxMem: Long, // maximum memory
    val slaveEndpoint: RpcEndpointRef) // reference to the executor-side endpoint
  extends Logging {
  private var _lastSeenMs: Long = timeMs // last seen time
  private var _remainingMem: Long = maxMem // remaining memory
  // Mapping from block id to its status.
  private val _blocks = new JHashMap[BlockId, BlockStatus]
  // Cached blocks held by this BlockManager. This does not include broadcast blocks.
private val _cachedBlocks = new mutable.HashSet[BlockId]
def getStatus(blockId: BlockId): Option[BlockStatus] = Option(_blocks.get(blockId))
  // Update the last-seen timestamp
def updateLastSeenMs() {
_lastSeenMs = System.currentTimeMillis()
}
def updateBlockInfo(
blockId: BlockId,
storageLevel: StorageLevel,
memSize: Long,
diskSize: Long,
externalBlockStoreSize: Long) {
    // Update the last-seen timestamp
updateLastSeenMs()
if (_blocks.containsKey(blockId)) {
// The block exists on the slave already.
val blockStatus: BlockStatus = _blocks.get(blockId)
val originalLevel: StorageLevel = blockStatus.storageLevel
val originalMemSize: Long = blockStatus.memSize
if (originalLevel.useMemory) {
_remainingMem += originalMemSize
}
}
if (storageLevel.isValid) {
/* isValid means it is either stored in-memory, on-disk or on-externalBlockStore.
* The memSize here indicates the data size in or dropped from memory,
* externalBlockStoreSize here indicates the data size in or dropped from externalBlockStore,
* and the diskSize here indicates the data size in or dropped to disk.
* They can be both larger than 0, when a block is dropped from memory to disk.
* Therefore, a safe way to set BlockStatus is to set its info in accurate modes.
       */
var blockStatus: BlockStatus = null
if (storageLevel.useMemory) {
blockStatus = BlockStatus(storageLevel, memSize, 0, 0)
_blocks.put(blockId, blockStatus)
_remainingMem -= memSize
logInfo("Added %s in memory on %s (size: %s, free: %s)".format(
blockId, blockManagerId.hostPort, Utils.bytesToString(memSize),
Utils.bytesToString(_remainingMem)))
}
if (storageLevel.useDisk) {
blockStatus = BlockStatus(storageLevel, 0, diskSize, 0)
_blocks.put(blockId, blockStatus)
logInfo("Added %s on disk on %s (size: %s)".format(
blockId, blockManagerId.hostPort, Utils.bytesToString(diskSize)))
}
if (storageLevel.useOffHeap) {
blockStatus = BlockStatus(storageLevel, 0, 0, externalBlockStoreSize)
_blocks.put(blockId, blockStatus)
logInfo("Added %s on ExternalBlockStore on %s (size: %s)".format(
blockId, blockManagerId.hostPort, Utils.bytesToString(externalBlockStoreSize)))
}
if (!blockId.isBroadcast && blockStatus.isCached) {
_cachedBlocks += blockId
}
} else if (_blocks.containsKey(blockId)) {
// If isValid is not true, drop the block.
val blockStatus: BlockStatus = _blocks.get(blockId)
_blocks.remove(blockId)
_cachedBlocks -= blockId
if (blockStatus.storageLevel.useMemory) {
logInfo("Removed %s on %s in memory (size: %s, free: %s)".format(
blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.memSize),
Utils.bytesToString(_remainingMem)))
}
if (blockStatus.storageLevel.useDisk) {
logInfo("Removed %s on %s on disk (size: %s)".format(
blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.diskSize)))
}
if (blockStatus.storageLevel.useOffHeap) {
logInfo("Removed %s on %s on externalBlockStore (size: %s)".format(
blockId, blockManagerId.hostPort,
Utils.bytesToString(blockStatus.externalBlockStoreSize)))
}
}
}
def removeBlock(blockId: BlockId) {
if (_blocks.containsKey(blockId)) {
_remainingMem += _blocks.get(blockId).memSize
_blocks.remove(blockId)
}
_cachedBlocks -= blockId
}
  // remaining memory
  def remainingMem: Long = _remainingMem
  // last seen time
def lastSeenMs: Long = _lastSeenMs
def blocks: JHashMap[BlockId, BlockStatus] = _blocks
// This does not include broadcast blocks.
def cachedBlocks: collection.Set[BlockId] = _cachedBlocks
override def toString: String = "BlockManagerInfo " + timeMs + " " + _remainingMem
def clear() {
_blocks.clear()
}
}
|
tophua/spark1.52
|
core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
|
Scala
|
apache-2.0
| 27,809 |
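The location bookkeeping in updateBlockInfo above boils down to a block-id -> set-of-managers map that gains an entry whenever a block is reported with a valid storage level and loses it once the last replica disappears. The sketch below restates that logic with plain Scala collections; the String ids are stand-ins for Spark's BlockId and BlockManagerId types and are not part of Spark.

import scala.collection.mutable

object BlockLocationSketch {
  // blockId -> ids of the managers that currently hold the block
  private val blockLocations = mutable.Map.empty[String, mutable.Set[String]]

  def update(blockId: String, managerId: String, stillStored: Boolean): Unit = {
    val locations = blockLocations.getOrElseUpdate(blockId, mutable.Set.empty[String])
    if (stillStored) locations += managerId else locations -= managerId
    // Drop the entry entirely once no manager holds the block any more.
    if (locations.isEmpty) blockLocations -= blockId
  }

  def locations(blockId: String): Set[String] =
    blockLocations.get(blockId).map(_.toSet).getOrElse(Set.empty)
}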
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.batchupdater
import scala.concurrent.{ ExecutionContext, Future }
trait UpdateAction[ID] {
def transactionName: String
def apply(id: ID)(implicit ec: ExecutionContext): Future[SingleResult]
}
|
hmrc/batch-updater
|
src/main/scala/uk/gov/hmrc/batchupdater/UpdateAction.scala
|
Scala
|
apache-2.0
| 824 |
package scala.meta
import com.intellij.openapi.project.Project
import com.intellij.openapi.vfs.VirtualFileManager
import com.intellij.psi.{PsiFile, PsiManager}
import org.jetbrains.plugins.scala.lang.psi.api.base.ScAnnotation
trait EnvironmentProvider {
def findFileByPath(path: String): PsiFile = {
val virtualFile = VirtualFileManager.getInstance().findFileByUrl(path)
PsiManager.getInstance(getCurrentProject).findFile(virtualFile)
}
def getCurrentProject: Project
def dumbMode: Boolean = false
  // At the time this comment was written, paradise did not convert trees 100% identically to
  // what is written in the source code; this flag tells the tree converter to mimic the
  // paradise converter's behaviour.
  def paradiseCompatibilityHacks: Boolean = true
  // The macro annotation itself must not appear in the converted tree (paradise behaviour).
protected val annotationToSkip: ScAnnotation
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/scala/meta/EnvironmentProvider.scala
|
Scala
|
apache-2.0
| 893 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package mutable
import generic._
import scala.scalajs.js
/** Buffers are used to create sequences of elements incrementally by
* appending, prepending, or inserting new elements. It is also
* possible to access and modify elements in a random access fashion
* via the index of the element in the current sequence.
*
* @author Matthias Zenger
* @author Martin Odersky
* @version 2.8
* @since 1
*
* @tparam A type of the elements contained in this buffer.
*
* @define Coll `Buffer`
* @define coll buffer
*/
trait Buffer[A] extends Seq[A]
with GenericTraversableTemplate[A, Buffer]
with BufferLike[A, Buffer[A]]
with scala.Cloneable {
override def companion: GenericCompanion[Buffer] = Buffer
}
/** $factoryInfo
* @define coll buffer
* @define Coll `Buffer`
*/
object Buffer extends SeqFactory[Buffer] {
implicit def canBuildFrom[A]: CanBuildFrom[Coll, A, Buffer[A]] = ReusableCBF.asInstanceOf[GenericCanBuildFrom[A]]
def newBuilder[A]: Builder[A, Buffer[A]] = new js.WrappedArray
}
/** Explicit instantiation of the `Buffer` trait to reduce class file size in subclasses. */
abstract class AbstractBuffer[A] extends AbstractSeq[A] with Buffer[A]
|
xuwei-k/scala-js
|
scalalib/overrides-2.13/scala/collection/mutable/Buffer.scala
|
Scala
|
bsd-3-clause
| 1,807 |
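A quick usage sketch of the append / prepend / insert / random-access operations the Buffer scaladoc above describes, using the standard ListBuffer implementation (the concrete values are arbitrary).

import scala.collection.mutable.ListBuffer

object BufferUsage extends App {
  val buf = ListBuffer(2, 3)
  buf += 4          // append            -> ListBuffer(2, 3, 4)
  buf.prepend(1)    // prepend           -> ListBuffer(1, 2, 3, 4)
  buf.insert(2, 99) // insert at index 2 -> ListBuffer(1, 2, 99, 3, 4)
  buf(0) = 0        // random-access update
  println(buf)      // ListBuffer(0, 2, 99, 3, 4)
}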
package finatra.quickstart.domain.http
import com.twitter.finatra.validation._
import finatra.quickstart.domain.Location
case class TweetLocation(
@Range(min = -85, max = 85) lat: Double,
@Range(min = -180, max = 180) long: Double) {
def toDomain = {
Location(lat, long)
}
}
|
syamantm/finatra
|
examples/twitter-clone/src/main/scala/finatra/quickstart/domain/http/TweetLocation.scala
|
Scala
|
apache-2.0
| 290 |
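A tiny usage sketch of the case class above; the coordinates are invented and only illustrate the validated HTTP-layer value being mapped to the domain Location.

import finatra.quickstart.domain.Location
import finatra.quickstart.domain.http.TweetLocation

object TweetLocationUsage {
  val sanFrancisco = TweetLocation(lat = 37.77, long = -122.41)
  val loc: Location = sanFrancisco.toDomain // Location(37.77, -122.41)
}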
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package imports
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.extensions.{ObjectExt, StubBasedExt}
import org.jetbrains.plugins.scala.lang.TokenSets.IMPORT_WILDCARDS
import org.jetbrains.plugins.scala.lang.parser.ScalaElementType._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReference
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports._
import org.jetbrains.plugins.scala.lang.psi.stubs.ScImportExprStub
/**
* @author AlexanderPodkhalyuzin
* Date: 20.02.2008
*/
class ScImportExprImpl private (stub: ScImportExprStub, node: ASTNode)
extends ScalaStubBasedElementImpl(stub, IMPORT_EXPR, node) with ScImportExpr {
def this(node: ASTNode) = this(null, node)
def this(stub: ScImportExprStub) = this(stub, null)
override def toString: String = "ImportExpression"
override def hasWildcardSelector: Boolean = byStubOrPsi(_.hasWildcardSelector)(wildcardElement.nonEmpty)
override def hasGivenSelector: Boolean = byStubOrPsi(_.hasGivenSelector)(selectors.exists(_.isGivenSelector))
override def wildcardElement: Option[PsiElement] =
Option(findChildByType(IMPORT_WILDCARDS))
.orElse(selectorSet.flatMap(_.wildcardElement))
override def qualifier: Option[ScStableCodeReference] =
reference.flatMap(ref =>
if (hasWildcardSelector || selectorSet.isDefined)
Some(ref)
else
ref.qualifier
)
override def deleteExpr(): Unit = {
val parent = getParent.asInstanceOf[ScImportOrExportStmt]
if (parent.importExprs.size == 1) {
parent.getParent match {
case x: ScImportsOrExportsHolder => x.deleteImportOrExportStmt(parent)
case _ =>
}
} else {
val node = parent.getNode
val remove = node.removeChild _
val next = getNextSibling
if (next != null) {
def removeWhitespaceAfterComma(comma: ASTNode): Unit = {
if (comma.getTreeNext != null && !comma.getTreeNext.getText.contains("\\n") &&
comma.getTreeNext.getText.trim.isEmpty) {
remove(comma.getTreeNext)
}
}
if (next.textMatches(",")) {
val comma = next.getNode
removeWhitespaceAfterComma(comma)
remove(comma)
} else {
if (next.getNextSibling != null && next.getNextSibling.textMatches(",")) {
val comma = next.getNextSibling
removeWhitespaceAfterComma(comma.getNode)
remove(next.getNode)
remove(comma.getNode)
} else {
val prev = getPrevSibling
if (prev != null) {
if (prev.textMatches(",")) {
remove(prev.getNode)
} else {
if (prev.getPrevSibling != null && prev.getPrevSibling.textMatches(",")) {
remove(prev.getPrevSibling.getNode)
}
}
}
}
}
} else {
val prev = getPrevSibling
if (prev != null) {
if (prev.textMatches(",")) {
remove(prev.getNode)
} else {
if (prev.getPrevSibling != null && prev.getPrevSibling.textMatches(",")) {
val prevSibling = prev.getPrevSibling
remove(prev.getNode)
remove(prevSibling.getNode)
}
}
}
}
remove(getNode)
}
}
override def deleteRedundantSingleSelectorBraces(): Unit = {
this.selectors match {
case Seq(selector: ScImportSelector) =>
if (!selector.isScala2StyleAliasImport) {
val textWithoutBraces = this.qualifier.fold("")(_.getText + ".") + selector.getText
this.replace(ScalaPsiElementFactory.createImportExprFromText(textWithoutBraces, this))
}
case _ =>
}
}
override def selectorSet: Option[ScImportSelectors] =
this.stubOrPsiChild(IMPORT_SELECTORS)
override def reference: Option[ScStableCodeReference] =
byPsiOrStub(getFirstChild.asOptionOf[ScStableCodeReference])(_.reference)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/imports/ScImportExprImpl.scala
|
Scala
|
apache-2.0
| 4,145 |
package com.twitter.scrooge.java_generator
import com.twitter.scrooge.ast._
import com.twitter.scrooge.ast.SetType
import com.twitter.scrooge.ast.MapType
class FieldValueMetadataController(
fieldType: FieldType,
generator: ApacheJavaGenerator,
ns: Option[Identifier])
extends BaseController(generator, ns) {
val field_type = new FieldTypeController(fieldType, generator)
def map_element = {
fieldType match {
case MapType(k, v, _) => {
Map(
"field_value_meta_data_key" -> generateMetadata(k),
"field_value_meta_data_val" -> generateMetadata(v)
)
}
case _ => false
}
}
def set_or_list_element = {
fieldType match {
case SetType(x, _) => elem(x)
case ListType(x, _) => elem(x)
case _ => false
}
}
def elem(x: FieldType): Map[String, Object] = {
Map("field_value_meta_data_elem" -> generateMetadata(x))
}
def generateMetadata(k: FieldType): String = {
indent(generator.fieldValueMetaData(k, ns), 4, skipFirst = true, addLast = false)
}
}
|
elipoz/scrooge
|
scrooge-generator/src/main/scala/com/twitter/scrooge/java_generator/FieldValueMetadataController.scala
|
Scala
|
apache-2.0
| 1,069 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kinesis
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.Arrays
import com.amazonaws.services.kinesis.clientlibrary.exceptions._
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason
import com.amazonaws.services.kinesis.model.Record
import org.mockito.Matchers._
import org.mockito.Matchers.{eq => meq}
import org.mockito.Mockito._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.mock.MockitoSugar
import org.apache.spark.streaming.{Duration, TestSuiteBase}
/**
* Suite of Kinesis streaming receiver tests focusing mostly on the KinesisRecordProcessor
*/
class KinesisReceiverSuite extends TestSuiteBase with Matchers with BeforeAndAfter
with MockitoSugar {
val app = "TestKinesisReceiver"
val stream = "mySparkStream"
val endpoint = "endpoint-url"
val workerId = "dummyWorkerId"
val shardId = "dummyShardId"
val seqNum = "dummySeqNum"
val checkpointInterval = Duration(10)
val someSeqNum = Some(seqNum)
val record1 = new Record()
record1.setData(ByteBuffer.wrap("Spark In Action".getBytes(StandardCharsets.UTF_8)))
val record2 = new Record()
record2.setData(ByteBuffer.wrap("Learning Spark".getBytes(StandardCharsets.UTF_8)))
val batch = Arrays.asList(record1, record2)
var receiverMock: KinesisReceiver[Array[Byte]] = _
var checkpointerMock: IRecordProcessorCheckpointer = _
override def beforeFunction(): Unit = {
receiverMock = mock[KinesisReceiver[Array[Byte]]]
checkpointerMock = mock[IRecordProcessorCheckpointer]
}
test("process records including store and set checkpointer") {
when(receiverMock.isStopped()).thenReturn(false)
when(receiverMock.getCurrentLimit).thenReturn(Int.MaxValue)
val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
recordProcessor.initialize(shardId)
recordProcessor.processRecords(batch, checkpointerMock)
verify(receiverMock, times(1)).isStopped()
verify(receiverMock, times(1)).addRecords(shardId, batch)
verify(receiverMock, times(1)).setCheckpointer(shardId, checkpointerMock)
}
test("split into multiple processes if a limitation is set") {
when(receiverMock.isStopped()).thenReturn(false)
when(receiverMock.getCurrentLimit).thenReturn(1)
val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
recordProcessor.initialize(shardId)
recordProcessor.processRecords(batch, checkpointerMock)
verify(receiverMock, times(1)).isStopped()
verify(receiverMock, times(1)).addRecords(shardId, batch.subList(0, 1))
verify(receiverMock, times(1)).addRecords(shardId, batch.subList(1, 2))
verify(receiverMock, times(1)).setCheckpointer(shardId, checkpointerMock)
}
test("shouldn't store and update checkpointer when receiver is stopped") {
when(receiverMock.isStopped()).thenReturn(true)
when(receiverMock.getCurrentLimit).thenReturn(Int.MaxValue)
val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
recordProcessor.processRecords(batch, checkpointerMock)
verify(receiverMock, times(1)).isStopped()
verify(receiverMock, never).addRecords(anyString, anyListOf(classOf[Record]))
verify(receiverMock, never).setCheckpointer(anyString, meq(checkpointerMock))
}
test("shouldn't update checkpointer when exception occurs during store") {
when(receiverMock.isStopped()).thenReturn(false)
when(receiverMock.getCurrentLimit).thenReturn(Int.MaxValue)
when(
receiverMock.addRecords(anyString, anyListOf(classOf[Record]))
).thenThrow(new RuntimeException())
intercept[RuntimeException] {
val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
recordProcessor.initialize(shardId)
recordProcessor.processRecords(batch, checkpointerMock)
}
verify(receiverMock, times(1)).isStopped()
verify(receiverMock, times(1)).addRecords(shardId, batch)
verify(receiverMock, never).setCheckpointer(anyString, meq(checkpointerMock))
}
test("shutdown should checkpoint if the reason is TERMINATE") {
when(receiverMock.getLatestSeqNumToCheckpoint(shardId)).thenReturn(someSeqNum)
val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
recordProcessor.initialize(shardId)
recordProcessor.shutdown(checkpointerMock, ShutdownReason.TERMINATE)
verify(receiverMock, times(1)).removeCheckpointer(meq(shardId), meq(checkpointerMock))
}
test("shutdown should not checkpoint if the reason is something other than TERMINATE") {
when(receiverMock.getLatestSeqNumToCheckpoint(shardId)).thenReturn(someSeqNum)
val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
recordProcessor.initialize(shardId)
recordProcessor.shutdown(checkpointerMock, ShutdownReason.ZOMBIE)
recordProcessor.shutdown(checkpointerMock, null)
verify(receiverMock, times(2)).removeCheckpointer(meq(shardId),
meq[IRecordProcessorCheckpointer](null))
}
test("retry success on first attempt") {
val expectedIsStopped = false
when(receiverMock.isStopped()).thenReturn(expectedIsStopped)
val actualVal = KinesisRecordProcessor.retryRandom(receiverMock.isStopped(), 2, 100)
assert(actualVal == expectedIsStopped)
verify(receiverMock, times(1)).isStopped()
}
test("retry success on second attempt after a Kinesis throttling exception") {
val expectedIsStopped = false
when(receiverMock.isStopped())
.thenThrow(new ThrottlingException("error message"))
.thenReturn(expectedIsStopped)
val actualVal = KinesisRecordProcessor.retryRandom(receiverMock.isStopped(), 2, 100)
assert(actualVal == expectedIsStopped)
verify(receiverMock, times(2)).isStopped()
}
test("retry success on second attempt after a Kinesis dependency exception") {
val expectedIsStopped = false
when(receiverMock.isStopped())
.thenThrow(new KinesisClientLibDependencyException("error message"))
.thenReturn(expectedIsStopped)
val actualVal = KinesisRecordProcessor.retryRandom(receiverMock.isStopped(), 2, 100)
assert(actualVal == expectedIsStopped)
verify(receiverMock, times(2)).isStopped()
}
test("retry failed after a shutdown exception") {
when(checkpointerMock.checkpoint()).thenThrow(new ShutdownException("error message"))
intercept[ShutdownException] {
KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
}
verify(checkpointerMock, times(1)).checkpoint()
}
test("retry failed after an invalid state exception") {
when(checkpointerMock.checkpoint()).thenThrow(new InvalidStateException("error message"))
intercept[InvalidStateException] {
KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
}
verify(checkpointerMock, times(1)).checkpoint()
}
test("retry failed after unexpected exception") {
when(checkpointerMock.checkpoint()).thenThrow(new RuntimeException("error message"))
intercept[RuntimeException] {
KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
}
verify(checkpointerMock, times(1)).checkpoint()
}
test("retry failed after exhausting all retries") {
val expectedErrorMessage = "final try error message"
when(checkpointerMock.checkpoint())
.thenThrow(new ThrottlingException("error message"))
.thenThrow(new ThrottlingException(expectedErrorMessage))
val exception = intercept[RuntimeException] {
KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
}
exception.getMessage().shouldBe(expectedErrorMessage)
verify(checkpointerMock, times(2)).checkpoint()
}
}
|
mike0sv/spark
|
external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala
|
Scala
|
apache-2.0
| 8,657 |
/*
*
* Copyright (c) 2016 LIBBLE team supervised by Dr. Wu-Jun LI at Nanjing University.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Created by syh on 2016/12/9.
*/
package libble.collaborativeFiltering
import libble.linalg.implicits._
import libble.linalg.{DenseVector, Vector}
import libble.utils.{XORShiftRandom, WorkerStore}
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ArrayBuffer
import scala.util.hashing.byteswap64
case class Rating(rating: Double, index_x: Int, index_y: Int)
/**
 * This is an accelerated version of matrix factorization,
 * but it requires that numParts equals the actual number of machines.
 */
class MatrixFactorization extends Serializable {
/**
* initialize the user factors and item factors randomly
*
* @param indices user(item) indices
* @param rank the length of factor
* @return
*/
def initialize(indices: Set[Int], rank :Int) : Map[Int, Vector]= {
val seedGen = new XORShiftRandom()
val random = new XORShiftRandom(byteswap64(seedGen.nextLong()))
val vectors = new Array[Vector](indices.size)
for (i <- vectors.indices) {
val factors = Array.fill(rank)(random.nextGaussian())
val v = new DenseVector(factors)
v /= v.norm2()
vectors(i) = v
}
indices.zip(vectors).toMap
}
  /**
   * This is an accelerated version of matrix factorization,
   * but it requires that numParts equals the actual number of machines.
   *
   * @param trainSet RDD of ratings
   * @param numIters number of outer-loop iterations
   * @param numParts number of workers
   * @param rank length of each factor vector
   * @param lambda_u regularization parameter for users
   * @param lambda_v regularization parameter for items
   * @param stepSize step size used to update the factors
   * @return matrix factorization model
   */
def train (trainSet: RDD[Rating],
numIters: Int,
numParts: Int,
rank: Int,
lambda_u: Double,
lambda_v: Double,
stepSize: Double,
ifPrintLoss: Int) : MatrixFactorizationModel = {
var stepsize = stepSize
val items = trainSet.mapPartitions{iter =>
val is = iter.map(r => r.index_y).toSet
Iterator.single(is)
}
.reduce((a,b)=> a.union(b))
val numRatings = trainSet.count()
    // randomly hash the data by row
val ratingsByRow = trainSet.groupBy(_.index_x)
.repartition(numParts)
.values
.flatMap(i=>i)
.cache()
    // the number of inner iterations is the maximum number of ratings across the p workers
    val numInnerIters = ratingsByRow.mapPartitions(i => Iterator.single(i.length)).reduce((a,b)=>math.max(a,b))
    // initialize the item factors on the master
    var itemFactors = initialize(items, rank)
    // initialize U on the p workers
ratingsByRow.mapPartitionsWithIndex{(index,iter) =>
val indices_x = iter.map(r => r.index_x).toSet
val userFactors = initialize(indices_x,rank)
MatrixFactorization.workerstore.put(s"userFactors_$index", userFactors)
Iterator.single(0)
}.count()
//main loop
val startTime = System.currentTimeMillis()
val lossList = new ArrayBuffer[Double]()
var testTime = 0L
var i = 0
while (i < numIters){
if(ifPrintLoss == 1){
//loss
val testTimeStart = System.currentTimeMillis()
val bc_test_itemFactors = ratingsByRow.context.broadcast(itemFactors)
//training loss
val loss = ratingsByRow.mapPartitionsWithIndex {(index,iter) =>
val localV = bc_test_itemFactors.value
val localU = MatrixFactorization.workerstore.get[Map[Int, Vector]](s"userFactors_$index")
val reguV = localV.mapValues(v => lambda_v * v.dot(v))
val reguU = localU.mapValues(u => lambda_u * u.dot(u))
val ls = iter.foldLeft(0.0) { (l, r) =>
val uh = localU.get(r.index_x).get
val vj = localV.get(r.index_y).get
val residual = r.rating - uh.dot(vj)
l + residual * residual + reguU.get(r.index_x).get + reguV.get(r.index_y).get
}
Iterator.single(ls)
}.sum() / numRatings
bc_test_itemFactors.unpersist()
print(s"$loss\\t")
testTime += (System.currentTimeMillis() - testTimeStart)
println(s"${System.currentTimeMillis() - testTime - startTime}")
}
      // broadcast V to the p workers
      val bc_itemFactors = ratingsByRow.context.broadcast(itemFactors)
      // each worker i does the following in parallel
val (newItemFactors, lossSum) = ratingsByRow.mapPartitionsWithIndex{case(index,iter) =>
val localRatings = iter.toArray
val numLocalRatings = localRatings.length
val localV = bc_itemFactors.value
val localU = MatrixFactorization.workerstore.get[Map[Int, Vector]](s"userFactors_$index")
val seedGen = new XORShiftRandom()
val random = new XORShiftRandom(byteswap64(seedGen.nextLong() ^ index))
var loss = 0.0
//inner loop
for(i <- 1 to numInnerIters){
//randomly select an instance r_h,k from R_i
val ranRating = localRatings(random.nextInt(numLocalRatings))
val uh = localU.get(ranRating.index_x).get
val vj = localV.get(ranRating.index_y).get
//update uh
val residual = ranRating.rating - uh.dot(vj)
uh *= (1- stepsize * lambda_u)
uh.plusax(stepsize * residual, vj)
}
for(i <- 1 to numInnerIters){
//randomly select an instance r_h,k from R_i
val ranRating = localRatings(random.nextInt(numLocalRatings))
val uh = localU.get(ranRating.index_x).get
val vj = localV.get(ranRating.index_y).get
//update vj
val residual = ranRating.rating - uh.dot(vj)
vj *= (1 - stepsize * lambda_v)
vj.plusax(stepsize * residual, uh)
loss += (residual * residual)
}
Iterator.single((bc_itemFactors.value, loss))
}
.reduce { (a, b) =>
val temp = a._1
b._1.foreach{case (i, v) =>
v.plusax(1.0, temp.get(i).get)
}
(b._1, a._2 + b._2)
}
itemFactors = newItemFactors
itemFactors.foreach(ui => ui._2 /= numParts.toDouble)
bc_itemFactors.unpersist()
// val approxLoss = lossSum / (numParts * numInnerIters)
// if (i != 0) {
// val oldLoss = lossList.last
// if (approxLoss > oldLoss)
// stepsize = stepsize * 0.5
// else
// stepsize *= 1.05
// }
// lossList.append(approxLoss)
// println(s"approximate loss: $approxLoss, time: ${System.currentTimeMillis() - startTime}")
i += 1
}
val trainOver = System.currentTimeMillis()
val bc_test_itemFactors = ratingsByRow.context.broadcast(itemFactors)
val loss = ratingsByRow.mapPartitionsWithIndex { (index,iter )=>
val localV = bc_test_itemFactors.value
val localU = MatrixFactorization.workerstore.get[Map[Int, Vector]](s"userFactors_$index")
val reguV = localV.mapValues(v => lambda_v * v.dot(v))
val reguU = localU.mapValues(u => lambda_u * u.dot(u))
val ls = iter.foldLeft(0.0) { (l, r) =>
val uh = localU.get(r.index_x).get
val vj = localV.get(r.index_y).get
val residual = r.rating - uh.dot(vj)
l + residual * residual + reguU.get(r.index_x).get + reguV.get(r.index_y).get
}
Iterator.single(ls)
}
.reduce(_ + _) / numRatings
bc_test_itemFactors.unpersist()
println(s"loss: $loss\\t")
println(s"cputime of training process(ms): ${ trainOver - startTime }")
val userFactorsRDD = ratingsByRow.mapPartitionsWithIndex{(index,iter) =>
val factors = MatrixFactorization.workerstore.get[Map[Int, Vector]](s"userFactors_$index")
factors.toIterator
}.cache()
val itemFactorsRDD = ratingsByRow.context.parallelize(itemFactors.toSeq, numParts).cache()
new MatrixFactorizationModel(rank, userFactorsRDD, itemFactorsRDD)
}
}
object MatrixFactorization {
val workerstore = new WorkerStore()
}
|
syh6585/LIBBLE-Spark
|
src/main/scala/collaborativeFiltering/MatrixFactorization.scala
|
Scala
|
apache-2.0
| 8,662 |
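A hypothetical driver program showing how the train method above might be invoked. The SparkConf settings, the toy ratings and every hyper-parameter value are invented for illustration; only the method signature itself comes from the source.

import libble.collaborativeFiltering.{MatrixFactorization, Rating}
import org.apache.spark.{SparkConf, SparkContext}

object MatrixFactorizationExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("mf-example").setMaster("local[2]"))
    val ratings = sc.parallelize(Seq(
      Rating(5.0, 0, 0), Rating(3.0, 0, 1), Rating(4.0, 1, 0), Rating(1.0, 1, 2)))
    val model = new MatrixFactorization().train(
      trainSet = ratings,
      numIters = 10,
      numParts = 2, // must equal the actual number of workers, per the scaladoc
      rank = 8,
      lambda_u = 0.05,
      lambda_v = 0.05,
      stepSize = 0.01,
      ifPrintLoss = 0)
    println(model)
    sc.stop()
  }
}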
package com.rumblesan.giftest
import processing.core._
object App {
def main(args: Array[String]) {
PApplet.main(
Array("com.rumblesan.giftest.Giftest")
)
}
}
|
rumblesan/giftest
|
src/main/scala/App.scala
|
Scala
|
mit
| 182 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ctrl.controllers
import akka.actor.ActorSelection
import cmwell.ctrl.controllers.CassandraController._
import cmwell.ctrl.config.Config
import cmwell.ctrl.utils.ProcUtil
import com.typesafe.scalalogging.LazyLogging
import k.grid.Grid
import scala.concurrent.{Future, blocking}
import scala.util.{Failure, Success}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by michael on 2/16/15.
*/
abstract class ComponentController(startScriptLocation : String, psIdentifier : String, dirIdentifier : Set[String]) {
object ComponentControllerLogger extends LazyLogging {
lazy val l = logger
}
protected val startScriptPattern : String = "start[0-9]*.sh"
def getStartScriptLocation = startScriptLocation
def getStartScripts(location : String) : Set[String] = {
ProcUtil.executeCommand(s"ls -1 $location/ | grep $startScriptPattern") match {
case Success(str) =>
str.trim.split("\\n").toSet
case Failure(err) => Set.empty[String]
}
}
def getDataDirs(location : String, id : String) : Set[String] = {
ProcUtil.executeCommand(s"ls -1 $location | grep $id[0-9]*") match {
case Success(str) =>
str.trim.split("\\n").toSet
case Failure(err) => Set.empty[String]
}
}
private def doStart: Unit = {
getStartScripts(startScriptLocation) foreach {
sScript =>
val runScript = s"HAL=9000 $startScriptLocation/$sScript"
ProcUtil.executeCommand(runScript)
}
}
  def start {
    Future {
      blocking {
        doStart
      }
    }
  }
private def doStop(forceKill : Boolean = false, tries : Int = 5): Unit = {
val cmd = s"ps aux | grep $psIdentifier | egrep -v 'grep|starter' | awk '{print $$2}' | xargs kill ${if(forceKill) "-9" else ""}"
ComponentControllerLogger.l.info(s"executing $cmd")
ProcUtil.executeCommand(cmd)
val isDead = ProcUtil.executeCommand(s"ps aux | grep $psIdentifier | egrep -v 'grep|starter' | awk '{print $$2}'").get.isEmpty
if(!isDead){
if(tries > 1) doStop(false , tries - 1) else doStop(true , tries - 1)
}
}
def stop {
Future {
blocking {
doStop()
}
}
}
def restart: Unit = {
Future {
blocking {
doStop()
doStart
}
}
}
def clearData {
Future {
blocking {
dirIdentifier.foreach{
id =>
getDataDirs(s"${Config.cmwellHome}/data/", id).foreach {
dir =>
ProcUtil.executeCommand(s"rm -rf ${Config.cmwellHome}/data/$dir/")
}
}
}
}
}
}
|
nruppin/CM-Well
|
server/cmwell-controller/src/main/scala/cmwell/ctrl/controllers/ComponentController.scala
|
Scala
|
apache-2.0
| 3,259 |
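An illustrative, purely hypothetical concrete controller showing how the constructor arguments of the abstract ComponentController above are meant to be filled in; the script location, process identifier and data-directory prefix are made up and do not correspond to a real CM-Well component definition.

package cmwell.ctrl.controllers

import cmwell.ctrl.config.Config

object ExampleComponentController
  extends ComponentController(s"${Config.cmwellHome}/app/example", "ExampleProcess", Set("example"))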
package fpscala.chapter7
import java.util.concurrent._
import scala.util._
object Par {
type Par[A] = ExecutorService => Future[A]
val globEs = Executors.newFixedThreadPool(1)
def run[A](s: ExecutorService)(a: Par[A]): Future[A] = a(s)
def mapWithTimeouts[A, B, C](a: Par[A], b: Par[B])(f: (A, B) => C): Par[C] = es => {
val af: Future[A] = a(es)
val bf: Future[B] = b(es)
    val timeout: Long = 1L
val aValMb = Try(af.get(timeout, TimeUnit.SECONDS))
val bValMb = Try(bf.get(timeout, TimeUnit.SECONDS))
(aValMb, bValMb) match {
case (Success(aVal), Success(bVal)) => UnitFuture(f(aVal, bVal))
case _ => throw new Exception
}
}
def parMap[A, B](ps: List[A])(f: A => B): Par[List[B]] = fork {
val bFutures: List[Par[B]] = ps.map(asyncF(f))
sequence(bFutures)
}
def parFilterNot[A](as: List[A])(f: A => Boolean): Par[List[A]] = {
doParFilter(as)(f)(true)
}
private def doParFilter[A](as: List[A])(f: A => Boolean)(isNot: Boolean): Par[List[A]] = {
val lpl: List[Par[List[A]]] = as map (asyncF(el => if (not(f, el, isNot)) List(el) else List()))
map(sequence(lpl))(_.flatten)
}
def map[A, B](pa: Par[A])(f: A => B): Par[B] = map2(pa, unit(()))((a, _) => f(a))
private def not[A](f: A => Boolean, a: A, isNot: Boolean) = if (isNot) {
!f(a)
} else {
f(a)
}
def asyncF[A, B](f: A => B): A => Par[B] = { a => lazyUnit(f(a)) }
def lazyUnit[A](a: => A): Par[A] = fork(unit(a))
def fork[A](a: => Par[A]): Par[A] = es => es.submit(new Callable[A] {
override def call(): A = a(es).get
})
def unit[A](a: A): Par[A] = (es: ExecutorService) => UnitFuture(a)
  /**
   * Folds over a list of Pars, combining them into a single Par that yields the list of results.
   * @param ps the parallel computations to sequence
   * @tparam A the element type
   * @return a Par producing the collected results
   */
def sequence[A](ps: List[Par[A]]): Par[List[A]] = ps.foldRight[Par[List[A]]](unit(List()))((aParElement, parOfAList) =>
map2(aParElement, parOfAList)((element, acc) => element :: acc))
def map2[A, B, C](a: Par[A], b: Par[B])(f: (A, B) => C): Par[C] = es => {
val af = a(es)
val bf = b(es)
UnitFuture(f(af.get(), bf.get()))
}
def sequencev2[A](ps:List[Par[A]]):Par[List[A]] = es => {
val futures:List[Future[A]] = ps.map{ aPar => aPar(es)}
val as:List[A] = futures.map { aFuture => aFuture.get() }
UnitFuture(as)
}
def sequenceCopied[A](ps:List[Par[A]]):Par[List[A]] = ps match {
case Nil => unit(List())
case hPar :: tList => {
map2(hPar,sequenceCopied(tList))((el,acc) => el :: acc)
}
}
def parSum(as: List[Int]): Par[Int] = {
if (as.length <= 1) {
unit(as.headOption.getOrElse(0))
}
else {
val (l, r): (List[Int], List[Int]) = as.splitAt(as.length / 2)
val lPar: Par[Int] = parSum(l)
val rPar: Par[Int] = parSum(r)
map2(lPar, rPar)((e1, e2) => e1 + e2)
}
}
def parSum1(as: List[Int]): Par[Int] = {
if (as.length <= 1) {
unit(as.headOption.getOrElse(0))
}
else {
val (l, r): (List[Int], List[Int]) = as.splitAt(as.length / 2)
val lPar: Par[Int] = parSum1(l)
val rPar: Par[Int] = parSum1(r)
map2(lPar, rPar)((e1, e2) => e1 + e2)
}
}
/**
* From github source
* https://github.com/fpinscala/fpinscala/blob/master/answerkey/parallelism/06.answer.scala
* @param as
* @param f
* @tparam A
* @return
*/
def parFilter2[A](as: List[A])(f: A => Boolean): Par[List[A]] = {
doParFilter(as)(f)(false)
}
def parFilterv3[A](as:List[A])(f: A => Boolean): Par[List[A]] = as match {
case Nil => unit(List())
case h :: t => {
val parB:Par[Boolean] = asyncF(f)(h)
map2(parB,parFilterv3(t)(f)){(el,acc) => if(el) {h :: acc} else {acc}}
}
}
def parExists[A](as: List[A])(f: A => Boolean): Par[Boolean] = map(parFilter2(as)(f))(aList => !aList.isEmpty)
def merge(a: Map[String, Int], b: Map[String, Int]): Map[String, Int] = {
val merged: Map[String, Int] = a.map { case (x, y) => {
val count = b.get(x).getOrElse(0) + y;
(x, count)
}
}
b ++ merged
}
def count(list:List[String]):Map[String,Int] = {
list.groupBy { el => el }.mapValues { x => x.size }
}
def parWordCount(paras:List[List[String]],soFar:Map[String,Int]): Map[String,Int] = paras match {
case Nil => soFar
case h :: t => {
val counts = count(h)
val merged = merge(counts,soFar)
parWordCount(t,merged)
}
}
private case class UnitFuture[A](get: A) extends Future[A] {
def isDone = true
def isCancelled = false
def get(timeout: Long, units: TimeUnit) = get
def cancel(eventIfRunning: Boolean) = false
}
}
|
sajit/learnyou
|
scala/minimal-scala/src/main/scala/fpscala/chapter7/Par.scala
|
Scala
|
mit
| 4,711 |
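A small usage sketch of the Par combinators defined above. The thread pool and the doubling function are arbitrary choices; a cached pool is used because this fork implementation blocks a pool thread while waiting on nested futures and can deadlock on small fixed-size pools.

import java.util.concurrent.Executors
import fpscala.chapter7.Par

object ParUsage extends App {
  val es = Executors.newCachedThreadPool()
  val doubled = Par.parMap(List(1, 2, 3, 4))(_ * 2) // Par[List[Int]]
  println(Par.run(es)(doubled).get())               // List(2, 4, 6, 8)
  es.shutdown()
}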
package org.scalatra
import javax.servlet.http.{ Cookie => ServletCookie }
import org.scalatra.test.scalatest.ScalatraFunSuite
class RequestCookiesTest extends ScalatraFunSuite {
addServlet(new ScalatraServlet {
get("/multi-cookies") {
Seq("one", "two", "three") map { key =>
response.setHeader(key, request.multiCookies(key).mkString(":"))
}
}
get("/cookies") {
Seq("one", "two", "three") map { key =>
response.setHeader(key, request.cookies.getOrElse(key, "NONE"))
}
}
}, "/*")
test("multiCookies is a multi-map of names to values") {
get("/multi-cookies", headers = Map("Cookie" -> "one=uno; one=eins; two=zwei")) {
header("one") should be("uno:eins")
header("two") should be("zwei")
header("three") should be("")
}
}
test("cookies is a map of names to values") {
get("/cookies", headers = Map("Cookie" -> "one=uno; one=eins; two=zwei")) {
header("one") should be("uno")
header("two") should be("zwei")
header("three") should be("NONE")
}
}
}
|
0xfaded/scalatra
|
core/src/test/scala/org/scalatra/RequestCookiesTest.scala
|
Scala
|
bsd-2-clause
| 1,072 |
package com.twitter.finagle.httpx.service
import com.twitter.finagle.Service
import com.twitter.finagle.httpx.{Request, Response, Method}
import com.twitter.finagle.httpx.path.Path
import com.twitter.util.Future
/**
* RoutingService for composing Services. Responds with 404 Not Found if no
* matching service.
*
* RoutingService.byPath {
* case "/search.json" => mySearchService
* ....
* }
*/
class RoutingService[REQUEST <: Request](
val routes: PartialFunction[Request, Service[REQUEST, Response]])
extends Service[REQUEST, Response] {
// Try routes, fall back to 404 Not Found
protected[this] val notFoundService = new NotFoundService[REQUEST]
protected[this] val notFoundPf: PartialFunction[REQUEST, Service[REQUEST, Response]] = {
case _ => notFoundService
}
protected[this] val requestToService = routes orElse notFoundPf
def apply(request: REQUEST): Future[Response] = {
val service = requestToService(request)
service(request)
}
}
object RoutingService {
def byPath[REQUEST](routes: PartialFunction[String, Service[REQUEST, Response]]) =
new RoutingService(
new PartialFunction[Request, Service[REQUEST, Response]] {
def apply(request: Request) = routes(request.path)
def isDefinedAt(request: Request) = routes.isDefinedAt(request.path)
})
def byPathObject[REQUEST](routes: PartialFunction[Path, Service[REQUEST, Response]]) =
new RoutingService(
new PartialFunction[Request, Service[REQUEST, Response]] {
def apply(request: Request) = routes(Path(request.path))
def isDefinedAt(request: Request) = routes.isDefinedAt(Path(request.path))
})
def byMethodAndPath[REQUEST](routes: PartialFunction[(Method, String), Service[REQUEST, Response]]) =
new RoutingService(
new PartialFunction[Request, Service[REQUEST, Response]] {
def apply(request: Request) = routes((request.method, request.path))
def isDefinedAt(request: Request) = routes.isDefinedAt((request.method, request.path))
})
def byMethodAndPathObject[REQUEST](routes: PartialFunction[(Method, Path), Service[REQUEST, Response]]) =
new RoutingService(
new PartialFunction[Request, Service[REQUEST, Response]] {
def apply(request: Request) = routes((request.method, Path(request.path)))
def isDefinedAt(request: Request) = routes.isDefinedAt((request.method, Path(request.path)))
})
}
|
LithiumTD/finagle
|
finagle-httpx/src/main/scala/com/twitter/finagle/httpx/service/RoutingService.scala
|
Scala
|
apache-2.0
| 2,441 |
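A sketch of wiring the router above. The echo service and the "/echo" route are invented; the point is only to show byPath taking a partial function from paths to services, with unmatched paths falling through to the built-in 404 service.

import com.twitter.finagle.Service
import com.twitter.finagle.httpx.{Request, Response}
import com.twitter.finagle.httpx.service.RoutingService
import com.twitter.util.Future

object ExampleRoutes {
  val echoService: Service[Request, Response] = new Service[Request, Response] {
    def apply(req: Request): Future[Response] = {
      val rep = Response()
      rep.contentString = req.path
      Future.value(rep)
    }
  }

  // Anything other than /echo is answered by RoutingService's NotFoundService.
  val router: Service[Request, Response] = RoutingService.byPath[Request] {
    case "/echo" => echoService
  }
}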
object SumOddElements extends App{
def f(arr:List[Int]):Int = arr.filter(_ % 2 != 0).sum
}
|
PaulNoth/hackerrank
|
practice/functional_programming/introduction/sum_of_odd_elements/SumOddElements.scala
|
Scala
|
mit
| 93 |
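A quick check of f above; the expected value is noted in the comment.

object SumOddElementsCheck extends App {
  println(SumOddElements.f(List(1, 2, 3, 4, 5))) // 1 + 3 + 5 = 9
}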
package zzb.datatype
import java.io._
import org.scalatest.{MustMatchers, WordSpec}
/**
* Created by Simon on 2014/4/29
*/
class MapTypeTest extends WordSpec with MustMatchers {
import zzb.datatype.BasicFormats._
val Colors = TMap[String, Int]("colors", "colors")
object House extends TStruct {
val colors = Field(Colors)
    override val t_memo_ : String = "Colorful house"
}
def serializeTest(o: Serializable) = {
val bs = new ByteArrayOutputStream()
val out = new ObjectOutputStream(bs)
out.writeObject(o)
out.close()
val oin = new ObjectInputStream(new ByteArrayInputStream(bs.toByteArray))
val t = oin.readObject()
oin.close()
println("序列化前的对象----" + o)
println("反序列化的对象----" + t)
t mustBe o
}
"TMap" must {
"支持覆盖操作" in {
val c1 = Colors(Map("white" -> 1, "red" -> 2, "black" -> 5))
val c2 = Colors(Map("white" -> 1, "red" -> 3, "blue" -> 4))
serializeTest(c1)
val c1to2 = c1 ->> c2
val c2to1 = c2 ->> c1
c1to2.size mustBe 4
c2to1.size mustBe 4
c1to2("red").get mustBe 2
c2to1("red").get mustBe 3
c1to2("black").get mustBe c2to1("black").get
}
"集合字段赋值" in {
import House._
val h0 = House(colors := Map("White" -> 1, "Red" -> 2))
val cc = h0(colors).get.value
cc("White") mustBe 1
intercept[IllegalArgumentException] {
House(colors := Map("White" -> "1", "Red" -> "2"))
}
}
}
}
|
stepover/zzb
|
zzb-datatype/src/test/scala/zzb/datatype/MapTypeTest.scala
|
Scala
|
mit
| 1,538 |
package reactivemongo.api
import scala.util.{ Failure, Success, Try }
import scala.concurrent.{ ExecutionContext, Future }
import reactivemongo.util.ExtendedFutures.DelayedFuture
import reactivemongo.core.netty.BufferSequence
import reactivemongo.core.protocol.{
GetMore,
KillCursors,
MongoWireVersion,
Query,
QueryFlags,
RequestMaker,
RequestOp,
Response,
ReplyDocumentIterator,
ReplyDocumentIteratorExhaustedException
}
import reactivemongo.core.actors.{
Exceptions,
RequestMakerExpectingResponse
}
import reactivemongo.api.commands.ResultCursor
@deprecated("Internal: will be made private", "0.16.0")
object DefaultCursor {
import Cursor.{ State, Cont, Fail, logger }
import CursorOps.Unrecoverable
@deprecated("No longer implemented", "0.16.0")
def query[P <: SerializationPack, A](
pack: P,
query: Query,
requestBuffer: Int => BufferSequence,
readPreference: ReadPreference,
mongoConnection: MongoConnection,
failover: FailoverStrategy,
isMongo26WriteOp: Boolean,
collectionName: String)(implicit reader: pack.Reader[A]): Impl[A] =
throw new UnsupportedOperationException("Use query with DefaultDB")
/**
* @param collectionName the fully qualified collection name (even if `query.fullCollectionName` is `\\$cmd`)
*/
private[reactivemongo] def query[P <: SerializationPack, A](
pack: P,
query: Query,
requestBuffer: Int => BufferSequence,
readPreference: ReadPreference,
db: DB,
failover: FailoverStrategy,
isMongo26WriteOp: Boolean,
collectionName: String,
maxTimeMS: Option[Long])(implicit reader: pack.Reader[A]): Impl[A] =
new Impl[A] {
val preference = readPreference
val database = db
val failoverStrategy = failover
val mongo26WriteOp = isMongo26WriteOp
val fullCollectionName = collectionName
val numberToReturn = {
val version = connection._metadata.
fold[MongoWireVersion](MongoWireVersion.V30)(_.maxWireVersion)
if (version.compareTo(MongoWireVersion.V32) < 0) {
// see QueryOpts.batchSizeN
if (query.numberToReturn <= 0) {
Cursor.DefaultBatchSize
} else query.numberToReturn
} else {
1 // nested 'cursor' document
}
}
val tailable = (query.flags &
QueryFlags.TailableCursor) == QueryFlags.TailableCursor
val makeIterator = ReplyDocumentIterator.parse(pack)(_: Response)(reader)
@inline def makeRequest(maxDocs: Int)(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[Response] = Failover2(connection, failoverStrategy) { () =>
val ntr = toReturn(numberToReturn, maxDocs, 0)
// MongoDB2.6: Int.MaxValue
val op = query.copy(numberToReturn = ntr)
val req = new RequestMakerExpectingResponse(
requestMaker = RequestMaker(
op, requestBuffer(maxDocs), readPreference),
isMongo26WriteOp = isMongo26WriteOp,
pinnedNode = transaction.flatMap(_.pinnedNode))
requester(0, maxDocs, req)(ec)
}.future.flatMap {
case Response.CommandError(_, _, _, cause) =>
Future.failed[Response](cause)
case response =>
Future.successful(response)
}
val builder = pack.newBuilder
val getMoreOpCmd: Function2[Long, Int, (RequestOp, BufferSequence)] = {
if (lessThenV32) { (cursorId, ntr) =>
GetMore(fullCollectionName, ntr, cursorId) -> BufferSequence.empty
} else {
val moreQry = query.copy(numberToSkip = 0, numberToReturn = 1)
val collName = fullCollectionName.span(_ != '.')._2.tail
{ (cursorId, ntr) =>
import builder.{ elementProducer => elem, int, long, string }
val cmdOpts = Seq.newBuilder[pack.ElementProducer] ++= Seq(
elem("getMore", long(cursorId)),
elem("collection", string(collName)),
elem("batchSize", int(ntr)))
maxTimeMS.foreach { ms =>
cmdOpts += elem("maxTimeMS", long(ms))
}
val cmd = builder.document(cmdOpts.result())
moreQry -> BufferSequence.single[pack.type](pack)(cmd)
}
}
}
}
@deprecated("No longer implemented", "0.16.0")
def getMore[P <: SerializationPack, A](
pack: P,
preload: => Response,
result: ResultCursor,
toReturn: Int,
readPreference: ReadPreference,
mongoConnection: MongoConnection,
failover: FailoverStrategy,
isMongo26WriteOp: Boolean)(implicit reader: pack.Reader[A]): Impl[A] =
throw new UnsupportedOperationException("No longer implemented")
private[reactivemongo] trait Impl[A]
extends Cursor[A] with CursorOps[A] with CursorCompat[A] {
/** The read preference */
def preference: ReadPreference
def database: DB
@inline protected final def transaction =
database.session.flatMap(_.transaction.toOption)
@inline def connection: MongoConnection = database.connection
def failoverStrategy: FailoverStrategy
def mongo26WriteOp: Boolean
def fullCollectionName: String
def numberToReturn: Int
def tailable: Boolean
def makeIterator: Response => Iterator[A] // Unsafe
final def documentIterator(response: Response): Iterator[A] =
makeIterator(response)
protected final lazy val version = connection._metadata.
fold[MongoWireVersion](MongoWireVersion.V30)(_.maxWireVersion)
@inline protected def lessThenV32: Boolean =
version.compareTo(MongoWireVersion.V32) < 0
protected lazy val requester: (Int, Int, RequestMakerExpectingResponse) => ExecutionContext => Future[Response] = {
val base: ExecutionContext => RequestMakerExpectingResponse => Future[Response] = { implicit ec: ExecutionContext =>
database.session match {
case Some(session) => { req: RequestMakerExpectingResponse =>
connection.sendExpectingResponse(req).flatMap {
Session.updateOnResponse(session, _).map(_._2)
}
}
case _ =>
connection.sendExpectingResponse(_: RequestMakerExpectingResponse)
}
}
if (lessThenV32) {
{ (_: Int, maxDocs: Int, req: RequestMakerExpectingResponse) =>
val max = if (maxDocs > 0) maxDocs else Int.MaxValue
{ implicit ec: ExecutionContext =>
base(ec)(req).map { response =>
val fetched = // See nextBatchOffset
response.reply.numberReturned + response.reply.startingFrom
if (fetched < max) {
response
} else response match {
case error @ Response.CommandError(_, _, _, _) => error
case r => {
// Normalizes as MongoDB 2.x doesn't offer the 'limit'
// on query, which allows with MongoDB 3 to exhaust
// the cursor with a last partial batch
r.cursorID(0L)
}
}
}
}
}
} else { (startingFrom: Int, _: Int, req: RequestMakerExpectingResponse) =>
{ implicit ec: ExecutionContext =>
base(ec)(req).map {
// Normalizes as 'new' cursor doesn't indicate such property
_.startingFrom(startingFrom)
}
}
}
}
// cursorId: Long, toReturn: Int
protected def getMoreOpCmd: Function2[Long, Int, (RequestOp, BufferSequence)]
private def next(response: Response, maxDocs: Int)(implicit ec: ExecutionContext): Future[Option[Response]] = {
if (response.reply.cursorID != 0) {
// numberToReturn=1 for new find command,
// so rather use batchSize from the previous reply
val reply = response.reply
val nextOffset = nextBatchOffset(response)
val ntr = toReturn(reply.numberReturned, maxDocs, nextOffset)
val (op, cmd) = getMoreOpCmd(reply.cursorID, ntr)
logger.trace(s"Asking for the next batch of $ntr documents on cursor #${reply.cursorID}, after ${nextOffset}: $op")
def req = new RequestMakerExpectingResponse(
requestMaker = RequestMaker(op, cmd,
readPreference = preference,
channelIdHint = Some(response.info._channelId)),
isMongo26WriteOp = mongo26WriteOp,
pinnedNode = transaction.flatMap(_.pinnedNode))
Failover2(connection, failoverStrategy) { () =>
requester(nextOffset, maxDocs, req)(ec)
}.future.map(Some(_))
} else {
logger.warn("Call to next() but cursorID is 0, there is probably a bug")
Future.successful(Option.empty[Response])
}
}
@inline private def hasNext(response: Response, maxDocs: Int): Boolean =
(response.reply.cursorID != 0) && (
maxDocs < 0 || (nextBatchOffset(response) < maxDocs))
/** Returns next response using tailable mode */
private def tailResponse(current: Response, maxDocs: Int)(implicit ec: ExecutionContext): Future[Option[Response]] = {
{
@inline def closed = Future.successful {
logger.warn("[tailResponse] Connection is closed")
Option.empty[Response]
}
if (connection.killed) closed
else if (hasNext(current, maxDocs)) {
next(current, maxDocs).recoverWith {
case _: Exceptions.ClosedException => closed
case err =>
Future.failed[Option[Response]](err)
}
} else {
logger.debug("[tailResponse] Current cursor exhausted, renewing...")
DelayedFuture(500, connection.actorSystem).
flatMap { _ => makeRequest(maxDocs).map(Some(_)) }
}
}
}
def kill(cursorID: Long): Unit = // DEPRECATED
killCursor(cursorID)(connection.actorSystem.dispatcher)
def killCursor(id: Long)(implicit ec: ExecutionContext): Unit =
killCursors(id, "Cursor")
private def killCursors(
cursorID: Long,
logCat: String)(implicit ec: ExecutionContext): Unit = {
if (cursorID != 0) {
logger.debug(s"[$logCat] Clean up $cursorID, sending KillCursors")
val result = connection.sendExpectingResponse(
new RequestMakerExpectingResponse(
requestMaker = RequestMaker(
KillCursors(Set(cursorID)),
readPreference = preference),
isMongo26WriteOp = false,
pinnedNode = transaction.flatMap(_.pinnedNode)))
result.onComplete {
case Failure(cause) => logger.warn(
s"[$logCat] Fails to kill cursor #${cursorID}", cause)
case _ => ()
}
} else {
logger.trace(s"[$logCat] Nothing to release: cursor already exhausted ($cursorID)")
}
}
def head(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[A] =
makeRequest(1).flatMap { response =>
val result = documentIterator(response)
if (!result.hasNext) {
Future.failed[A](Cursor.NoSuchResultException)
} else Future(result.next())
}
final def headOption(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[Option[A]] =
makeRequest(1).flatMap { response =>
val result = documentIterator(response)
if (!result.hasNext) {
Future.successful(Option.empty[A])
} else {
Future(Some(result.next()))
}
}
@inline private def syncSuccess[T, U](f: (T, U) => State[T])(implicit ec: ExecutionContext): (T, U) => Future[State[T]] = { (a: T, b: U) => Future(f(a, b)) }
def foldResponses[T](z: => T, maxDocs: Int = -1)(suc: (T, Response) => State[T], err: (T, Throwable) => State[T])(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[T] = FoldResponses(z, makeRequest(maxDocs)(_: ExecutionContext),
nextResponse(maxDocs), killCursors _, syncSuccess(suc), err, maxDocs)(
connection.actorSystem, ec)
def foldResponsesM[T](z: => T, maxDocs: Int = -1)(suc: (T, Response) => Future[State[T]], err: (T, Throwable) => State[T])(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[T] = FoldResponses(z, makeRequest(maxDocs)(_: ExecutionContext),
nextResponse(maxDocs), killCursors _, suc, err, maxDocs)(
connection.actorSystem, ec)
def foldBulks[T](z: => T, maxDocs: Int = -1)(suc: (T, Iterator[A]) => State[T], err: (T, Throwable) => State[T])(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[T] = foldBulksM[T](z, maxDocs)(syncSuccess[T, Iterator[A]](suc), err)
def foldBulksM[T](z: => T, maxDocs: Int = -1)(suc: (T, Iterator[A]) => Future[State[T]], err: (T, Throwable) => State[T])(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[T] = foldResponsesM(z, maxDocs)({ (s, r) =>
Try(makeIterator(r)) match {
case Success(it) => suc(s, it)
case Failure(e) => Future.successful[State[T]](Fail(e))
}
}, err)
def foldWhile[T](z: => T, maxDocs: Int = -1)(suc: (T, A) => State[T], err: (T, Throwable) => State[T])(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[T] = foldWhileM[T](z, maxDocs)(syncSuccess[T, A](suc), err)
def foldWhileM[T](z: => T, maxDocs: Int = -1)(suc: (T, A) => Future[State[T]], err: (T, Throwable) => State[T])(implicit @deprecatedName(Symbol("ctx")) ec: ExecutionContext): Future[T] = {
def go(v: T, it: Iterator[A]): Future[State[T]] = {
if (!it.hasNext) {
Future.successful(Cont(v))
} else Try(it.next) match {
case Failure(
x @ ReplyDocumentIteratorExhaustedException(_)) =>
Future.successful(Fail(x))
case Failure(e) => err(v, e) match {
case Cont(cv) => go(cv, it)
case f @ Fail(Unrecoverable(_)) =>
/* already marked unrecoverable */ Future.successful(f)
case Fail(u) =>
Future.successful(Fail(Unrecoverable(u)))
case st => Future.successful(st)
}
case Success(a) => suc(v, a).recover {
case cause if it.hasNext => err(v, cause)
}.flatMap {
case Cont(cv) => go(cv, it)
case Fail(cause) =>
// Prevent error handler at bulk/response level to recover
Future.successful(Fail(Unrecoverable(cause)))
case st => Future.successful(st)
}
}
}
foldBulksM(z, maxDocs)(go, err)
}
def nextResponse(maxDocs: Int): (ExecutionContext, Response) => Future[Option[Response]] = {
if (!tailable) { (ec: ExecutionContext, r: Response) =>
if (!hasNext(r, maxDocs)) {
Future.successful(Option.empty[Response])
} else {
next(r, maxDocs)(ec)
}
} else { (ec: ExecutionContext, r: Response) =>
tailResponse(r, maxDocs)(ec)
}
}
}
@inline private def nextBatchOffset(response: Response): Int =
response.reply.numberReturned + response.reply.startingFrom
@inline private def toReturn(
batchSizeN: Int, maxDocs: Int, offset: Int): Int = {
// Normalizes the max number of documents
val max = if (maxDocs < 0) Int.MaxValue else maxDocs
if (batchSizeN > 0 && (offset + batchSizeN) <= max) {
// Valid `numberToReturn` and next batch won't exceed the max
batchSizeN
} else {
max - offset
}
}
}
|
ornicar/ReactiveMongo
|
driver/src/main/scala/api/DefaultCursor.scala
|
Scala
|
apache-2.0
| 15,491 |
package org.jetbrains.plugins.scala
package lang.rearranger
import java.util
import com.intellij.openapi.util.TextRange
import com.intellij.psi.codeStyle.arrangement._
import com.intellij.psi.codeStyle.arrangement.std.ArrangementSettingsToken
/**
* @author Roman.Shein
* Date: 08.07.13
*/
class ScalaArrangementEntry(parent: ArrangementEntry, startOffset: Int, endOffset: Int,
entryType: ArrangementSettingsToken, name: String, canBeMatched: Boolean,
val innerEntryType: Option[ArrangementSettingsToken])
extends DefaultArrangementEntry(parent, startOffset, endOffset, canBeMatched) with TypeAwareArrangementEntry
with NameAwareArrangementEntry with ModifierAwareArrangementEntry {
val modifiers = new util.HashSet[ArrangementSettingsToken]
def this( parent: ArrangementEntry, range: TextRange, entryType: ArrangementSettingsToken, name: String,
canBeMatched: Boolean, innerEntryType: Option[ArrangementSettingsToken] = None) =
this(parent, range.getStartOffset, range.getEndOffset, entryType, name, canBeMatched, innerEntryType)
override def getName: String = name
override def getModifiers: util.Set[ArrangementSettingsToken] = modifiers
override def getTypes: util.Set[ArrangementSettingsToken] = {
val res = new util.HashSet[ArrangementSettingsToken]()
res.add(entryType)
res
}
def getType: ArrangementSettingsToken = entryType
def addModifier(mod: ArrangementSettingsToken): Boolean = modifiers.add(mod)
override def toString = s"[$startOffset, $endOffset)" //text range represented by this entry
override def hashCode: Int = startOffset + endOffset
override def equals(o: Any): Boolean = o match {
case other: ScalaArrangementEntry => other.getStartOffset == startOffset && other.getEndOffset == endOffset &&
other.getType == entryType && other.getParent == parent
case _ => false
}
def spansTextRange(range: TextRange) =
range.getStartOffset == getStartOffset && range.getEndOffset == getEndOffset
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/rearranger/ScalaArrangementEntry.scala
|
Scala
|
apache-2.0
| 2,089 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkasse.example
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes.PermanentRedirect
import akka.http.scaladsl.server.Directives
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import de.heikoseeberger.akkasse.{ EventStreamMarshalling, ServerSentEvent }
import java.time.LocalTime
import java.time.format.DateTimeFormatter
import scala.concurrent.duration.DurationInt
object TimeServer {
def main(args: Array[String]): Unit = {
implicit val system = ActorSystem()
implicit val mat = ActorMaterializer()
Http().bindAndHandle(route, "127.0.0.1", 9000)
}
def route = {
import Directives._
import EventStreamMarshalling._
def assets =
getFromResourceDirectory("web") ~ pathSingleSlash(
get(redirect("index.html", PermanentRedirect))
)
def events = path("events") {
get {
complete {
Source
.tick(2.seconds, 2.seconds, NotUsed)
.map(_ => LocalTime.now())
.map(dateTimeToServerSentEvent)
.keepAlive(1.second, () => ServerSentEvent.heartbeat)
}
}
}
assets ~ events
}
def dateTimeToServerSentEvent(time: LocalTime): ServerSentEvent =
ServerSentEvent(DateTimeFormatter.ISO_LOCAL_TIME.format(time))
}
|
viktorklang/akka-sse
|
akka-sse-example/src/main/scala/de/heikoseeberger/akkasse/example/TimeServer.scala
|
Scala
|
apache-2.0
| 1,981 |
// scala gives you tons of ways of writing *anonymous Functions*
val predicate: Int => Boolean = (x: Int) => x > 2
val predicate: Int => Boolean = x => x > 2
val predicate: Int => Boolean = _ > 2
val predicate: Int => Boolean = (_: Int) > 2 // YUCK!
val predicate: Int => Boolean = {
case x if x > 2 => true
case _ => false
}
// look up PartialFunction to learn more about this syntax
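// a quick, illustrative sketch of that idea (not part of the original slide): a
// PartialFunction is only defined for some inputs, and `lift` turns it into a
// total function that returns an Option instead of throwing for undefined input
val big: PartialFunction[Int, String] = { case x if x > 2 => s"$x is big" }
big.isDefinedAt(1) // false
big.lift(5)        // Some("5 is big")
big.lift(1)        // None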
|
agconti/scala-school
|
04-functions-as-values/slides/slide056.scala
|
Scala
|
mit
| 399 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionsException, PartitionsAlreadyExistException}
import org.apache.spark.sql.connector.catalog.{CatalogV2Implicits, Identifier}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits
class AlterTablePartitionV2SQLSuite extends DatasourceV2SQLBase {
import CatalogV2Implicits._
import DataSourceV2Implicits._
test("ALTER TABLE RECOVER PARTITIONS") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t RECOVER PARTITIONS")
}
assert(e.message.contains("ALTER TABLE RECOVER PARTITIONS is only supported with v1 tables"))
}
}
test("ALTER TABLE ADD PARTITION") {
val t = "testpart.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
spark.sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'")
val partTable = catalog("testpart").asTableCatalog
.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")).asInstanceOf[InMemoryPartitionTable]
assert(partTable.partitionExists(InternalRow.fromSeq(Seq(1))))
val partMetadata = partTable.loadPartitionMetadata(InternalRow.fromSeq(Seq(1)))
assert(partMetadata.containsKey("location"))
assert(partMetadata.get("location") == "loc")
}
}
test("ALTER TABLE ADD PARTITIONS") {
val t = "testpart.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
spark.sql(
s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc' PARTITION (id=2) LOCATION 'loc1'")
val partTable = catalog("testpart").asTableCatalog
.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")).asInstanceOf[InMemoryPartitionTable]
assert(partTable.partitionExists(InternalRow.fromSeq(Seq(1))))
assert(partTable.partitionExists(InternalRow.fromSeq(Seq(2))))
val partMetadata = partTable.loadPartitionMetadata(InternalRow.fromSeq(Seq(1)))
assert(partMetadata.containsKey("location"))
assert(partMetadata.get("location") == "loc")
val partMetadata1 = partTable.loadPartitionMetadata(InternalRow.fromSeq(Seq(2)))
assert(partMetadata1.containsKey("location"))
assert(partMetadata1.get("location") == "loc1")
}
}
test("ALTER TABLE ADD PARTITIONS: partition already exists") {
val t = "testpart.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
spark.sql(
s"ALTER TABLE $t ADD PARTITION (id=2) LOCATION 'loc1'")
assertThrows[PartitionsAlreadyExistException](
spark.sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'" +
" PARTITION (id=2) LOCATION 'loc1'"))
val partTable = catalog("testpart").asTableCatalog
.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")).asInstanceOf[InMemoryPartitionTable]
assert(!partTable.partitionExists(InternalRow.fromSeq(Seq(1))))
spark.sql(s"ALTER TABLE $t ADD IF NOT EXISTS PARTITION (id=1) LOCATION 'loc'" +
" PARTITION (id=2) LOCATION 'loc1'")
assert(partTable.partitionExists(InternalRow.fromSeq(Seq(1))))
assert(partTable.partitionExists(InternalRow.fromSeq(Seq(2))))
}
}
test("ALTER TABLE RENAME PARTITION") {
val t = "testcat.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE $t PARTITION (id=1) RENAME TO PARTITION (id=2)")
}
assert(e.message.contains("ALTER TABLE RENAME PARTITION is only supported with v1 tables"))
}
}
test("ALTER TABLE DROP PARTITION") {
val t = "testpart.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
spark.sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'")
spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1)")
val partTable =
catalog("testpart").asTableCatalog.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl"))
assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1))))
}
}
test("ALTER TABLE DROP PARTITIONS") {
val t = "testpart.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
spark.sql(s"ALTER TABLE $t ADD IF NOT EXISTS PARTITION (id=1) LOCATION 'loc'" +
" PARTITION (id=2) LOCATION 'loc1'")
spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)")
val partTable =
catalog("testpart").asTableCatalog.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl"))
assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1))))
assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(2))))
assert(partTable.asPartitionable.listPartitionIdentifiers(InternalRow.empty).isEmpty)
}
}
test("ALTER TABLE DROP PARTITIONS: partition not exists") {
val t = "testpart.ns1.ns2.tbl"
withTable(t) {
spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)")
spark.sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'")
assertThrows[NoSuchPartitionsException](
spark.sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)"))
val partTable =
catalog("testpart").asTableCatalog.loadTable(Identifier.of(Array("ns1", "ns2"), "tbl"))
assert(partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1))))
spark.sql(s"ALTER TABLE $t DROP IF EXISTS PARTITION (id=1), PARTITION (id=2)")
assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(1))))
assert(!partTable.asPartitionable.partitionExists(InternalRow.fromSeq(Seq(2))))
assert(partTable.asPartitionable.listPartitionIdentifiers(InternalRow.empty).isEmpty)
}
}
}
|
shuangshuangwang/spark
|
sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala
|
Scala
|
apache-2.0
| 7,060 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.param.{IntParam, ParamMap, ParamValidators}
import org.apache.spark.ml.param.shared.{HasInputCols, HasOutputCol}
import org.apache.spark.ml.util.{DefaultParamsReadable, DefaultParamsWritable, Identifiable, SchemaUtils}
import org.apache.spark.mllib.feature.{HashingTF => OldHashingTF}
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.OpenHashMap
/**
* Feature hashing projects a set of categorical or numerical features into a feature vector of
* specified dimension (typically substantially smaller than that of the original feature
* space). This is done using the hashing trick (https://en.wikipedia.org/wiki/Feature_hashing)
* to map features to indices in the feature vector.
*
* The [[FeatureHasher]] transformer operates on multiple columns. Each column may contain either
* numeric or categorical features. Behavior and handling of column data types is as follows:
* -Numeric columns: For numeric features, the hash value of the column name is used to map the
* feature value to its index in the feature vector. Numeric features are never
* treated as categorical, even when they are integers. You must explicitly
* convert numeric columns containing categorical features to strings first.
* -String columns: For categorical features, the hash value of the string "column_name=value"
* is used to map to the vector index, with an indicator value of `1.0`.
* Thus, categorical features are "one-hot" encoded
* (similarly to using [[OneHotEncoder]] with `dropLast=false`).
* -Boolean columns: Boolean values are treated in the same way as string columns. That is,
* boolean features are represented as "column_name=true" or "column_name=false",
* with an indicator value of `1.0`.
*
* Null (missing) values are ignored (implicitly zero in the resulting feature vector).
*
* Since a simple modulo is used to transform the hash function to a vector index,
* it is advisable to use a power of two as the numFeatures parameter;
* otherwise the features will not be mapped evenly to the vector indices.
*
* {{{
* val df = Seq(
* (2.0, true, "1", "foo"),
* (3.0, false, "2", "bar")
* ).toDF("real", "bool", "stringNum", "string")
*
* val hasher = new FeatureHasher()
 *   .setInputCols("real", "bool", "stringNum", "string")
* .setOutputCol("features")
*
* hasher.transform(df).show()
*
* +----+-----+---------+------+--------------------+
* |real| bool|stringNum|string| features|
* +----+-----+---------+------+--------------------+
* | 2.0| true| 1| foo|(262144,[51871,63...|
* | 3.0|false| 2| bar|(262144,[6031,806...|
* +----+-----+---------+------+--------------------+
* }}}
*/
@Experimental
@Since("2.3.0")
class FeatureHasher(@Since("2.3.0") override val uid: String) extends Transformer
with HasInputCols with HasOutputCol with DefaultParamsWritable {
@Since("2.3.0")
def this() = this(Identifiable.randomUID("featureHasher"))
/**
* Number of features. Should be greater than 0.
* (default = 2^18^)
* @group param
*/
@Since("2.3.0")
val numFeatures = new IntParam(this, "numFeatures", "number of features (> 0)",
ParamValidators.gt(0))
setDefault(numFeatures -> (1 << 18))
/** @group getParam */
@Since("2.3.0")
def getNumFeatures: Int = $(numFeatures)
/** @group setParam */
@Since("2.3.0")
def setNumFeatures(value: Int): this.type = set(numFeatures, value)
/** @group setParam */
@Since("2.3.0")
def setInputCols(values: String*): this.type = setInputCols(values.toArray)
/** @group setParam */
@Since("2.3.0")
def setInputCols(value: Array[String]): this.type = set(inputCols, value)
/** @group setParam */
@Since("2.3.0")
def setOutputCol(value: String): this.type = set(outputCol, value)
@Since("2.3.0")
override def transform(dataset: Dataset[_]): DataFrame = {
val hashFunc: Any => Int = OldHashingTF.murmur3Hash
val n = $(numFeatures)
val localInputCols = $(inputCols)
val outputSchema = transformSchema(dataset.schema)
val realFields = outputSchema.fields.filter { f =>
f.dataType.isInstanceOf[NumericType]
}.map(_.name).toSet
def getDouble(x: Any): Double = {
x match {
case n: java.lang.Number =>
n.doubleValue()
case other =>
// will throw ClassCastException if it cannot be cast, as would row.getDouble
other.asInstanceOf[Double]
}
}
val hashFeatures = udf { row: Row =>
val map = new OpenHashMap[Int, Double]()
localInputCols.foreach { colName =>
val fieldIndex = row.fieldIndex(colName)
if (!row.isNullAt(fieldIndex)) {
val (rawIdx, value) = if (realFields(colName)) {
// numeric values are kept as is, with vector index based on hash of "column_name"
val value = getDouble(row.get(fieldIndex))
val hash = hashFunc(colName)
(hash, value)
} else {
// string and boolean values are treated as categorical, with an indicator value of 1.0
// and vector index based on hash of "column_name=value"
val value = row.get(fieldIndex).toString
val fieldName = s"$colName=$value"
val hash = hashFunc(fieldName)
(hash, 1.0)
}
val idx = Utils.nonNegativeMod(rawIdx, n)
map.changeValue(idx, value, v => v + value)
}
}
Vectors.sparse(n, map.toSeq)
}
val metadata = outputSchema($(outputCol)).metadata
dataset.select(
col("*"),
hashFeatures(struct($(inputCols).map(col): _*)).as($(outputCol), metadata))
}
@Since("2.3.0")
override def copy(extra: ParamMap): FeatureHasher = defaultCopy(extra)
@Since("2.3.0")
override def transformSchema(schema: StructType): StructType = {
val fields = schema($(inputCols).toSet)
fields.foreach { fieldSchema =>
val dataType = fieldSchema.dataType
val fieldName = fieldSchema.name
require(dataType.isInstanceOf[NumericType] ||
dataType.isInstanceOf[StringType] ||
dataType.isInstanceOf[BooleanType],
s"FeatureHasher requires columns to be of NumericType, BooleanType or StringType. " +
s"Column $fieldName was $dataType")
}
val attrGroup = new AttributeGroup($(outputCol), $(numFeatures))
SchemaUtils.appendColumn(schema, attrGroup.toStructField())
}
}
@Since("2.3.0")
object FeatureHasher extends DefaultParamsReadable[FeatureHasher] {
@Since("2.3.0")
override def load(path: String): FeatureHasher = super.load(path)
}
|
UndeadBaneGitHub/spark
|
mllib/src/main/scala/org/apache/spark/ml/feature/FeatureHasher.scala
|
Scala
|
apache-2.0
| 7,940 |
package com.etsy.conjecture.text
import com.etsy.conjecture.data.{ AbstractInstance, BinaryLabeledInstance, LabeledInstance, StringKeyedVector }
import com.twitter.algebird.Operators._
import cascading.tuple.Fields
import cascading.pipe.Pipe
import scala.collection.JavaConverters._
object FeatureHelper {
import com.twitter.scalding.Dsl._
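    /**
     * Keeps only frequent features: counts, for each feature key, how many instances
     * contain it, then removes from every instance vector the keys that occur in `n`
     * or fewer instances. Accepts a StringKeyedVector, AbstractInstance or
     * LabeledInstance in `instance_field`.
     */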
def keepFeaturesWithCountGreaterThan(pipe: Pipe, instance_field: Fields, n: Int): Pipe = {
val counts = pipe
.flatMapTo(instance_field -> ('term, '__count)) {
v: AnyRef =>
val vector = v match {
case skv: StringKeyedVector => skv
case ins: AbstractInstance[_] => ins.getVector
case lin: LabeledInstance[_] => lin.getVector
case _ => throw new IllegalArgumentException("keepFeaturesWithCountGreaterThan does not expect class: " + v.getClass.getName)
}
vector.keySet.asScala.map { k => k -> 1 }
}
.groupBy('term) { _.sum('__count) }
.filter('__count) { c: Long => c > n }
.mapTo('term -> 'set) { t: String => Set(t) }
.groupAll { _.plus[Set[String]]('set) }
pipe
.crossWithTiny(counts)
.map(instance_field.append('set) -> instance_field) { x: (AnyRef, Set[String]) =>
val skv = x._1 match {
case s: StringKeyedVector => s
case i: AbstractInstance[_] => i.getVector
case l: LabeledInstance[_] => l.getVector
case _ => throw new IllegalArgumentException("keepFeaturesWithCountGreaterThan does not expect class: " + x._1.getClass.getName)
}
val it = skv.iterator
while (it.hasNext) {
val e = it.next
if (!x._2.contains(e.getKey)) {
it.remove
}
}
x._1
}
}
def nGramsUpTo(string: String, n: Int = 2, prefix: String = ""): List[String] = {
val toks = Text(string.toLowerCase).standardTextFilter.toString.split(" ").toList
val toks_pad = "" +: toks :+ ""
val grams = (1 to n).map { m => toks_pad.sliding(m).toList.map { p => p.mkString("::") } }.foldLeft(List[String]()) { _ ++ _ }
grams.filter { g => g != "" }.map { g => prefix + g }
}
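    // Illustrative example (assuming the standard text filter keeps both words intact):
    // nGramsUpTo("Red Shoes", 2, "t_") ==
    //   List("t_red", "t_shoes", "t_::red", "t_red::shoes", "t_shoes::")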
def stringListToSKV(list: List[String], weight: Double = 1.0): StringKeyedVector = {
val skv = new StringKeyedVector();
list.foreach { f => skv.setCoordinate(f, weight) }
skv
}
def getEmailBody(body: String): Option[String] = {
val p = parseEmailBodyToTextAndType(body)
if (p._1 != null)
Some(p._1)
else
None
}
def parseEmailBodyToTextAndType(body: String): (String, String) = {
try {
val email = com.codahale.jerkson.Json.parse[List[Map[String, String]]](body)
val textParts = email.filter(part => part("type") == "text/plain")
if (textParts.length > 0)
(textParts.map(part => part("body")).mkString(" "), "text/plain")
else {
val htmlParts = email.filter(part => part("type") == "text/html")
if (htmlParts.length > 0)
(htmlParts.map(part => part("body")).mkString(" "), "text/html")
else
(null, "filter") // Filter this email
}
} catch {
case _ => (null, "filter")
}
}
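    // Illustrative input shape, inferred from the parsing above rather than real data:
    //   [{"type": "text/plain", "body": "hello"}, {"type": "text/html", "body": "<p>hi</p>"}]
    // yields ("hello", "text/plain"); if only text/html parts are present they are used
    // instead, and anything unparseable maps to (null, "filter").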
}
|
zviri/Conjecture
|
src/main/scala/com/etsy/conjecture/text/FeatureHelper.scala
|
Scala
|
mit
| 3,659 |
package org.bitcoins.spvnode.messages.data
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.util.Factory
import org.bitcoins.spvnode.messages.NotFoundMessage
import org.bitcoins.spvnode.serializers.messages.data.RawNotFoundMessageSerializer
/**
* Created by chris on 6/2/16.
* The companion object factory used to create NotFoundMessages on the p2p network
* https://bitcoin.org/en/developer-reference#notfound
*/
object NotFoundMessage extends Factory[NotFoundMessage] {
private case class NotFoundMessageImpl(inventoryCount : CompactSizeUInt, inventories : Seq[Inventory]) extends NotFoundMessage
def fromBytes(bytes : Seq[Byte]) : NotFoundMessage = RawNotFoundMessageSerializer.read(bytes)
def apply(inventoryCount : CompactSizeUInt, inventories : Seq[Inventory]) : NotFoundMessage = {
NotFoundMessageImpl(inventoryCount,inventories)
}
}
|
Christewart/bitcoin-s-spv-node
|
src/main/scala/org/bitcoins/spvnode/messages/data/NotFoundMessage.scala
|
Scala
|
mit
| 892 |
package models.daos
import java.util.UUID
import scala.concurrent.Future
import models.{ Major, Subject, Department }
trait MajorSubjectDAO {
def findByMajor(id: UUID): Future[Seq[(Subject, Boolean)]]
def findBySubject(code: String, department: Department): Future[Seq[(Major, Boolean)]]
}
|
yoo-haemin/hufs-planner
|
project/app/models/daos/MajorSubjectDAO.scala
|
Scala
|
agpl-3.0
| 310 |
package endpoints.xhr.future
import endpoints.xhr
import scala.concurrent.{Future, Promise}
/**
* Implements [[xhr.Endpoints]] by using Scala’s [[Future]]s.
*/
trait Endpoints extends xhr.Endpoints {
/** Maps `Result` to [[Future]] */
type Result[A] = Future[A]
def endpoint[A, B](request: Request[A], response: Response[B]): Endpoint[A, B] =
(a: A) => {
val promise = Promise[B]()
performXhr(request, response, a)(
_.fold(exn => { promise.failure(exn); () }, b => { promise.success(b); () }),
xhr => { promise.failure(new Exception(xhr.responseText)); () }
)
promise.future
}
}
|
Krever/endpoints
|
xhr/client/src/main/scala/endpoints/xhr/future/Endpoints.scala
|
Scala
|
mit
| 644 |
class MultiApply extends (Int => Int) with ((Int, Int) => Int) {
def apply(x: Int): Int = x + x
def apply(x: Int, y: Int): Int = x * y
}
@main
def Test = {
val fun = new MultiApply
assert(fun(2) == 4)
assert(fun(2, 3) == 6)
}
|
dotty-staging/dotty
|
tests/run/multi-apply.scala
|
Scala
|
apache-2.0
| 236 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.lib
import scala.reflect.ClassTag
import org.apache.spark.Logging
import org.apache.spark.graphx._
/**
 * PageRank algorithm implementation. There are two implementations of PageRank.
*
* The first implementation uses the [[Pregel]] interface and runs PageRank for a fixed number
* of iterations:
* {{{
* var PR = Array.fill(n)( 1.0 )
* val oldPR = Array.fill(n)( 1.0 )
* for( iter <- 0 until numIter ) {
* swap(oldPR, PR)
* for( i <- 0 until n ) {
* PR[i] = alpha + (1 - alpha) * inNbrs[i].map(j => oldPR[j] / outDeg[j]).sum
* }
* }
* }}}
*
* The second implementation uses the standalone [[Graph]] interface and runs PageRank until
* convergence:
*
* {{{
* var PR = Array.fill(n)( 1.0 )
* val oldPR = Array.fill(n)( 0.0 )
* while( max(abs(PR - oldPr)) > tol ) {
* swap(oldPR, PR)
* for( i <- 0 until n if abs(PR[i] - oldPR[i]) > tol ) {
 *     PR[i] = alpha + (1 - alpha) * inNbrs[i].map(j => oldPR[j] / outDeg[j]).sum
* }
* }
* }}}
*
* `alpha` is the random reset probability (typically 0.15), `inNbrs[i]` is the set of
 * neighbors which link to `i` and `outDeg[j]` is the out degree of vertex `j`.
*
* Note that this is not the "normalized" PageRank and as a consequence pages that have no
* inlinks will have a PageRank of alpha.
*/
object PageRank extends Logging {
/**
* Run PageRank for a fixed number of iterations returning a graph
* with vertex attributes containing the PageRank and edge
* attributes the normalized edge weight.
*
* @tparam VD the original vertex attribute (not used)
* @tparam ED the original edge attribute (not used)
*
* @param graph the graph on which to compute PageRank
* @param numIter the number of iterations of PageRank to run
* @param resetProb the random reset probability (alpha)
*
   * @return the graph with each vertex containing the PageRank and each edge
* containing the normalized weight.
*
*/
def run[VD: ClassTag, ED: ClassTag](
graph: Graph[VD, ED], numIter: Int, resetProb: Double = 0.15): Graph[Double, Double] =
{
// Initialize the PageRank graph with each edge attribute having
// weight 1/outDegree and each vertex with attribute 1.0.
var rankGraph: Graph[Double, Double] = graph
// Associate the degree with each vertex
.outerJoinVertices(graph.outDegrees) { (vid, vdata, deg) => deg.getOrElse(0) }
// Set the weight on the edges based on the degree
.mapTriplets( e => 1.0 / e.srcAttr, TripletFields.Src )
// Set the vertex attributes to the initial pagerank values
.mapVertices( (id, attr) => resetProb )
var iteration = 0
var prevRankGraph: Graph[Double, Double] = null
while (iteration < numIter) {
rankGraph.cache()
// Compute the outgoing rank contributions of each vertex, perform local preaggregation, and
// do the final aggregation at the receiving vertices. Requires a shuffle for aggregation.
val rankUpdates = rankGraph.aggregateMessages[Double](
ctx => ctx.sendToDst(ctx.srcAttr * ctx.attr), _ + _, TripletFields.Src)
// Apply the final rank updates to get the new ranks, using join to preserve ranks of vertices
// that didn't receive a message. Requires a shuffle for broadcasting updated ranks to the
// edge partitions.
prevRankGraph = rankGraph
rankGraph = rankGraph.joinVertices(rankUpdates) {
(id, oldRank, msgSum) => resetProb + (1.0 - resetProb) * msgSum
}.cache()
rankGraph.edges.foreachPartition(x => {}) // also materializes rankGraph.vertices
logInfo(s"PageRank finished iteration $iteration.")
prevRankGraph.vertices.unpersist(false)
prevRankGraph.edges.unpersist(false)
iteration += 1
}
rankGraph
}
/**
* Run a dynamic version of PageRank returning a graph with vertex attributes containing the
* PageRank and edge attributes containing the normalized edge weight.
*
* @tparam VD the original vertex attribute (not used)
* @tparam ED the original edge attribute (not used)
*
* @param graph the graph on which to compute PageRank
* @param tol the tolerance allowed at convergence (smaller => more accurate).
* @param resetProb the random reset probability (alpha)
*
   * @return the graph with each vertex containing the PageRank and each edge
* containing the normalized weight.
*/
def runUntilConvergence[VD: ClassTag, ED: ClassTag](
graph: Graph[VD, ED], tol: Double, resetProb: Double = 0.15): Graph[Double, Double] =
{
// Initialize the pagerankGraph with each edge attribute
// having weight 1/outDegree and each vertex with attribute 1.0.
val pagerankGraph: Graph[(Double, Double), Double] = graph
// Associate the degree with each vertex
.outerJoinVertices(graph.outDegrees) {
(vid, vdata, deg) => deg.getOrElse(0)
}
// Set the weight on the edges based on the degree
.mapTriplets( e => 1.0 / e.srcAttr )
      // Set the vertex attributes to (initialPR, delta = 0)
.mapVertices( (id, attr) => (0.0, 0.0) )
.cache()
// Define the three functions needed to implement PageRank in the GraphX
// version of Pregel
def vertexProgram(id: VertexId, attr: (Double, Double), msgSum: Double): (Double, Double) = {
val (oldPR, lastDelta) = attr
val newPR = oldPR + (1.0 - resetProb) * msgSum
(newPR, newPR - oldPR)
}
def sendMessage(edge: EdgeTriplet[(Double, Double), Double]) = {
if (edge.srcAttr._2 > tol) {
Iterator((edge.dstId, edge.srcAttr._2 * edge.attr))
} else {
Iterator.empty
}
}
def messageCombiner(a: Double, b: Double): Double = a + b
// The initial message received by all vertices in PageRank
val initialMessage = resetProb / (1.0 - resetProb)
// Execute a dynamic version of Pregel.
Pregel(pagerankGraph, initialMessage, activeDirection = EdgeDirection.Out)(
vertexProgram, sendMessage, messageCombiner)
.mapVertices((vid, attr) => attr._1)
} // end of deltaPageRank
}
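// A minimal usage sketch (assumes a SparkContext `sc` is in scope; the edge list is
// purely illustrative):
//
//   import org.apache.spark.graphx.{Edge, Graph}
//   val edges = sc.parallelize(Seq(Edge(1L, 2L, 1), Edge(2L, 3L, 1), Edge(3L, 1L, 1)))
//   val graph = Graph.fromEdges(edges, defaultValue = 1)
//   val fixed = PageRank.run(graph, numIter = 10).vertices
//   val dynamic = PageRank.runUntilConvergence(graph, tol = 1e-4).vertices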
|
sjtu-iiot/graphx-algorithm
|
src/main/scala/org/apache/spark/graphx/lib/PageRank.scala
|
Scala
|
gpl-2.0
| 6,967 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.config
import scala.collection.JavaConverters._
object SerializerConfig {
// serializer config constants
val SERIALIZER_PREFIX = "serializers.registry.%s"
val SERDE_FACTORY_CLASS = "serializers.registry.%s.class"
val SERIALIZED_INSTANCE_SUFFIX = ".samza.serialized.instance"
val SERDE_SERIALIZED_INSTANCE = SERIALIZER_PREFIX + SERIALIZED_INSTANCE_SUFFIX
implicit def Config2Serializer(config: Config) = new SerializerConfig(config)
}
class SerializerConfig(config: Config) extends ScalaMapConfig(config) {
def getSerdeClass(name: String) = getOption(SerializerConfig.SERDE_FACTORY_CLASS format name)
/**
* Returns a list of all serializer names from the config file. Useful for
* getting individual serializers.
*/
import SerializerConfig._
def getSerdeNames() = {
val subConf = config.subset(SERIALIZER_PREFIX format "", true)
subConf.asScala.keys.filter(k => k.endsWith(".class")).map(_.replace(".class", ""))
}
}
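// Illustrative configuration (the serde names and factory classes are hypothetical):
//
//   serializers.registry.json.class   = org.apache.samza.serializers.JsonSerdeFactory
//   serializers.registry.string.class = org.apache.samza.serializers.StringSerdeFactory
//
// With that config, getSerdeNames() yields "json" and "string", and
// getSerdeClass("json") returns Some("org.apache.samza.serializers.JsonSerdeFactory").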
|
TiVo/samza
|
samza-core/src/main/scala/org/apache/samza/config/SerializerConfig.scala
|
Scala
|
apache-2.0
| 1,793 |
package predict4s
package sgp
import org.scalatest.FunSuite
import org.scalactic.TolerantNumerics
import org.scalactic.Equality
import predict4s.coord._
class GeoPotentialStateCheck extends FunSuite with TLE00005 with TLE06251 {
implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(1E-9)
implicit val wgs = SGP72Constants.tleDoubleConstants
def sgpImpl : String = "Vallado SGP4"
def buildGeoPotential(tle: TLE) = {
import spire.implicits._
val elem0Ctx = SGPElemsConversions.sgpElemsAndContext(tle, wgs).get
val geoPotCtx = BrouwerLaneSecularCorrections.geoPotentialCoefs(elem0Ctx)
(elem0Ctx, geoPotCtx)
}
test(s"${sgpImpl}: compare GeoPotentialState for 00005 when t=0") {
val (elem0Ctx, geoPotCtx) = buildGeoPotential(tle00005)
assert(elem0Ctx.isImpacting == false)
assert(elem0Ctx.isDeepSpace == false)
checkSgp4GeoPotential_5(elem0Ctx, geoPotCtx)
}
test(s"${sgpImpl}: compare GeoPotentialState for 06251 when t=0") {
val (elem0Ctx, geoPotCtx) = buildGeoPotential(tle06251)
assert(elem0Ctx.isImpacting == false)
assert(elem0Ctx.isDeepSpace == false)
checkSgp4GeoPotential_06251(elem0Ctx, geoPotCtx)
}
def checkSgp4GeoPotential_5(elem0Ctx: SGPElemsCtx[Double], geoPot: GeoPotentialCtx[Double]) = {
import elem0Ctx.{elem,iCtx,eCtx,wgs,rp}
import elem._,iCtx.`3c²-1`,eCtx.`β0²` // rteosq
import geoPot.{_1=>gcoef,_2=>geoctx},gcoef._,geoctx._
val ωcof = C3*bStar*math.cos(ω)
assert( n === 0.047206302); assert( a === 1.353899821);
assert( e === 0.185966700);
assert( I === 0.598092919); assert( ω === 5.790416027);
assert( Ω === 6.086385471); assert( M === 0.337309313);
assert( bStar === 0.000028098); assert( epoch === 18441.784950620);
assert( rp === 1.102119539);
assert( `3c²-1` === 1.048865088); assert( `β0²` === 0.965416386); assert( ωcof === 0.000000000 );
assert( C1 === 0.000000000); assert( C4 === 0.000000526); assert( C5 === 0.000016465);
assert( D2 === 0.000000000); assert( D3 === 0.000000000); assert( D4 === 0.000000000);
}
def checkSgp4GeoPotential_06251(elem0Ctx: SGPElemsCtx[Double], geoPot: GeoPotentialCtx[Double]) = {
import elem0Ctx.{elem,iCtx,eCtx,wgs,rp}
import elem._,iCtx.`3c²-1`,eCtx.`β0²` // rteosq
import geoPot.{_1=>gcoef,_2=>geoctx},gcoef._,geoctx._
val ωcof = C3*bStar*math.cos(ω)
assert( n === 0.067918037); assert( a === 1.062338933);
assert( e === 0.003003500);
assert( I === 1.013301512); assert( ω === 2.428744337);
assert( Ω === 0.943219561); assert( M === 3.860413487);
assert( bStar === 0.000128080); assert( epoch === 20630.824120140);
assert( rp === 1.059148199);
assert( `3c²-1` === -0.160280193); assert( `β0²` === 0.999990979); assert( ωcof === -0.000000052 )
assert( C1 === 0.000000003); assert( C4 === 0.000005200); assert( C5 === 0.000650194);
assert( D2 === 0.000000000); assert( D3 === 0.000000000); assert( D4 === 0.000000000);
}
}
|
pleira/SGP4Extensions
|
tests/src/test/scala/predict4s/sgp/GeoPotentialStateCheck.scala
|
Scala
|
apache-2.0
| 3,325 |
import sbt._
import sbt.Keys._
import sbt.Def.Initialize
import scala.util.Try
import BackgroundServiceKeys._
import spray.revolver.RevolverPlugin.Revolver
import spray.revolver.AppProcess
import scala.util.{ Success, Failure }
import scala.concurrent.duration._
import java.util.concurrent.TimeoutException
import Utils.retryBlocking
object BackgroundService {
/** Builds should normally import these default settings */
val settings = Revolver.settings ++ Seq(
dependenciesToStart := Seq(),
startDependencies := startDependenciesTask.value,
stopDependencies := stopDependenciesTask.value,
start := startServiceTask.value,
stop := stopServiceTask.value,
startSolo := startSoloTask.value,
stopSolo := stopSoloTask.value,
status := Revolver.reStatus.value,
healthCheckFn := { () => Success() }, // TODO instead check the health port by default
healthCheck := healthCheckTask.value,
waitUntilHealthy := waitUntilHealthyTask.value,
Revolver.reLogTag := name.value,
jmxPort := None,
programArgs := Nil,
adminPort := None,
healthPort := None
    // (test in IntegrationTest) := itTestTask.value // LATER enable this by conditionally checking whether IntegrationTest configuration is present
)
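  // Illustrative wiring in a service's build definition (project names and port
  // values are hypothetical):
  //
  //   lazy val api = project
  //     .settings(BackgroundService.settings: _*)
  //     .settings(
  //       BackgroundServiceKeys.adminPort := Some(18080),
  //       BackgroundServiceKeys.dependenciesToStart := Seq(db)
  //     )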
  /** a replacement for it:test that first launches the dependency services. */
lazy val itTestTask: Initialize[Task[Unit]] = Def.taskDyn {
// to avoid initialization issues (using the same key we're replacing), we repeat what it:test does
// in sbt and call executeTests with appropriate logging
// (SCALA/SBT is there a better way?)
val resultLogger = (testResultLogger in (Test, test)).value
val taskName = Project.showContextKey(state.value)(resolvedScoped.value)
val log = streams.value.log
(executeTests in IntegrationTest).map { results =>
resultLogger.run(log, results, taskName)
}.dependsOn(startDependencies)
}
/** task that returns true/false if the service is healthy, based on the Build provided healthCheckFn */
private lazy val healthCheckTask: Initialize[Task[Boolean]] = Def.task {
val logger = spray.revolver.Utilities.colorLogger(streams.value.log)
val healthy = healthCheckFn.value.apply().isSuccess
val healthyStr = if (healthy) { "[GREEN]healthy" } else { "[RED]not healthy" }
logger.info(s"${name.value} is $healthyStr")
healthy
}
/** start services that this service depends on */
private lazy val startDependenciesTask: Initialize[Task[Unit]] = Def.taskDyn {
val starts = dependenciesToStart.value.map { project => (start in project) }
Def.task().dependsOn(starts: _*)
}
/** stop services that this service depends on */
private lazy val stopDependenciesTask: Initialize[Task[Unit]] = Def.taskDyn {
val stops = dependenciesToStart.value.map { project => (stop in project) }
Def.task().dependsOn(stops: _*)
}
/** start dependent services, then start this service */
private lazy val startServiceTask: Initialize[Task[Unit]] = Def.taskDyn {
startSolo.dependsOn(startDependencies)
}
/** stop this service, then stop dependent services */
private lazy val stopServiceTask: Initialize[Task[Unit]] = Def.taskDyn {
stopDependencies.dependsOn(stopSolo)
}
/** stop this service, via the admin port if it has one, otherwise by terminating the jvm */
private lazy val stopSoloTask: Initialize[Task[Unit]] = Def.taskDyn {
adminPort.value.foreach { port =>
RunServices.stopService(streams.value, name.value, port)
awaitShutdown.value
}
Revolver.reStop // kills the jvm
}
  /** Returns an sbt task which invokes the revolver start mechanism
* and then blocks until the service is healthy.
*
* We invoke revolver directly so that we have control over the parameters passed into revolver
*/
private lazy val startSoloTask: Initialize[Task[Unit]] = Def.taskDyn {
val _ = (products in Compile).value // TODO necessary?
val logger = spray.revolver.Utilities.colorLogger(streams.value.log)
lazy val isHealthy: Boolean = healthCheckFn.value.apply().isSuccess
spray.revolver.Actions.revolverState.getProcess(thisProjectRef.value) match {
case Some(process) if process.isRunning =>
logger.info(s"${Revolver.reLogTag.value} already running")
emptyTask
case _ if isHealthy =>
logger.info(s"${Revolver.reLogTag.value} already healthy")
emptyTask
case _ =>
waitUntilHealthy.dependsOn(startWithRevolver) // TODO use SbtUtil.inOrder?
}
}
/** an Sbt task that blocks until this service's health check succeeds */
private lazy val waitUntilHealthyTask = Def.task {
val logger = spray.revolver.Utilities.colorLogger(streams.value.log)
val tryHealth: () => Try[Unit] = healthCheckFn.value
/** Return a try with the result of the healthCheckFn.
*
* If the background process has died, throw an exception, aborting further processing
* of this command.
*/
def checkHealth(): Try[Unit] = {
tryHealth() match {
case checked: Success[Unit] =>
checked
case checkFail: Failure[Unit] if processRunning() =>
checkFail // could be still trying to get started
case Failure(err) =>
logger.info(s"[RED]application ${name.value} has died; abort")
throw err
}
}
/** return true if revolver knows about the service and reports it as running */
def processRunning(): Boolean = {
spray.revolver.Actions.revolverState.getProcess(thisProjectRef.value) match {
case Some(process) if process.isRunning => true
        case _ => false
}
}
val projectName = name.value
logger.info(s"[YELLOW]waiting for application $projectName to be healthy")
retryBlocking(30.seconds, 250.millis) { checkHealth() }
logger.info(s"[GREEN]application $projectName is healthy!")
}
/** A task that launches a service in a background process with Revolver.
* Returns once the project process has been forked (but probably before the app
* has initialized itself).
*/
private lazy val startWithRevolver = Def.task {
val logger = spray.revolver.Utilities.colorLogger(streams.value.log)
val jmxArgs = jmxPort.value.toSeq.flatMap { port =>
Seq(
"-Djava.rmi.server.hostname=localhost",
s"-Dcom.sun.management.jmxremote.port=$port",
"-Dcom.sun.management.jmxremote.ssl=false",
"-Dcom.sun.management.jmxremote.authenticate=false")
}
val jvmOptions = javaOptions.value
// PROJECT_HOME defaults to value of system property
val projectHome = if (!jvmOptions.exists(_.startsWith("-DPROJECT_HOME="))) {
Seq(s"-DPROJECT_HOME=${baseDirectory.value.getAbsolutePath}")
} else { Nil }
val jvmArgs = jmxArgs ++ projectHome ++ jvmOptions
val extraOptions = spray.revolver.Actions.ExtraCmdLineOptions(jvmArgs = jvmArgs, startArgs = programArgs.value)
val main = (mainClass in Revolver.reStart).value
logger.info(
s"[BLUE]about to run ${Revolver.reLogTag.value} (${main.get}) "
+ s"with command line arguments ${extraOptions.startArgs} and JVM arguments $jvmArgs"
)
spray.revolver.Actions.startApp(
streams = streams.value,
logTag = Revolver.reLogTag.value,
project = thisProjectRef.value,
options = Revolver.reForkOptions.value,
mainClass = main,
cp = (fullClasspath in sbt.Runtime).value,
args = Revolver.reStartArgs.value,
startConfig = extraOptions)
}
/** Block until there is no longer a Revolver process running */
private lazy val awaitShutdown = Def.task {
retryBlocking(5.seconds, 100.millis) {
spray.revolver.Actions.revolverState.getProcess(thisProjectRef.value) match {
case Some(process) if !process.isRunning => Success()
case None => Success()
case _ => Failure(new TimeoutException)
}
}
}
/** Trivial do nothing task */
private lazy val emptyTask = Def.task {}
}
|
mighdoll/sparkle
|
project/BackgroundService.scala
|
Scala
|
apache-2.0
| 8,177 |
package org.nkvoll.javabin.json
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.{ DateTimeZone, DateTime }
import spray.json._
trait DateTimeProtocol {
implicit val dateTimeFormat = new JsonFormat[DateTime] {
override def read(json: JsValue): DateTime = json match {
case JsString(value) => ISODateTimeFormat.dateTime().parseDateTime(value).withZone(DateTimeZone.UTC)
case _ => deserializationError("DateTime expected")
}
override def write(obj: DateTime): JsValue = JsString(obj.withZone(DateTimeZone.UTC).toString(ISODateTimeFormat.dateTime()))
}
}
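// A minimal round-trip sketch; this object is illustrative and not part of the original file.
object DateTimeProtocolExample extends DateTimeProtocol {
  // write normalizes to an ISO-8601 UTC string, read parses it back to a UTC DateTime
  def roundTrip(dt: DateTime): DateTime = dateTimeFormat.read(dateTimeFormat.write(dt))
}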
|
nkvoll/javabin-rest-on-akka
|
src/main/scala/org/nkvoll/javabin/json/DateTimeProtocol.scala
|
Scala
|
mit
| 615 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Linked}
case class CP114(value: Int) extends CtBoxIdentifier(name = "Non-trade interest received") with CtInteger
object CP114 extends Linked[CP58, CP114] {
override def apply(source: CP58): CP114 = CP114(source.value)
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/CP114.scala
|
Scala
|
apache-2.0
| 923 |
package com.sksamuel.elastic4s.http.search.aggs
import com.sksamuel.elastic4s.DistanceUnit._
import com.sksamuel.elastic4s.http.ScriptBuilderFn
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.searches.aggs.GeoDistanceAggregation
import com.sksamuel.elastic4s.searches.queries.geo.GeoDistance
object GeoDistanceAggregationBuilder {
def apply(agg: GeoDistanceAggregation): XContentBuilder = {
val builder = XContentFactory.obj()
builder.startObject("geo_distance")
builder.startObject("origin")
builder.field("lat", agg.origin.lat)
builder.field("lon", agg.origin.long)
builder.endObject()
agg.field.foreach(builder.field("field", _))
agg.format.foreach(builder.field("format", _))
agg.missing.foreach(builder.autofield("missing", _))
agg.keyed.foreach(builder.field("keyed", _))
agg.distanceType
.map {
case GeoDistance.Arc => "arc"
case GeoDistance.Plane => "plane"
}
.foreach(builder.field("distance_type", _))
agg.unit
.map {
case INCH => "in"
case YARD => "yd"
case FEET => "ft"
case KILOMETERS => "km"
case NAUTICALMILES => "nmi"
case MILLIMETERS => "mm"
case CENTIMETERS => "cm"
case MILES => "mi"
case METERS => "m"
}
.foreach(builder.field("unit", _))
agg.script.foreach { script =>
builder.rawField("script", ScriptBuilderFn(script))
}
builder.startArray("ranges")
agg.unboundedTo.foreach {
case (keyOpt, to) =>
builder.startObject()
keyOpt.foreach(builder.field("key", _))
builder.field("to", to)
builder.endObject()
}
agg.ranges.foreach {
case (keyOpt, from, to) =>
builder.startObject()
keyOpt.foreach(builder.field("key", _))
builder.field("from", from)
builder.field("to", to)
builder.endObject()
}
agg.unboundedFrom.foreach {
case (keyOpt, from) =>
builder.startObject()
keyOpt.foreach(builder.field("key", _))
builder.field("from", from)
builder.endObject()
}
builder.endArray()
builder.endObject()
SubAggsBuilderFn(agg, builder)
AggMetaDataFn(agg, builder)
builder
}
}
|
Tecsisa/elastic4s
|
elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/aggs/GeoDistanceAggregationBuilder.scala
|
Scala
|
apache-2.0
| 2,361 |
import scala.quoted._
def test(using QuoteContext) = {
val q = '[String]
'[String]
}
|
som-snytt/dotty
|
tests/pos/i4539.scala
|
Scala
|
apache-2.0
| 89 |
/*
* Copyright 2016 Michal Harish, [email protected]
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import akka.http.scaladsl.model.StatusCodes.SeeOther
import com.typesafe.config.ConfigFactory
import io.amient.affinity.Conf
import io.amient.affinity.avro.record.AvroSerde
import io.amient.affinity.core.cluster.Node
import io.amient.affinity.core.storage.LogStorage
import io.amient.affinity.core.util.AffinityTestBase
import io.amient.affinity.kafka.EmbeddedKafka
import io.amient.affinity.spark.LogRDD
import message.{Component, VertexProps}
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer._
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{FlatSpec, Matchers}
import scala.reflect.ClassTag
class AnalyticsSystemSpec extends FlatSpec with AffinityTestBase with EmbeddedKafka with Matchers {
def config = ConfigFactory.load("example")
override def numPartitions = Conf(config).Affi.Keyspace("graph").Partitions()
val node2 = new Node(configure(config, Some(zkConnect), Some(kafkaBootstrap)))
val node1 = new Node(configure(config, Some(zkConnect), Some(kafkaBootstrap)))
override def beforeAll(): Unit = try {
node1.start()
node1.awaitClusterReady
node1.http_post("/connect/1/2").status should be(SeeOther)
node1.http_post("/connect/3/4").status should be(SeeOther)
node1.http_post("/connect/2/3").status should be(SeeOther)
} finally {
super.beforeAll()
}
override def afterAll(): Unit = try {
node2.shutdown()
node1.shutdown()
} finally {
super.afterAll()
}
"Spark Module" should "be able to see avro-serialised state in kafka" in {
node1.get_json(node1.http_get("/vertex/1")).get("data").get("component").asInt should be(1)
node1.get_json(node1.http_get("/vertex/4")).get("data").get("component").asInt should be(1)
implicit val sc = new SparkContext(new SparkConf()
.setMaster("local[4]")
.set("spark.driver.host", "localhost")
.setAppName("Affinity_Spark")
.set("spark.serializer", classOf[KryoSerializer].getName))
implicit val conf = Conf(configure(config, Some(zkConnect), Some(kafkaBootstrap)))
val graphRdd = SparkDriver.graphRdd
val sortedGraph = graphRdd.repartition(1).sortByKey().collect().toList
sortedGraph match {
case (1, VertexProps(_, 1, _)) ::
(2, VertexProps(_, 1, _)) ::
(3, VertexProps(_, 1, _)) ::
(4, VertexProps(_, 1, _)) :: Nil =>
case x =>
throw new AssertionError(s"Graph should contain 4 vertices but was: $x")
}
SparkDriver.componentRdd.collect.toList match {
case (1, Component(_, _)) :: Nil =>
case x => throw new AssertionError(s"Graph should contain 1 component, got: $x")
}
val updateBatch: RDD[(Int, Component)] = sc.parallelize(Array((1, null), (2, Component(0L, Set()))))
SparkDriver.avroUpdate("graph", "components", updateBatch)
SparkDriver.componentRdd.collect.toList match {
case (2, Component(0L, _)) :: Nil =>
case _ => throw new AssertionError("Graph should contain 1 component")
}
}
}
object SparkDriver {
def graphRdd(implicit conf: Conf, sc: SparkContext) = avroRdd[Int, VertexProps]("graph", "graph")
def componentRdd(implicit conf: Conf, sc: SparkContext) = avroRdd[Int, Component]("graph", "components")
def avroRdd[K: ClassTag, V: ClassTag](ks: String, store: String)(implicit conf: Conf, sc: SparkContext): RDD[(K,V)] = {
val avroConf = conf.Affi.Avro
val storageConf = conf.Affi.Keyspace(ks).State(store).Storage
LogRDD(LogStorage.newInstance(storageConf)).compact.present[K, V](AvroSerde.create(avroConf))
}
def avroUpdate[K: ClassTag, V: ClassTag](ks: String, store: String, data: RDD[(K, V)])(implicit conf: Conf, sc: SparkContext): Unit = {
val avroConf = conf.Affi.Avro
val storageConf = conf.Affi.Keyspace(ks).State(store).Storage
LogRDD.append(AvroSerde.create(avroConf), LogStorage.newInstance(storageConf), data)
}
}
|
amient/affinity
|
examples/example-distributed-graph/src/test/scala/AnalyticsSystemSpec.scala
|
Scala
|
apache-2.0
| 4,756 |
package com.clemble.query.model
trait ExpressionField {
val field: String
def asc = Ascending(field)
def desc = Descending(field)
}
case class StringField(field: String) extends ExpressionField {
def is(str: String): Expression = Equals(field, str)
def not(str: String): Expression = NotEquals(field, str)
}
case class IntField(field: String) extends ExpressionField {
def is(num: Int) = IntEquals(field, num)
def not(num: Int) = IntNotEquals(field, num)
def lt(num: Int) = LessThen(field, num)
def <(num: Int) = lt(num)
def lte(num: Int) = LessThenEquals(field, num)
def <=(num: Int) = lte(num)
def gt(num: Int) = GreaterThen(field, num)
def > (num: Int) = gt(num)
def gte(num: Int) = GreaterThenEquals(field, num)
def >= (num: Int) = gte(num)
}
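// A small illustrative sketch of how the DSL composes (field names are hypothetical):
object ExpressionFieldExample {
  val byName = StringField("name") is "clemble"  // Equals("name", "clemble")
  val adults = IntField("age") >= 18             // GreaterThenEquals("age", 18)
  val newestFirst = IntField("age").desc         // Descending("age")
}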
|
clemble/scala-query-dsl
|
src/main/scala/com/clemble/query/model/ExpressionField.scala
|
Scala
|
apache-2.0
| 789 |
object Test extends App {
import scala.reflect.runtime.universe._
//
  // x's owner is outer Test scope. Previously the quasiquote expansion
// looked like:
//
// object Test {
// build.withFreshTermName("doWhile")(n =>
// LabelDef(n, List(),
// Block(
// List({ val x = 1; x }),
// If(Literal(Constant(true)), Apply(Ident(n), List()), Literal(Constant(())))))
// }
//
// Here the proper owner is anonymous function, not the Test. Hence
// symbol corruption. In new encoding this is represented as:
//
// object Test {
// {
// val n = build.freshTermName("doWhile")
// LabelDef(n, List(),
// Block(
// List({ val x = 1; x }),
// If(Literal(Constant(true)), Apply(Ident(n), List()), Literal(Constant(()))))
// }
// }
//
// Owner stays the same and life is good again.
//
println(q"do ${ val x = 1; x } while(true)")
}
|
som-snytt/dotty
|
tests/disabled/reflect/run/t8047.scala
|
Scala
|
apache-2.0
| 998 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs.mount
import slamdata.Predef.List
import pathy.Path._
import pathy.scalacheck.PathyArbitrary._
import scalaz.std.list._
class MountsSpec extends quasar.Qspec {
"Mounts" should {
"adding entries" >> {
"fails when dir is a prefix of existing" >> prop { mnt: AbsDir[Sandboxed] =>
Mounts.singleton(mnt </> dir("c1"), 1).add(mnt, 2) must beLeftDisjunction
}
"fails when dir is prefixed by existing" >> prop { mnt: AbsDir[Sandboxed] =>
Mounts.singleton(mnt, 1).add(mnt </> dir("c2"), 2) must beLeftDisjunction
}
"succeeds when dir not a prefix of existing" >> {
val mnt1: AbsDir[Sandboxed] = rootDir </> dir("one")
val mnt2: AbsDir[Sandboxed] = rootDir </> dir("two")
Mounts.fromFoldable(List((mnt1, 1), (mnt2, 2))) must beRightDisjunction
}
"succeeds when replacing value at existing" >> prop { mnt: AbsDir[Sandboxed] =>
Mounts.singleton(mnt, 1)
.add(mnt, 2)
.toOption
.flatMap(_.toMap.get(mnt)) must beSome(2)
}
}
}
}
|
jedesah/Quasar
|
core/src/test/scala/quasar/fs/mount/MountsSpec.scala
|
Scala
|
apache-2.0
| 1,682 |
package core.model
class Post {
var title: String = ""
var body: String = ""
var info: String = ""
var crawledAt: Long = 0L
}
|
hpedrorodrigues/GizmodoBr
|
app/core/model/Post.scala
|
Scala
|
apache-2.0
| 138 |
/*
* Copyright (C) 2009-2011 Mathias Doenitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.parboiled.scala.rules
import org.parboiled.matchers._
import java.lang.String
import Rule._
import org.parboiled.support.IndexRange
/**
* The base class of all rules pushing a certain number of elements onto the parser value stack.
*/
sealed abstract class PushRule extends Rule
/**
* A rule pushing one new value of a given type onto the parsers value stack.
*/
class Rule1[+A](val matcher: Matcher) extends PushRule {
def ~[Y, Z, AA >: A](other: PopRule3[Y, Z, AA]) = new PopRule2[Y, Z](append(other))
def ~[Z, AA >: A](other: PopRule2[Z, AA]) = new PopRule1[Z](append(other))
def ~[AA >: A](other: PopRule1[AA]) = new Rule0(append(other))
def ~[Y, Z, AA >: A, R](other: ReductionRule3[Y, Z, AA, R]) = new ReductionRule2[Y, Z, R](append(other))
def ~[Y, Z, AA >: A, RA, B](other: ReductionRule3_2[Y, Z, AA, RA, B]) = new ReductionRule2_2[Y, Z, RA, B](append(other))
def ~[Y, Z, AA >: A, RA, B, C](other: ReductionRule3_3[Y, Z, AA, RA, B, C]) = new ReductionRule2_3[Y, Z, RA, B, C](append(other))
def ~[Y, Z, AA >: A, RA, B, C, D](other: ReductionRule3_4[Y, Z, AA, RA, B, C, D]) = new ReductionRule2_4[Y, Z, RA, B, C, D](append(other))
def ~[Y, Z, AA >: A, RA, B, C, D, E](other: ReductionRule3_5[Y, Z, AA, RA, B, C, D, E]) = new ReductionRule2_5[Y, Z, RA, B, C, D, E](append(other))
def ~[Y, Z, AA >: A, RA, B, C, D, E, F](other: ReductionRule3_6[Y, Z, AA, RA, B, C, D, E, F]) = new ReductionRule2_6[Y, Z, RA, B, C, D, E, F](append(other))
def ~[Y, Z, AA >: A, RA, B, C, D, E, F, G](other: ReductionRule3_7[Y, Z, AA, RA, B, C, D, E, F, G]) = new ReductionRule2_7[Y, Z, RA, B, C, D, E, F, G](append(other))
def ~[Z, AA >: A, RA](other: ReductionRule2[Z, AA, RA]) = new ReductionRule1[Z, RA](append(other))
def ~[Z, AA >: A, RA, B](other: ReductionRule2_2[Z, AA, RA, B]) = new ReductionRule1_2[Z, RA, B](append(other))
def ~[Z, AA >: A, RA, B, C](other: ReductionRule2_3[Z, AA, RA, B, C]) = new ReductionRule1_3[Z, RA, B, C](append(other))
def ~[Z, AA >: A, RA, B, C, D](other: ReductionRule2_4[Z, AA, RA, B, C, D]) = new ReductionRule1_4[Z, RA, B, C, D](append(other))
def ~[Z, AA >: A, RA, B, C, D, E](other: ReductionRule2_5[Z, AA, RA, B, C, D, E]) = new ReductionRule1_5[Z, RA, B, C, D, E](append(other))
def ~[Z, AA >: A, RA, B, C, D, E, F](other: ReductionRule2_6[Z, AA, RA, B, C, D, E, F]) = new ReductionRule1_6[Z, RA, B, C, D, E, F](append(other))
def ~[Z, AA >: A, RA, B, C, D, E, F, G](other: ReductionRule2_7[Z, AA, RA, B, C, D, E, F, G]) = new ReductionRule1_7[Z, RA, B, C, D, E, F, G](append(other))
def ~[AA >: A, RA](other: ReductionRule1[AA, RA]) = new Rule1[RA](append(other))
def ~[AA >: A, RA, B](other: ReductionRule1_2[AA, RA, B]) = new Rule2[RA, B](append(other))
def ~[AA >: A, RA, B, C](other: ReductionRule1_3[AA, RA, B, C]) = new Rule3[RA, B, C](append(other))
def ~[AA >: A, RA, B, C, D](other: ReductionRule1_4[AA, RA, B, C, D]) = new Rule4[RA, B, C, D](append(other))
def ~[AA >: A, RA, B, C, D, E](other: ReductionRule1_5[AA, RA, B, C, D, E]) = new Rule5[RA, B, C, D, E](append(other))
def ~[AA >: A, RA, B, C, D, E, F](other: ReductionRule1_6[AA, RA, B, C, D, E, F]) = new Rule6[RA, B, C, D, E, F](append(other))
def ~[AA >: A, RA, B, C, D, E, F, G](other: ReductionRule1_7[AA, RA, B, C, D, E, F, G]) = new Rule7[RA, B, C, D, E, F, G](append(other))
def ~[B](other: Rule1[B]) = new Rule2[A, B](append(other))
def ~[B, C](other: Rule2[B, C]) = new Rule3[A, B, C](append(other))
def ~[B, C, D](other: Rule3[B, C, D]) = new Rule4[A, B, C, D](append(other))
def ~[B, C, D, E](other: Rule4[B, C, D, E]) = new Rule5[A, B, C, D, E](append(other))
def ~[B, C, D, E, F](other: Rule5[B, C, D, E, F]) = new Rule6[A, B, C, D, E, F](append(other))
def ~[B, C, D, E, F, G](other: Rule6[B, C, D, E, F, G]) = new Rule7[A, B, C, D, E, F, G](append(other))
def ~:>[R](f: Char => R) = new Rule2[A, R](append(push(exec(GetMatchedChar, f))))
def ~>>[R](f: IndexRange => R) = new Rule2[A, R](append(push(exec(GetMatchRange, f))))
def ~>[R](f: String => R) = new Rule2[A, R](append(push(exec(GetMatch, f))))
def ~~>[R](f: A => R) = new Rule1[R](append(push(exec(stack1(Pop), f))))
def ~~>[Z, R](f: (Z, A) => R) = new ReductionRule1[Z, R](append(push(exec(stack2(Pop), f))))
def ~~>[Y, Z, R](f: (Y, Z, A) => R) = new ReductionRule2[Y, Z, R](append(push(exec(stack3(Pop), f))))
def ~~>[X, Y, Z, R](f: (X, Y, Z, A) => R) = new ReductionRule3[X, Y, Z, R](append(push(exec(stack4(Pop), f))))
def ~~?(f: A => Boolean) = new Rule0(append(exec(stack1(Pop), f)))
def ~~?[Z](f: (Z, A) => Boolean) = new PopRule1[Z](append(exec(stack2(Pop), f)))
def ~~?[Y, Z](f: (Y, Z, A) => Boolean) = new PopRule2[Y, Z](append(exec(stack3(Pop), f)))
def ~~?[X, Y, Z](f: (X, Y, Z, A) => Boolean) = new PopRule3[X, Y, Z](append(exec(stack4(Pop), f)))
def ~~%(f: A => Unit) = new Rule0(append(ok(exec(stack1(Pop), f))))
  def ~~%[Z](f: (Z, A) => Unit) = new PopRule1[Z](append(ok(exec(stack2(Pop), f))))
def ~~%[Y, Z](f: (Y, Z, A) => Unit) = new PopRule2[Y, Z](append(ok(exec(stack3(Pop), f))))
def ~~%[X, Y, Z](f: (X, Y, Z, A) => Unit) = new PopRule3[X, Y, Z](append(ok(exec(stack4(Pop), f))))
def ~~~>[R](f: A => R) = new Rule2[A, R](append(push(exec(stack1(Peek), f))))
def ~~~?(f: A => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~%(f: A => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def |[AA >: A](other: Rule1[AA]) = new Rule1[AA](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule1[A](matcher).asInstanceOf[this.type]
}
object Rule1 {
implicit def toRule(rule: Rule1[_]): org.parboiled.Rule = rule.matcher
}
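// Illustrative sketch, not part of the original source: how the arity-indexed rule types in this
// module compose the parser value stack in a parboiled-scala grammar. The parser and rule names
// below are invented for the example; the operators and rule types are the ones defined here.
//
//   class CalcParser extends Parser {
//     def Digits: Rule1[Int]    = rule { oneOrMore("0" - "9") ~> (_.toInt) }     // pushes one Int
//     def Pair: Rule2[Int, Int] = rule { Digits ~ "," ~ Digits }                 // Rule1 ~ Rule1 yields Rule2
//     def Sum: Rule1[Int]       = rule { Pair ~~> ((a: Int, b: Int) => a + b) }  // ~~> pops both, pushes the sum
//   }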
/**
 * A rule pushing two new values of given types onto the parser's value stack.
*/
class Rule2[+A, +B](val matcher: Matcher) extends PushRule {
def ~[Z, AA >: A, BB >: B](other: PopRule3[Z, AA, BB]) = new PopRule1[Z](append(other))
def ~[AA >: A, BB >: B](other: PopRule2[AA, BB]) = new Rule0(append(other))
def ~[BB >: B](other: PopRule1[BB]) = new Rule1[A](append(other))
def ~[Z, AA >: A, BB >: B, RA](other: ReductionRule3[Z, AA, BB, RA]) = new ReductionRule1[Z, RA](append(other))
def ~[Z, AA >: A, BB >: B, RA, RB](other: ReductionRule3_2[Z, AA, BB, RA, RB]) = new ReductionRule1_2[Z, RA, RB](append(other))
def ~[Z, AA >: A, BB >: B, RA, RB, C](other: ReductionRule3_3[Z, AA, BB, RA, RB, C]) = new ReductionRule1_3[Z, RA, RB, C](append(other))
def ~[Z, AA >: A, BB >: B, RA, RB, C, D](other: ReductionRule3_4[Z, AA, BB, RA, RB, C, D]) = new ReductionRule1_4[Z, RA, RB, C, D](append(other))
def ~[Z, AA >: A, BB >: B, RA, RB, C, D, E](other: ReductionRule3_5[Z, AA, BB, RA, RB, C, D, E]) = new ReductionRule1_5[Z, RA, RB, C, D, E](append(other))
def ~[Z, AA >: A, BB >: B, RA, RB, C, D, E, F](other: ReductionRule3_6[Z, AA, BB, RA, RB, C, D, E, F]) = new ReductionRule1_6[Z, RA, RB, C, D, E, F](append(other))
def ~[Z, AA >: A, BB >: B, RA, RB, C, D, E, F, G](other: ReductionRule3_7[Z, AA, BB, RA, RB, C, D, E, F, G]) = new ReductionRule1_7[Z, RA, RB, C, D, E, F, G](append(other))
def ~[AA >: A, BB >: B, RA](other: ReductionRule2[AA, BB, RA]) = new Rule1[RA](append(other))
def ~[AA >: A, BB >: B, RA, RB](other: ReductionRule2_2[AA, BB, RA, RB]) = new Rule2[RA, RB](append(other))
def ~[AA >: A, BB >: B, RA, RB, C](other: ReductionRule2_3[AA, BB, RA, RB, C]) = new Rule3[RA, RB, C](append(other))
def ~[AA >: A, BB >: B, RA, RB, C, D](other: ReductionRule2_4[AA, BB, RA, RB, C, D]) = new Rule4[RA, RB, C, D](append(other))
def ~[AA >: A, BB >: B, RA, RB, C, D, E](other: ReductionRule2_5[AA, BB, RA, RB, C, D, E]) = new Rule5[RA, RB, C, D, E](append(other))
def ~[AA >: A, BB >: B, RA, RB, C, D, E, F](other: ReductionRule2_6[AA, BB, RA, RB, C, D, E, F]) = new Rule6[RA, RB, C, D, E, F](append(other))
def ~[AA >: A, BB >: B, RA, RB, C, D, E, F, G](other: ReductionRule2_7[AA, BB, RA, RB, C, D, E, F, G]) = new Rule7[RA, RB, C, D, E, F, G](append(other))
def ~[BB >: B, RB](other: ReductionRule1[BB, RB]) = new Rule2[A, RB](append(other))
def ~[BB >: B, RB, C](other: ReductionRule1_2[BB, RB, C]) = new Rule3[A, RB, C](append(other))
def ~[BB >: B, RB, C, D](other: ReductionRule1_3[BB, RB, C, D]) = new Rule4[A, RB, C, D](append(other))
def ~[BB >: B, RB, C, D, E](other: ReductionRule1_4[BB, RB, C, D, E]) = new Rule5[A, RB, C, D, E](append(other))
def ~[BB >: B, RB, C, D, E, F](other: ReductionRule1_5[BB, RB, C, D, E, F]) = new Rule6[A, RB, C, D, E, F](append(other))
def ~[BB >: B, RB, C, D, E, F, G](other: ReductionRule1_6[BB, RB, C, D, E, F, G]) = new Rule7[A, RB, C, D, E, F, G](append(other))
def ~[C](other: Rule1[C]) = new Rule3[A, B, C](append(other))
def ~[C, D](other: Rule2[C, D]) = new Rule4[A, B, C, D](append(other))
def ~[C, D, E](other: Rule3[C, D, E]) = new Rule5[A, B, C, D, E](append(other))
def ~[C, D, E, F](other: Rule4[C, D, E, F]) = new Rule6[A, B, C, D, E, F](append(other))
def ~[C, D, E, F, G](other: Rule5[C, D, E, F, G]) = new Rule7[A, B, C, D, E, F, G](append(other))
def ~:>[R](f: Char => R) = new Rule3[A, B, R](append(push(exec(GetMatchedChar, f))))
def ~>>[R](f: IndexRange => R) = new Rule3[A, B, R](append(push(exec(GetMatchRange, f))))
def ~>[R](f: String => R) = new Rule3[A, B, R](append(push(exec(GetMatch, f))))
def ~~>[R](f: B => R) = new Rule2[A, R](append(push(exec(stack1(Pop), f))))
def ~~>[R](f: (A, B) => R) = new Rule1[R](append(push(exec(stack2(Pop), f))))
def ~~>[Z, R](f: (Z, A, B) => R) = new ReductionRule1[Z, R](append(push(exec(stack3(Pop), f))))
def ~~>[Y, Z, R](f: (Y, Z, A, B) => R) = new ReductionRule2[Y, Z, R](append(push(exec(stack4(Pop), f))))
def ~~>[X, Y, Z, R](f: (X, Y, Z, A, B) => R) = new ReductionRule3[X, Y, Z, R](append(push(exec(stack5(Pop), f))))
def ~~?(f: B => Boolean) = new Rule1[A](append(exec(stack1(Pop), f)))
def ~~?(f: (A, B) => Boolean) = new Rule0(append(exec(stack2(Pop), f)))
def ~~?[Z](f: (Z, A, B) => Boolean) = new PopRule1[Z](append(exec(stack3(Pop), f)))
def ~~?[Y, Z](f: (Y, Z, A, B) => Boolean) = new PopRule2[Y, Z](append(exec(stack4(Pop), f)))
def ~~?[X, Y, Z](f: (X, Y, Z, A, B) => Boolean) = new PopRule3[X, Y, Z](append(exec(stack5(Pop), f)))
def ~~%(f: B => Unit) = new Rule1[A](append(ok(exec(stack1(Pop), f))))
def ~~%(f: (A, B) => Unit) = new Rule0(append(ok(exec(stack2(Pop), f))))
def ~~%[Z](f: (Z, A, B) => Unit) = new PopRule1[Z](append(ok(exec(stack3(Pop), f))))
def ~~%[Y, Z](f: (Y, Z, A, B) => Unit) = new PopRule2[Y, Z](append(ok(exec(stack4(Pop), f))))
def ~~%[X, Y, Z](f: (X, Y, Z, A, B) => Unit) = new PopRule3[X, Y, Z](append(ok(exec(stack5(Pop), f))))
def ~~~>[R](f: B => R) = new Rule3[A, B, R](append(push(exec(stack1(Peek), f))))
def ~~~>[R](f: (A, B) => R) = new Rule3[A, B, R](append(push(exec(stack2(Peek), f))))
def ~~~?(f: B => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~?(f: (A, B) => Boolean) = withMatcher(append(exec(stack2(Peek), f)))
def ~~~%(f: B => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def ~~~%(f: (A, B) => Unit) = withMatcher(append(ok(exec(stack2(Peek), f))))
def |[AA >: A, BB >: B](other: Rule2[AA, BB]) = new Rule2[AA, BB](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule2[A, B](matcher).asInstanceOf[this.type]
}
/**
 * A rule pushing 3 new values of given types onto the parser's value stack.
*/
class Rule3[+A, +B, +C](val matcher: Matcher) extends PushRule {
def ~[AA >: A, BB >: B, CC >: C](other: PopRule3[AA, BB, CC]) = new Rule0(append(other))
def ~[BB >: B, CC >: C](other: PopRule2[BB, CC]) = new Rule1[A](append(other))
def ~[CC >: C](other: PopRule1[CC]) = new Rule2[A, B](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA](other: ReductionRule3[AA, BB, CC, RA]) = new Rule1[RA](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA, RB](other: ReductionRule3_2[AA, BB, CC, RA, RB]) = new Rule2[RA, RB](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA, RB, RC](other: ReductionRule3_3[AA, BB, CC, RA, RB, RC]) = new Rule3[RA, RB, RC](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA, RB, RC, D](other: ReductionRule3_4[AA, BB, CC, RA, RB, RC, D]) = new Rule4[RA, RB, RC, D](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA, RB, RC, D, E](other: ReductionRule3_5[AA, BB, CC, RA, RB, RC, D, E]) = new Rule5[RA, RB, RC, D, E](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA, RB, RC, D, E, F](other: ReductionRule3_6[AA, BB, CC, RA, RB, RC, D, E, F]) = new Rule6[RA, RB, RC, D, E, F](append(other))
def ~[AA >: A, BB >: B, CC >: C, RA, RB, RC, D, E, F, G](other: ReductionRule3_7[AA, BB, CC, RA, RB, RC, D, E, F, G]) = new Rule7[RA, RB, RC, D, E, F, G](append(other))
def ~[BB >: B, CC >: C, RB](other: ReductionRule2[BB, CC, RB]) = new Rule2[A, RB](append(other))
def ~[BB >: B, CC >: C, RB, RC](other: ReductionRule2_2[BB, CC, RB, RC]) = new Rule3[A, RB, RC](append(other))
def ~[BB >: B, CC >: C, RB, RC, D](other: ReductionRule2_3[BB, CC, RB, RC, D]) = new Rule4[A, RB, RC, D](append(other))
def ~[BB >: B, CC >: C, RB, RC, D, E](other: ReductionRule2_4[BB, CC, RB, RC, D, E]) = new Rule5[A, RB, RC, D, E](append(other))
def ~[BB >: B, CC >: C, RB, RC, D, E, F](other: ReductionRule2_5[BB, CC, RB, RC, D, E, F]) = new Rule6[A, RB, RC, D, E, F](append(other))
def ~[BB >: B, CC >: C, RB, RC, D, E, F, G](other: ReductionRule2_6[BB, CC, RB, RC, D, E, F, G]) = new Rule7[A, RB, RC, D, E, F, G](append(other))
def ~[CC >: C, RC](other: ReductionRule1[CC, RC]) = new Rule3[A, B, RC](append(other))
def ~[CC >: C, RC, D](other: ReductionRule1_2[CC, RC, D]) = new Rule4[A, B, RC, D](append(other))
def ~[CC >: C, RC, D, E](other: ReductionRule1_3[CC, RC, D, E]) = new Rule5[A, B, RC, D, E](append(other))
def ~[CC >: C, RC, D, E, F](other: ReductionRule1_4[CC, RC, D, E, F]) = new Rule6[A, B, RC, D, E, F](append(other))
def ~[CC >: C, RC, D, E, F, G](other: ReductionRule1_5[CC, RC, D, E, F, G]) = new Rule7[A, B, RC, D, E, F, G](append(other))
def ~[D](other: Rule1[D]) = new Rule4[A, B, C, D](append(other))
def ~[D, E](other: Rule2[D, E]) = new Rule5[A, B, C, D, E](append(other))
def ~[D, E, F](other: Rule3[D, E, F]) = new Rule6[A, B, C, D, E, F](append(other))
def ~[D, E, F, G](other: Rule4[D, E, F, G]) = new Rule7[A, B, C, D, E, F, G](append(other))
def ~:>[R](f: Char => R) = new Rule4[A, B, C, R](append(push(exec(GetMatchedChar, f))))
def ~>>[R](f: IndexRange => R) = new Rule4[A, B, C, R](append(push(exec(GetMatchRange, f))))
def ~>[R](f: String => R) = new Rule4[A, B, C, R](append(push(exec(GetMatch, f))))
def ~~>[R](f: C => R) = new Rule3[A, B, R](append(push(exec(stack1(Pop), f))))
def ~~>[R](f: (B, C) => R) = new Rule2[A, R](append(push(exec(stack2(Pop), f))))
def ~~>[R](f: (A, B, C) => R) = new Rule1[R](append(push(exec(stack3(Pop), f))))
def ~~>[Z, R](f: (Z, A, B, C) => R) = new ReductionRule1[Z, R](append(push(exec(stack4(Pop), f))))
def ~~>[Y, Z, R](f: (Y, Z, A, B, C) => R) = new ReductionRule2[Y, Z, R](append(push(exec(stack5(Pop), f))))
def ~~>[X, Y, Z, R](f: (X, Y, Z, A, B, C) => R) = new ReductionRule3[X, Y, Z, R](append(push(exec(stack6(Pop), f))))
def ~~?(f: C => Boolean) = new Rule2[A, B](append(exec(stack1(Pop), f)))
def ~~?(f: (B, C) => Boolean) = new Rule1[A](append(exec(stack2(Pop), f)))
def ~~?(f: (A, B, C) => Boolean) = new Rule0(append(exec(stack3(Pop), f)))
def ~~?[Z](f: (Z, A, B, C) => Boolean) = new PopRule1[Z](append(exec(stack4(Pop), f)))
def ~~?[Y, Z](f: (Y, Z, A, B, C) => Boolean) = new PopRule2[Y, Z](append(exec(stack5(Pop), f)))
def ~~?[X, Y, Z](f: (X, Y, Z, A, B, C) => Boolean) = new PopRule3[X, Y, Z](append(exec(stack6(Pop), f)))
def ~~%(f: C => Unit) = new Rule2[A, B](append(ok(exec(stack1(Pop), f))))
def ~~%(f: (B, C) => Unit) = new Rule1[A](append(ok(exec(stack2(Pop), f))))
def ~~%(f: (A, B, C) => Unit) = new Rule0(append(ok(exec(stack3(Pop), f))))
def ~~%[Z](f: (Z, A, B, C) => Unit) = new PopRule1[Z](append(ok(exec(stack4(Pop), f))))
def ~~%[Y, Z](f: (Y, Z, A, B, C) => Unit) = new PopRule2[Y, Z](append(ok(exec(stack5(Pop), f))))
def ~~%[X, Y, Z](f: (X, Y, Z, A, B, C) => Unit) = new PopRule3[X, Y, Z](append(ok(exec(stack6(Pop), f))))
def ~~~>[R](f: C => R) = new Rule4[A, B, C, R](append(push(exec(stack1(Peek), f))))
def ~~~>[R](f: (B, C) => R) = new Rule4[A, B, C, R](append(push(exec(stack2(Peek), f))))
def ~~~>[R](f: (A, B, C) => R) = new Rule4[A, B, C, R](append(push(exec(stack3(Peek), f))))
def ~~~?(f: C => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~?(f: (B, C) => Boolean) = withMatcher(append(exec(stack2(Peek), f)))
def ~~~?(f: (A, B, C) => Boolean) = withMatcher(append(exec(stack3(Peek), f)))
def ~~~%(f: C => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def ~~~%(f: (B, C) => Unit) = withMatcher(append(ok(exec(stack2(Peek), f))))
def ~~~%(f: (A, B, C) => Unit) = withMatcher(append(ok(exec(stack3(Peek), f))))
def |[AA >: A, BB >: B, CC >: C](other: Rule3[AA, BB, CC]) = new Rule3[AA, BB, CC](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule3[A, B, C](matcher).asInstanceOf[this.type]
}
/**
 * A rule pushing 4 new values of given types onto the parser's value stack.
*/
class Rule4[+A, +B, +C, +D](val matcher: Matcher) extends PushRule {
def ~[BB >: B, CC >: C, DD >: D](other: PopRule3[BB, CC, DD]) = new Rule1[A](append(other))
def ~[CC >: C, DD >: D](other: PopRule2[CC, DD]) = new Rule2[A, B](append(other))
def ~[DD >: D](other: PopRule1[DD]) = new Rule3[A, B, C](append(other))
def ~[BB >: B, CC >: C, DD >: D, RB](other: ReductionRule3[BB, CC, DD, RB]) = new Rule2[A, RB](append(other))
def ~[BB >: B, CC >: C, DD >: D, RB, RC](other: ReductionRule3_2[BB, CC, DD, RB, RC]) = new Rule3[A, RB, RC](append(other))
def ~[BB >: B, CC >: C, DD >: D, RB, RC, RD](other: ReductionRule3_3[BB, CC, DD, RB, RC, RD]) = new Rule4[A, RB, RC, RD](append(other))
def ~[BB >: B, CC >: C, DD >: D, RB, RC, RD, E](other: ReductionRule3_4[BB, CC, DD, RB, RC, RD, E]) = new Rule5[A, RB, RC, RD, E](append(other))
def ~[BB >: B, CC >: C, DD >: D, RB, RC, RD, E, F](other: ReductionRule3_5[BB, CC, DD, RB, RC, RD, E, F]) = new Rule6[A, RB, RC, RD, E, F](append(other))
def ~[BB >: B, CC >: C, DD >: D, RB, RC, RD, E, F, G](other: ReductionRule3_6[BB, CC, DD, RB, RC, RD, E, F, G]) = new Rule7[A, RB, RC, RD, E, F, G](append(other))
def ~[CC >: C, DD >: D, RC](other: ReductionRule2[CC, DD, RC]) = new Rule3[A, B, RC](append(other))
def ~[CC >: C, DD >: D, RC, RD](other: ReductionRule2_2[CC, DD, RC, RD]) = new Rule4[A, B, RC, RD](append(other))
def ~[CC >: C, DD >: D, RC, RD, E](other: ReductionRule2_3[CC, DD, RC, RD, E]) = new Rule5[A, B, RC, RD, E](append(other))
def ~[CC >: C, DD >: D, RC, RD, E, F](other: ReductionRule2_4[CC, DD, RC, RD, E, F]) = new Rule6[A, B, RC, RD, E, F](append(other))
def ~[CC >: C, DD >: D, RC, RD, E, F, G](other: ReductionRule2_5[CC, DD, RC, RD, E, F, G]) = new Rule7[A, B, RC, RD, E, F, G](append(other))
def ~[DD >: D, RD](other: ReductionRule1[DD, RD]) = new Rule4[A, B, C, RD](append(other))
def ~[DD >: D, RD, E](other: ReductionRule1_2[DD, RD, E]) = new Rule5[A, B, C, RD, E](append(other))
def ~[DD >: D, RD, E, F](other: ReductionRule1_3[DD, RD, E, F]) = new Rule6[A, B, C, RD, E, F](append(other))
def ~[DD >: D, RD, E, F, G](other: ReductionRule1_4[DD, RD, E, F, G]) = new Rule7[A, B, C, RD, E, F, G](append(other))
def ~[E](other: Rule1[E]) = new Rule5[A, B, C, D, E](append(other))
def ~[E, F](other: Rule2[E, F]) = new Rule6[A, B, C, D, E, F](append(other))
def ~[E, F, G](other: Rule3[E, F, G]) = new Rule7[A, B, C, D, E, F, G](append(other))
def ~:>[R](f: Char => R) = new Rule5[A, B, C, D, R](append(push(exec(GetMatchedChar, f))))
def ~>>[R](f: IndexRange => R) = new Rule5[A, B, C, D, R](append(push(exec(GetMatchRange, f))))
def ~>[R](f: String => R) = new Rule5[A, B, C, D, R](append(push(exec(GetMatch, f))))
def ~~>[R](f: D => R) = new Rule4[A, B, C, R](append(push(exec(stack1(Pop), f))))
def ~~>[R](f: (C, D) => R) = new Rule3[A, B, R](append(push(exec(stack2(Pop), f))))
def ~~>[R](f: (B, C, D) => R) = new Rule2[A, R](append(push(exec(stack3(Pop), f))))
def ~~>[R](f: (A, B, C, D) => R) = new Rule1[R](append(push(exec(stack4(Pop), f))))
def ~~>[Z, R](f: (Z, A, B, C, D) => R) = new ReductionRule1[Z, R](append(push(exec(stack5(Pop), f))))
def ~~>[Y, Z, R](f: (Y, Z, A, B, C, D) => R) = new ReductionRule2[Y, Z, R](append(push(exec(stack6(Pop), f))))
def ~~>[X, Y, Z, R](f: (X, Y, Z, A, B, C, D) => R) = new ReductionRule3[X, Y, Z, R](append(push(exec(stack7(Pop), f))))
def ~~?(f: D => Boolean) = new Rule3[A, B, C](append(exec(stack1(Pop), f)))
def ~~?(f: (C, D) => Boolean) = new Rule2[A, B](append(exec(stack2(Pop), f)))
def ~~?(f: (B, C, D) => Boolean) = new Rule1[A](append(exec(stack3(Pop), f)))
def ~~?(f: (A, B, C, D) => Boolean) = new Rule0(append(exec(stack4(Pop), f)))
def ~~?[Z](f: (Z, A, B, C, D) => Boolean) = new PopRule1[Z](append(exec(stack5(Pop), f)))
def ~~?[Y, Z](f: (Y, Z, A, B, C, D) => Boolean) = new PopRule2[Y, Z](append(exec(stack6(Pop), f)))
def ~~?[X, Y, Z](f: (X, Y, Z, A, B, C, D) => Boolean) = new PopRule3[X, Y, Z](append(exec(stack7(Pop), f)))
def ~~%(f: D => Unit) = new Rule3[A, B, C](append(ok(exec(stack1(Pop), f))))
def ~~%(f: (C, D) => Unit) = new Rule2[A, B](append(ok(exec(stack2(Pop), f))))
def ~~%(f: (B, C, D) => Unit) = new Rule1[A](append(ok(exec(stack3(Pop), f))))
def ~~%(f: (A, B, C, D) => Unit) = new Rule0(append(ok(exec(stack4(Pop), f))))
def ~~%[Z](f: (Z, A, B, C, D) => Unit) = new PopRule1[Z](append(ok(exec(stack5(Pop), f))))
def ~~%[Y, Z](f: (Y, Z, A, B, C, D) => Unit) = new PopRule2[Y, Z](append(ok(exec(stack6(Pop), f))))
def ~~%[X, Y, Z](f: (X, Y, Z, A, B, C, D) => Unit) = new PopRule3[X, Y, Z](append(ok(exec(stack7(Pop), f))))
def ~~~>[R](f: D => R) = new Rule5[A, B, C, D, R](append(push(exec(stack1(Peek), f))))
def ~~~>[R](f: (C, D) => R) = new Rule5[A, B, C, D, R](append(push(exec(stack2(Peek), f))))
def ~~~>[R](f: (B, C, D) => R) = new Rule5[A, B, C, D, R](append(push(exec(stack3(Peek), f))))
def ~~~>[R](f: (A, B, C, D) => R) = new Rule5[A, B, C, D, R](append(push(exec(stack4(Peek), f))))
def ~~~?(f: D => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~?(f: (C, D) => Boolean) = withMatcher(append(exec(stack2(Peek), f)))
def ~~~?(f: (B, C, D) => Boolean) = withMatcher(append(exec(stack3(Peek), f)))
def ~~~?(f: (A, B, C, D) => Boolean) = withMatcher(append(exec(stack4(Peek), f)))
def ~~~%(f: D => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def ~~~%(f: (C, D) => Unit) = withMatcher(append(ok(exec(stack2(Peek), f))))
def ~~~%(f: (B, C, D) => Unit) = withMatcher(append(ok(exec(stack3(Peek), f))))
def ~~~%(f: (A, B, C, D) => Unit) = withMatcher(append(ok(exec(stack4(Peek), f))))
def |[AA >: A, BB >: B, CC >: C, DD >: D](other: Rule4[AA, BB, CC, DD]) = new Rule4[AA, BB, CC, DD](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule4[A, B, C, D](matcher).asInstanceOf[this.type]
}
/**
 * A rule pushing 5 new values of given types onto the parser's value stack.
*/
class Rule5[+A, +B, +C, +D, +E](val matcher: Matcher) extends PushRule {
def ~[CC >: C, DD >: D, EE >: E](other: PopRule3[CC, DD, EE]) = new Rule2[A, B](append(other))
def ~[DD >: D, EE >: E](other: PopRule2[DD, EE]) = new Rule3[A, B, C](append(other))
def ~[EE >: E](other: PopRule1[EE]) = new Rule4[A, B, C, D](append(other))
def ~[CC >: C, DD >: D, EE >: E, RC](other: ReductionRule3[CC, DD, EE, RC]) = new Rule3[A, B, RC](append(other))
def ~[CC >: C, DD >: D, EE >: E, RC, RD](other: ReductionRule3_2[CC, DD, EE, RC, RD]) = new Rule4[A, B, RC, RD](append(other))
def ~[CC >: C, DD >: D, EE >: E, RC, RD, RE](other: ReductionRule3_3[CC, DD, EE, RC, RD, RE]) = new Rule5[A, B, RC, RD, RE](append(other))
def ~[CC >: C, DD >: D, EE >: E, RC, RD, RE, F](other: ReductionRule3_4[CC, DD, EE, RC, RD, RE, F]) = new Rule6[A, B, RC, RD, RE, F](append(other))
def ~[CC >: C, DD >: D, EE >: E, RC, RD, RE, F, G](other: ReductionRule3_5[CC, DD, EE, RC, RD, RE, F, G]) = new Rule7[A, B, RC, RD, RE, F, G](append(other))
def ~[DD >: D, EE >: E, RD](other: ReductionRule2[DD, EE, RD]) = new Rule4[A, B, C, RD](append(other))
def ~[DD >: D, EE >: E, RD, RE](other: ReductionRule2_2[DD, EE, RD, RE]) = new Rule5[A, B, C, RD, RE](append(other))
def ~[DD >: D, EE >: E, RD, RE, F](other: ReductionRule2_3[DD, EE, RD, RE, F]) = new Rule6[A, B, C, RD, RE, F](append(other))
def ~[DD >: D, EE >: E, RD, RE, F, G](other: ReductionRule2_4[DD, EE, RD, RE, F, G]) = new Rule7[A, B, C, RD, RE, F, G](append(other))
def ~[EE >: E, RE](other: ReductionRule1[EE, RE]) = new Rule5[A, B, C, D, RE](append(other))
def ~[EE >: E, RE, F](other: ReductionRule1_2[EE, RE, F]) = new Rule6[A, B, C, D, RE, F](append(other))
def ~[EE >: E, RE, F, G](other: ReductionRule1_3[EE, RE, F, G]) = new Rule7[A, B, C, D, RE, F, G](append(other))
def ~[F](other: Rule1[F]) = new Rule6[A, B, C, D, E, F](append(other))
def ~[F, G](other: Rule2[F, G]) = new Rule7[A, B, C, D, E, F, G](append(other))
def ~:>[R](f: Char => R) = new Rule6[A, B, C, D, E, R](append(push(exec(GetMatchedChar, f))))
def ~>>[R](f: IndexRange => R) = new Rule6[A, B, C, D, E, R](append(push(exec(GetMatchRange, f))))
def ~>[R](f: String => R) = new Rule6[A, B, C, D, E, R](append(push(exec(GetMatch, f))))
def ~~>[R](f: E => R) = new Rule5[A, B, C, D, R](append(push(exec(stack1(Pop), f))))
def ~~>[R](f: (D, E) => R) = new Rule4[A, B, C, R](append(push(exec(stack2(Pop), f))))
def ~~>[R](f: (C, D, E) => R) = new Rule3[A, B, R](append(push(exec(stack3(Pop), f))))
def ~~>[R](f: (B, C, D, E) => R) = new Rule2[A, R](append(push(exec(stack4(Pop), f))))
def ~~>[R](f: (A, B, C, D, E) => R) = new Rule1[R](append(push(exec(stack5(Pop), f))))
def ~~>[Z, R](f: (Z, A, B, C, D, E) => R) = new ReductionRule1[Z, R](append(push(exec(stack6(Pop), f))))
def ~~>[Y, Z, R](f: (Y, Z, A, B, C, D, E) => R) = new ReductionRule2[Y, Z, R](append(push(exec(stack7(Pop), f))))
def ~~?(f: E => Boolean) = new Rule4[A, B, C, D](append(exec(stack1(Pop), f)))
def ~~?(f: (D, E) => Boolean) = new Rule3[A, B, C](append(exec(stack2(Pop), f)))
def ~~?(f: (C, D, E) => Boolean) = new Rule2[A, B](append(exec(stack3(Pop), f)))
def ~~?(f: (B, C, D, E) => Boolean) = new Rule1[A](append(exec(stack4(Pop), f)))
def ~~?(f: (A, B, C, D, E) => Boolean) = new Rule0(append(exec(stack5(Pop), f)))
def ~~?[Z](f: (Z, A, B, C, D, E) => Boolean) = new PopRule1[Z](append(exec(stack6(Pop), f)))
def ~~?[Y, Z](f: (Y, Z, A, B, C, D, E) => Boolean) = new PopRule2[Y, Z](append(exec(stack7(Pop), f)))
def ~~%(f: E => Unit) = new Rule4[A, B, C, D](append(ok(exec(stack1(Pop), f))))
def ~~%(f: (D, E) => Unit) = new Rule3[A, B, C](append(ok(exec(stack2(Pop), f))))
def ~~%(f: (C, D, E) => Unit) = new Rule2[A, B](append(ok(exec(stack3(Pop), f))))
def ~~%(f: (B, C, D, E) => Unit) = new Rule1[A](append(ok(exec(stack4(Pop), f))))
def ~~%(f: (A, B, C, D, E) => Unit) = new Rule0(append(ok(exec(stack5(Pop), f))))
def ~~%[Z](f: (Z, A, B, C, D, E) => Unit) = new PopRule1[Z](append(ok(exec(stack6(Pop), f))))
def ~~%[Y, Z](f: (Y, Z, A, B, C, D, E) => Unit) = new PopRule2[Y, Z](append(ok(exec(stack7(Pop), f))))
def ~~~>[R](f: E => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack1(Peek), f))))
def ~~~>[R](f: (D, E) => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack2(Peek), f))))
def ~~~>[R](f: (C, D, E) => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack3(Peek), f))))
def ~~~>[R](f: (B, C, D, E) => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack4(Peek), f))))
def ~~~>[R](f: (A, B, C, D, E) => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack5(Peek), f))))
def ~~~?(f: E => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~?(f: (D, E) => Boolean) = withMatcher(append(exec(stack2(Peek), f)))
def ~~~?(f: (C, D, E) => Boolean) = withMatcher(append(exec(stack3(Peek), f)))
def ~~~?(f: (B, C, D, E) => Boolean) = withMatcher(append(exec(stack4(Peek), f)))
def ~~~?(f: (A, B, C, D, E) => Boolean) = withMatcher(append(exec(stack5(Peek), f)))
def ~~~%(f: E => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def ~~~%(f: (D, E) => Unit) = withMatcher(append(ok(exec(stack2(Peek), f))))
def ~~~%(f: (C, D, E) => Unit) = withMatcher(append(ok(exec(stack3(Peek), f))))
def ~~~%(f: (B, C, D, E) => Unit) = withMatcher(append(ok(exec(stack4(Peek), f))))
def ~~~%(f: (A, B, C, D, E) => Unit) = withMatcher(append(ok(exec(stack5(Peek), f))))
def |[AA >: A, BB >: B, CC >: C, DD >: D, EE >: E](other: Rule5[AA, BB, CC, DD, EE]) = new Rule5[AA, BB, CC, DD, EE](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule5[A, B, C, D, E](matcher).asInstanceOf[this.type]
}
/**
 * A rule pushing 6 new values of given types onto the parser's value stack.
*/
class Rule6[+A, +B, +C, +D, +E, +F](val matcher: Matcher) extends PushRule {
def ~[DD >: D, EE >: E, FF >: F](other: PopRule3[DD, EE, FF]) = new Rule3[A, B, C](append(other))
def ~[EE >: E, FF >: F](other: PopRule2[EE, FF]) = new Rule4[A, B, C, D](append(other))
def ~[FF >: F](other: PopRule1[FF]) = new Rule5[A, B, C, D, E](append(other))
def ~[DD >: D, EE >: E, FF >: F, RD](other: ReductionRule3[DD, EE, FF, RD]) = new Rule4[A, B, C, RD](append(other))
def ~[DD >: D, EE >: E, FF >: F, RD, RE](other: ReductionRule3_2[DD, EE, FF, RD, RE]) = new Rule5[A, B, C, RD, RE](append(other))
def ~[DD >: D, EE >: E, FF >: F, RD, RE, RF](other: ReductionRule3_3[DD, EE, FF, RD, RE, RF]) = new Rule6[A, B, C, RD, RE, RF](append(other))
def ~[DD >: D, EE >: E, FF >: F, RD, RE, RF, G](other: ReductionRule3_4[DD, EE, FF, RD, RE, RF, G]) = new Rule7[A, B, C, RD, RE, RF, G](append(other))
def ~[EE >: E, FF >: F, RE](other: ReductionRule2[EE, FF, RE]) = new Rule5[A, B, C, D, RE](append(other))
def ~[EE >: E, FF >: F, RE, RF](other: ReductionRule2_2[EE, FF, RE, RF]) = new Rule6[A, B, C, D, RE, RF](append(other))
def ~[EE >: E, FF >: F, RE, RF, G](other: ReductionRule2_3[EE, FF, RE, RF, G]) = new Rule7[A, B, C, D, RE, RF, G](append(other))
def ~[FF >: F, RF](other: ReductionRule1[FF, RF]) = new Rule6[A, B, C, D, E, RF](append(other))
def ~[FF >: F, RF, G](other: ReductionRule1_2[FF, RF, G]) = new Rule7[A, B, C, D, E, RF, G](append(other))
def ~[G](other: Rule1[G]) = new Rule7[A, B, C, D, E, F, G](append(other))
def ~:>[R](f: Char => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(GetMatchedChar, f))))
def ~>>[R](f: IndexRange => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(GetMatchRange, f))))
def ~>[R](f: String => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(GetMatch, f))))
  def ~~>[R](f: F => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack1(Pop), f))))
def ~~>[R](f: (E, F) => R) = new Rule5[A, B, C, D, R](append(push(exec(stack2(Pop), f))))
def ~~>[R](f: (D, E, F) => R) = new Rule4[A, B, C, R](append(push(exec(stack3(Pop), f))))
def ~~>[R](f: (C, D, E, F) => R) = new Rule3[A, B, R](append(push(exec(stack4(Pop), f))))
def ~~>[R](f: (B, C, D, E, F) => R) = new Rule2[A, R](append(push(exec(stack5(Pop), f))))
def ~~>[R](f: (A, B, C, D, E, F) => R) = new Rule1[R](append(push(exec(stack6(Pop), f))))
def ~~>[Z, R](f: (Z, A, B, C, D, E, F) => R) = new ReductionRule1[Z, R](append(push(exec(stack7(Pop), f))))
def ~~?(f: F => Boolean) = new Rule5[A, B, C, D, E](append(exec(stack1(Pop), f)))
def ~~?(f: (E, F) => Boolean) = new Rule4[A, B, C, D](append(exec(stack2(Pop), f)))
def ~~?(f: (D, E, F) => Boolean) = new Rule3[A, B, C](append(exec(stack3(Pop), f)))
def ~~?(f: (C, D, E, F) => Boolean) = new Rule2[A, B](append(exec(stack4(Pop), f)))
def ~~?(f: (B, C, D, E, F) => Boolean) = new Rule1[A](append(exec(stack5(Pop), f)))
def ~~?(f: (A, B, C, D, E, F) => Boolean) = new Rule0(append(exec(stack6(Pop), f)))
def ~~?[Z](f: (Z, A, B, C, D, E, F) => Boolean) = new PopRule1[Z](append(exec(stack7(Pop), f)))
def ~~%(f: F => Unit) = new Rule5[A, B, C, D, E](append(ok(exec(stack1(Pop), f))))
def ~~%(f: (E, F) => Unit) = new Rule4[A, B, C, D](append(ok(exec(stack2(Pop), f))))
def ~~%(f: (D, E, F) => Unit) = new Rule3[A, B, C](append(ok(exec(stack3(Pop), f))))
def ~~%(f: (C, D, E, F) => Unit) = new Rule2[A, B](append(ok(exec(stack4(Pop), f))))
def ~~%(f: (B, C, D, E, F) => Unit) = new Rule1[A](append(ok(exec(stack5(Pop), f))))
def ~~%(f: (A, B, C, D, E, F) => Unit) = new Rule0(append(ok(exec(stack6(Pop), f))))
def ~~%[Z](f: (Z, A, B, C, D, E, F) => Unit) = new PopRule1[Z](append(ok(exec(stack7(Pop), f))))
def ~~~>[R](f: F => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack1(Peek), f))))
def ~~~>[R](f: (E, F) => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack2(Peek), f))))
def ~~~>[R](f: (D, E, F) => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack3(Peek), f))))
def ~~~>[R](f: (C, D, E, F) => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack4(Peek), f))))
def ~~~>[R](f: (B, C, D, E, F) => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack5(Peek), f))))
def ~~~>[R](f: (A, B, C, D, E, F) => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack6(Peek), f))))
def ~~~?(f: F => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~?(f: (E, F) => Boolean) = withMatcher(append(exec(stack2(Peek), f)))
def ~~~?(f: (D, E, F) => Boolean) = withMatcher(append(exec(stack3(Peek), f)))
def ~~~?(f: (C, D, E, F) => Boolean) = withMatcher(append(exec(stack4(Peek), f)))
def ~~~?(f: (B, C, D, E, F) => Boolean) = withMatcher(append(exec(stack5(Peek), f)))
def ~~~?(f: (A, B, C, D, E, F) => Boolean) = withMatcher(append(exec(stack6(Peek), f)))
def ~~~%(f: F => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def ~~~%(f: (E, F) => Unit) = withMatcher(append(ok(exec(stack2(Peek), f))))
def ~~~%(f: (D, E, F) => Unit) = withMatcher(append(ok(exec(stack3(Peek), f))))
def ~~~%(f: (C, D, E, F) => Unit) = withMatcher(append(ok(exec(stack4(Peek), f))))
def ~~~%(f: (B, C, D, E, F) => Unit) = withMatcher(append(ok(exec(stack5(Peek), f))))
def ~~~%(f: (A, B, C, D, E, F) => Unit) = withMatcher(append(ok(exec(stack6(Peek), f))))
def |[AA >: A, BB >: B, CC >: C, DD >: D, EE >: E, FF >: F](other: Rule6[AA, BB, CC, DD, EE, FF]) = new Rule6[AA, BB, CC, DD, EE, FF](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule6[A, B, C, D, E, F](matcher).asInstanceOf[this.type]
}
/**
 * A rule pushing 7 new values of given types onto the parser's value stack.
*/
class Rule7[+A, +B, +C, +D, +E, +F, +G](val matcher: Matcher) extends PushRule {
  def ~[EE >: E, FF >: F, GG >: G](other: PopRule3[EE, FF, GG]) = new Rule4[A, B, C, D](append(other))
  def ~[FF >: F, GG >: G](other: PopRule2[FF, GG]) = new Rule5[A, B, C, D, E](append(other))
def ~[GG >: G](other: PopRule1[GG]) = new Rule6[A, B, C, D, E, F](append(other))
def ~[EE >: E, FF >: F, GG >: G, RE](other: ReductionRule3[EE, FF, GG, RE]) = new Rule5[A, B, C, D, RE](append(other))
def ~[EE >: E, FF >: F, GG >: G, RE, RF](other: ReductionRule3_2[EE, FF, GG, RE, RF]) = new Rule6[A, B, C, D, RE, RF](append(other))
def ~[EE >: E, FF >: F, GG >: G, RE, RF, RG](other: ReductionRule3_3[EE, FF, GG, RE, RF, RG]) = new Rule7[A, B, C, D, RE, RF, RG](append(other))
def ~[FF >: F, GG >: G, RF](other: ReductionRule2[FF, GG, RF]) = new Rule6[A, B, C, D, E, RF](append(other))
def ~[FF >: F, GG >: G, RF, RG](other: ReductionRule2_2[FF, GG, RF, RG]) = new Rule7[A, B, C, D, E, RF, RG](append(other))
def ~[GG >: G, RG](other: ReductionRule1[GG, RG]) = new Rule7[A, B, C, D, E, F, RG](append(other))
def ~~>[R](f: G => R) = new Rule7[A, B, C, D, E, F, R](append(push(exec(stack1(Pop), f))))
def ~~>[R](f: (F, G) => R) = new Rule6[A, B, C, D, E, R](append(push(exec(stack2(Pop), f))))
def ~~>[R](f: (E, F, G) => R) = new Rule5[A, B, C, D, R](append(push(exec(stack3(Pop), f))))
def ~~>[R](f: (D, E, F, G) => R) = new Rule4[A, B, C, R](append(push(exec(stack4(Pop), f))))
def ~~>[R](f: (C, D, E, F, G) => R) = new Rule3[A, B, R](append(push(exec(stack5(Pop), f))))
def ~~>[R](f: (B, C, D, E, F, G) => R) = new Rule2[A, R](append(push(exec(stack6(Pop), f))))
def ~~>[R](f: (A, B, C, D, E, F, G) => R) = new Rule1[R](append(push(exec(stack7(Pop), f))))
def ~~?(f: G => Boolean) = new Rule6[A, B, C, D, E, F](append(exec(stack1(Pop), f)))
def ~~?(f: (F, G) => Boolean) = new Rule5[A, B, C, D, E](append(exec(stack2(Pop), f)))
def ~~?(f: (E, F, G) => Boolean) = new Rule4[A, B, C, D](append(exec(stack3(Pop), f)))
def ~~?(f: (D, E, F, G) => Boolean) = new Rule3[A, B, C](append(exec(stack4(Pop), f)))
def ~~?(f: (C, D, E, F, G) => Boolean) = new Rule2[A, B](append(exec(stack5(Pop), f)))
def ~~?(f: (B, C, D, E, F, G) => Boolean) = new Rule1[A](append(exec(stack6(Pop), f)))
def ~~?(f: (A, B, C, D, E, F, G) => Boolean) = new Rule0(append(exec(stack7(Pop), f)))
def ~~%(f: G => Unit) = new Rule6[A, B, C, D, E, F](append(ok(exec(stack1(Pop), f))))
def ~~%(f: (F, G) => Unit) = new Rule5[A, B, C, D, E](append(ok(exec(stack2(Pop), f))))
def ~~%(f: (E, F, G) => Unit) = new Rule4[A, B, C, D](append(ok(exec(stack3(Pop), f))))
def ~~%(f: (D, E, F, G) => Unit) = new Rule3[A, B, C](append(ok(exec(stack4(Pop), f))))
def ~~%(f: (C, D, E, F, G) => Unit) = new Rule2[A, B](append(ok(exec(stack5(Pop), f))))
def ~~%(f: (B, C, D, E, F, G) => Unit) = new Rule1[A](append(ok(exec(stack6(Pop), f))))
def ~~%(f: (A, B, C, D, E, F, G) => Unit) = new Rule0(append(ok(exec(stack7(Pop), f))))
def ~~~?(f: G => Boolean) = withMatcher(append(exec(stack1(Peek), f)))
def ~~~?(f: (F, G) => Boolean) = withMatcher(append(exec(stack2(Peek), f)))
def ~~~?(f: (E, F, G) => Boolean) = withMatcher(append(exec(stack3(Peek), f)))
def ~~~?(f: (D, E, F, G) => Boolean) = withMatcher(append(exec(stack4(Peek), f)))
def ~~~?(f: (C, D, E, F, G) => Boolean) = withMatcher(append(exec(stack5(Peek), f)))
def ~~~?(f: (B, C, D, E, F, G) => Boolean) = withMatcher(append(exec(stack6(Peek), f)))
def ~~~?(f: (A, B, C, D, E, F, G) => Boolean) = withMatcher(append(exec(stack7(Peek), f)))
def ~~~%(f: G => Unit) = withMatcher(append(ok(exec(stack1(Peek), f))))
def ~~~%(f: (F, G) => Unit) = withMatcher(append(ok(exec(stack2(Peek), f))))
def ~~~%(f: (E, F, G) => Unit) = withMatcher(append(ok(exec(stack3(Peek), f))))
def ~~~%(f: (D, E, F, G) => Unit) = withMatcher(append(ok(exec(stack4(Peek), f))))
def ~~~%(f: (C, D, E, F, G) => Unit) = withMatcher(append(ok(exec(stack5(Peek), f))))
def ~~~%(f: (B, C, D, E, F, G) => Unit) = withMatcher(append(ok(exec(stack6(Peek), f))))
def ~~~%(f: (A, B, C, D, E, F, G) => Unit) = withMatcher(append(ok(exec(stack7(Peek), f))))
def |[AA >: A, BB >: B, CC >: C, DD >: D, EE >: E, FF >: F, GG >: G](other: Rule7[AA, BB, CC, DD, EE, FF, GG]) = new Rule7[AA, BB, CC, DD, EE, FF, GG](appendChoice(other))
protected def withMatcher(matcher: Matcher) = new Rule7[A, B, C, D, E, F, G](matcher).asInstanceOf[this.type]
}
|
sirthias/parboiled
|
parboiled-scala/src/main/scala/org/parboiled/scala/rules/PushRule.scala
|
Scala
|
apache-2.0
| 39,836 |
package paint
import core.host.{Host, HostPool}
import core.spatial.{Viewable, Zone}
import play.api.libs.json.Json
import scala.collection.mutable.ListBuffer
import scala.util.Random
class PaintHost(zone: Zone) extends Host(zone) {
val id2cameraNotifier = collection.mutable.HashMap[String, String]()
var elements = List[Point]() //TODO try to work with ListBuffer instead ?
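  // Illustrative only, not part of the original file: the shape of the JSON payload clientInput
  // expects. Field names are taken from the parsing below; the concrete values are made up.
  //   {"mousePos": {"x": 12.5, "y": 48.0}, "order": 3, "c": [255, 0, 0], "t": 4, "cut": 0, "erase": 0}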
override def clientInput(id :String, data: String) = {
val json = Json.parse(data)
    val x = (json \ "mousePos" \ "x").get.as[Double]
    val y = (json \ "mousePos" \ "y").get.as[Double]
    val order = (json \ "order").get.as[Int]
    val color = (json \ "c").get.as[Array[Int]]
    val thickness = (json \ "t").get.as[Int]
    val cut = (json \ "cut").get.as[Int]
    val erasing = (json \ "erase").get.as[Int]
if (erasing == 0) {
val point = new Point(id, x, y, order, color, thickness, cut)
elements = point :: elements
}
else {
      // if the area covered by the erasing point (x, y, thickness) is over the center of an existing point and the
      // client asking for the deletion is the same as the one which drew the existing point, then delete it;
      // before deleting, mark the break in the line: if the next element of the line is in this host, set its "cut"
      // attribute to 1 (left cut), otherwise set "cut" to 2 (right cut) on the previous element
elements.foreach(p => if((id == p.id) && (x - thickness < p.x) && (p.x < x + thickness) && (y - thickness < p.y)
&& (p.y < y + thickness)) {
        elements.find(np => np.order == p.order + 1) match {
          case Some(nextP) => nextP.cut = 1
          case None => elements.find(pp => pp.order == p.order - 1) match {
            case Some(prevP) => prevP.cut = 2
            case None =>
          }
        }
})
elements = elements.filterNot(p => (id == p.id) && (x - thickness < p.x) && (p.x < x + thickness)
&& (y - thickness < p.y) && (p.y < y + thickness))
}
HostPool[PaintHost,PaintHostObserver].hostObserver.call(ho => ho.associateToClientView(id, x, y))
}
  def getViewableFromZone(id: String, zone: Zone) = {
elements
}
}
|
DeltaIMT/Delta
|
paint_server/src/main/scala/paint/PaintHost.scala
|
Scala
|
mit
| 2,177 |
package ru.primetalk.synapse.core.components
import ru.primetalk.synapse.core.dsl.ContactsDsl
/**
* @author zhizhelev, 05.04.15.
*/
trait RttiContactExt extends ContactsDsl {
/** Contact with runtime type information.
   * It can be used for static analysis of the system, for example to check that Nop links connect compatible contacts.
*/
//TODO: move type information to extension
class RttiContact[T](name1: String = null)(implicit val classTag:scala.reflect.ClassTag[T]) extends Contact[T](name1)
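  // Illustrative only: because each RttiContact captures a ClassTag, a static check can compare
  // the tags of two contacts before wiring them with a Nop link, e.g.
  //   val in  = new RttiContact[Int]("in")
  //   val out = new RttiContact[Int]("out")
  //   in.classTag == out.classTag   // true, so a Nop link between them is type-compatible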
}
|
ppopoff/SynapseGrid
|
synapse-grid-core/src/main/scala/ru/primetalk/synapse/core/components/RttiContactExt.scala
|
Scala
|
bsd-2-clause
| 512 |
package com.ajjpj.adiagram_.ui.fw
import javafx.beans.property.SimpleStringProperty
import org.scalatest.{FunSuite, Matchers}
/**
* @author arno
*/
class DigestTest extends FunSuite with Matchers {
// test("bind target function") {
// val digest = new Digest()
//
// var source = "a"
// var target = "b"
//
// digest.bind((x: String) => target=x, source)
//
// target should be ("a")
//
// digest.execute {
// source = "x"
// }
//
// target should be ("x")
// }
//
// test("unbind target function") {
// val digest = new Digest()
//
// var source = "a"
// var target = "b"
//
// val setter = (x: String) => target=x
// digest.bind(setter, source)
//
// target should be ("a")
//
// intercept[IllegalArgumentException] {
// digest.unbind((x: String) => target=x) // in order to unbind, you must pass in the *same* function as to 'bind'!
// }
//
// digest.unbind(setter)
// digest.execute {
// source = "x"
// }
//
// target should be ("a")
// }
//
// test("bind property") {
// val digest = new Digest()
//
// val prop = new SimpleStringProperty()
// var value = "a"
//
// digest.bind(prop, value)
// prop.getValue should equal ("a")
//
// digest.execute {
// value = "b"
// }
//
// prop.getValue should equal ("b")
// }
//
// test("unbind property") {
// val digest = new Digest()
//
// val prop = new SimpleStringProperty()
// var value = "a"
//
// digest.bind(prop, value)
// prop.getValue should equal ("a")
//
// digest.unbind(prop)
//
// digest.execute {
// value = "b"
// }
//
// prop.getValue should equal ("a")
// }
//
// test("watch") {
// val digest = new Digest()
//
// var a = 1
// var changeCounter = 0
//
// val onChange = (i: Int) => {changeCounter += 1}
//
// digest.watch(a, onChange)
//
// a=2
// changeCounter should equal (0)
//
// digest.execute {
// a=3
// }
// changeCounter should equal (1)
//
// digest.execute {
// }
// changeCounter should equal (1)
// }
//
// test("check if property isBound") {
// val digest = new Digest()
// val prop = new SimpleStringProperty()
//
// digest.isBound(prop) should be (false)
//
// var value = "a"
// digest.bind(prop, value)
//
// digest.isBound(prop) should be (true)
// }
}
|
arnohaase/a-diagram
|
src/test/scala/com/ajjpj/adiagram_/ui/fw/DigestTest.scala
|
Scala
|
apache-2.0
| 2,536 |
package org.scalajs.openui5.sap.ui.layout
import org.scalajs.openui5.sap.ui.core._
import org.scalajs.openui5.util.{Settings, SettingsMap, noSettings}
import scala.scalajs.js
import scala.scalajs.js.annotation.{ScalaJSDefined, JSName}
@ScalaJSDefined
trait BlockLayoutCellSettings extends ControlSettings
object BlockLayoutCellSettings extends BlockLayoutCellSettingsBuilder(noSettings)
class BlockLayoutCellSettingsBuilder(val dict: SettingsMap)
extends Settings[BlockLayoutCellSettings, BlockLayoutCellSettingsBuilder](new BlockLayoutCellSettingsBuilder(_))
with BlockLayoutCellSetters[BlockLayoutCellSettings, BlockLayoutCellSettingsBuilder]
trait BlockLayoutCellSetters[T <: js.Object, B <: Settings[T,_]] extends ControlSetters[T, B] {
def title(v: String) = setting("title", v)
def titleAlignment(v: HorizontalAlign) = setting("titleAlignment", v)
def titleLevel(v: TitleLevel) = setting("titleLevel", v)
def width(v: Int) = setting("width", v)
  def content(v: js.Array[Control]) = setting("content", v)
}
/** The BlockLayoutCell is used as an aggregation of the BlockLayoutRow. It
* contains Controls. The BlockLayoutCell should be used only as aggregation
* of the BlockLayoutRow.
*
* @since 1.34
*/
@JSName("sap.ui.layout.BlockLayoutCell")
@js.native
class BlockLayoutCell(id: js.UndefOr[String] = js.native,
settings: js.UndefOr[BlockLayoutCellSettings])
extends Control {
def this(id: String) = this(id, js.undefined)
def this(settings: BlockLayoutCellSettings) = this(js.undefined, settings)
}
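// Hypothetical usage sketch, not part of the original file: constructing a cell from the settings
// builder defined above. It assumes the Settings machinery turns the finished builder chain into a
// BlockLayoutCellSettings (as the builder/object pattern above suggests); the literal values are made up.
//   val cell = new BlockLayoutCell(
//     BlockLayoutCellSettings
//       .title("Quarterly revenue")
//       .width(2)
//   )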
|
lastsys/scalajs-openui5
|
src/main/scala/org/scalajs/openui5/sap/ui/layout/BlockLayoutCell.scala
|
Scala
|
mit
| 1,567 |
val R = """\|....\|.(\d\d\d\d-\d\d-\d\d)[^\d]+(\d+)[^\d]+([\d.]+)[^\d]+([\d.]+)[^\d]+([\d.]+).*$""".r
case class Reading(readingDate:String, mileage:Int, miles:Double, litres:Double, cost:Double );
val readings = scala.io.Source.fromFile("/home/pperhac/temp/refuel.txt").getLines.map(l => l match {case R(d,tot,mi,l,c) => Reading(d ,tot.toInt, mi.toDouble, l.toDouble, c.toDouble) }).toList
val newVals = readings.drop(1).scanLeft(readings.head.mileage.toDouble) {(acc,e) => acc + e.miles}
|
PeterPerhac/cheat-sheets
|
scala-scratches/parse-text2caseclass-regex.scala
|
Scala
|
unlicense
| 491 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top.params
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 08.02.2008
*/
/*
* ClassParamClause ::= [nl] '(' 'implicit' ClassParam {',' ClassParam} ')'
*/
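/*
 * Illustrative example (not from the original file) of source text matched by this production,
 * i.e. a class parameter clause whose first token after '(' is 'implicit':
 *
 *   class Wrapper[T](implicit ordering: Ordering[T], tag: ClassTag[T])
 */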
object ImplicitClassParamClause extends ParsingRule {
override def parse(implicit builder: ScalaPsiBuilder): Boolean = {
val classParamMarker = builder.mark()
if (builder.twoNewlinesBeforeCurrentToken) {
classParamMarker.rollbackTo()
return false
}
//Look for '('
builder.getTokenType match {
case ScalaTokenTypes.tLPARENTHESIS =>
builder.advanceLexer() //Ate '('
builder.disableNewlines()
//Look for implicit
builder.getTokenType match {
case ScalaTokenTypes.kIMPLICIT =>
//It's ok
builder.advanceLexer() //Ate implicit
case _ =>
builder.error(ErrMsg("wrong.parameter"))
}
//ok, let's parse parameters
if (!ClassParam()) {
classParamMarker.rollbackTo()
builder.restoreNewlinesState()
return false
}
while (builder.getTokenType == ScalaTokenTypes.tCOMMA && !builder.consumeTrailingComma(ScalaTokenTypes.tRPARENTHESIS)) {
builder.advanceLexer() //Ate ,
if (!ClassParam()) {
builder.error(ErrMsg("wrong.parameter"))
}
}
case _ =>
classParamMarker.rollbackTo()
return false
}
//Look for ')'
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
case _ =>
builder.error(ErrMsg("rparenthesis.expected"))
}
builder.restoreNewlinesState()
classParamMarker.done(ScalaElementType.PARAM_CLAUSE)
true
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/top/params/ImplicitClassParamClause.scala
|
Scala
|
apache-2.0
| 1,951 |
/**
* MinCutMutable.scala
* Mike Abraham
*
* Given a tab-delimited file containing an adjacency list representation
* of a simple undirected graph with 200 vertices labeled 1 to 200,
* run a randomized contraction algorithm and compute the minimum cut.
*
* Since this algorithm doesn't guarantee a min cut,
* run the algorithm many times with different random seeds, and report the smallest cut found.
*
* Logic:
* Build an adjacency list:
* Vertex -> List of Connected Vertices
* A -> B, C, D, E
*
* Randomly select a connected vertex (J).
* Select its key as the vertex to contract into (I). I will remain, and J will disappear.
*
* For every vertex V where J is a connected vertex:
* - If the vertex V is I, add all of J's connections.
* - Now, walk all of V's connections:
* If the connection is J, change it to I.
* If the resulting connection equals I (self-loop), delete that connection.
*
* Remove the contracted vertex, J, along with all its connected vertices.
*
* Once only two vertices remain, the min cut is the sum of all their connections, divided by two.
 * Or, since only parallel connections will remain, the min cut is the number of either node's connections.
*
* Running time = O(n^2m).
*
 * The probability that the first contraction step preserves a particular min cut is at least 1 - 2/n,
 * the probability that the next step preserves it is at least 1 - 2/(n-1), and so on. Multiplying over
 * all n-2 steps, a single trial returns that min cut with probability at least 2 / (n(n-1)), roughly 2/n^2.
 *
 * The probability that N independent trials all miss the min cut is therefore at most (1 - 2/(n(n-1)))^N,
 * so on the order of n^2 * ln(n) trials are needed to drive the failure probability down to about 1/n.
*/
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.util.Random
object MinCutMutable {
def contract(graph: mutable.Map[String, ArrayBuffer[String]]): Int = {
while (graph.size > 2) {
val nRetain: (String, ArrayBuffer[String]) = Random.shuffle(graph).head
val kRemove: String = Random.shuffle(nRetain._2).head
//println("Node To Remove = " + kRemove + " Node to Retain = " + nRetain._1)
//println(graph.mkString(" | "))
// Walk each node in the graph.
for ((node, connections) <- graph) {
// For each connection specified in this node,
// if this connection is the node-to-remove, change it to the node-to-retain.
while (connections.contains(kRemove)) {
connections.update(connections.indexOf(kRemove), nRetain._1)
}
}
// Now find the node-to-remove and add all of its connections to the node-to-retain.
graph.get(kRemove) match {
case Some(connections) => {
nRetain._2 ++= connections
}
case None =>
}
// Now remove any self-loops from the node-to-retain.
while (nRetain._2.contains(nRetain._1)) {
nRetain._2 -= nRetain._1
}
// Remove the contracted node along with all of its connections.
graph.remove(kRemove)
//println(graph.mkString(" | "))
}
// We have 2 nodes remaining. MinCut is the number of connections of either node.
graph.head._2.size
}
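  // Illustrative helper, not part of the original file and not called by it: the header comment
  // notes that one contraction trial finds a particular min cut with probability at least
  // 2 / (n * (n - 1)), so N independent trials all miss it with probability at most
  // (1 - 2 / (n * (n - 1)))^N. This sketch computes how many trials push that bound below a
  // chosen failure probability.
  def trialsForFailureBound(n: Int, failureBound: Double = 0.01): Int = {
    val singleTrialSuccess = 2.0 / (n.toDouble * (n - 1))
    // Solve (1 - p)^N <= failureBound for N.
    math.ceil(math.log(failureBound) / math.log(1.0 - singleTrialSuccess)).toInt
  }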
def deepCopy(graph: mutable.Map[String, ArrayBuffer[String]]): mutable.Map[String, ArrayBuffer[String]] = {
var map: mutable.Map[String, ArrayBuffer[String]] = mutable.Map()
for ((node, connections) <- graph) {
map += node -> connections.clone
}
map
}
def test(graph: mutable.Map[String, ArrayBuffer[String]], iterations: Int) = {
println("")
println("START")
println("Iterations: " + iterations)
var minCut = graph.size - 1
for (i <- 1 to iterations) {
var clone = deepCopy(graph)
println("")
println("Iteration: " + i)
//println("Before: ")
//println(clone.mkString(" | "))
val cut = contract(clone)
if ((cut) < minCut) minCut = cut
//println("After: ")
//println(clone.mkString(" | "))
println("Cuts: " + cut)
println("")
}
println("Best Min Cut: " + minCut)
println("END")
println("")
}
def toAdjList(lines: List[String]): mutable.Map[String, ArrayBuffer[String]] = {
var map: mutable.Map[String, ArrayBuffer[String]] = mutable.Map()
lines.foreach(line => {
val items = line.split("\\t")
map += items(0) -> ArrayBuffer(items.slice(1, items.length)).flatten
})
map
}
def main(args: Array[String]): Unit = {
// This file is tab-delimited and is formatted as:
// vertex \\t adjacent vertex \\t adjacent vertex \\t etc....
// "6 155 56 52 120 ......".
// This means the vertex with label 6 is adjacent to (shares an edge with)
// the vertices with labels 155,56,52,120,......,etc
Random.setSeed(1451283490430598L)
//val graph = toAdjList(Source.fromFile("MinCutInputTo5.txt").getLines.toList)
val graph = toAdjList(Source.fromFile("MinCutInput.txt").getLines.toList)
test(graph, 2000)
}
}
|
mikeabraham/InterviewQuestionsInScala
|
src/main/scala/MinCutMutable.scala
|
Scala
|
unlicense
| 5,013 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.model
import com.netflix.atlas.core.model.DataExpr
/**
* A group of values for the same timestamp. This type is typically created as the result
* of using the `com.netflix.atlas.eval.stream.TimeGrouped` operator on the stream.
*
* The values map should always be non-empty and have datapoints for all entries. Empty
* entries should be omitted.
*
* @param timestamp
* Timestamp that applies to all values within the group.
* @param step
* Step size for the data within this group.
* @param dataExprValues
* Values associated with this time.
*/
case class TimeGroup(timestamp: Long, step: Long, dataExprValues: Map[DataExpr, AggrValuesInfo])
case class AggrValuesInfo(values: List[AggrDatapoint], numRawDatapoints: Int)
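// Illustrative shape only, not part of the original file: someDataExpr, dp1 and dp2 are
// placeholders, since the expression and datapoint constructors are defined elsewhere in this project.
//   TimeGroup(
//     timestamp = 1609459200000L,
//     step = 60000L,
//     dataExprValues = Map(someDataExpr -> AggrValuesInfo(List(dp1, dp2), numRawDatapoints = 2)))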
|
brharrington/atlas
|
atlas-eval/src/main/scala/com/netflix/atlas/eval/model/TimeGroup.scala
|
Scala
|
apache-2.0
| 1,392 |
package io.eels.component.parquet
import io.eels._
import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate}
import org.apache.parquet.io.api.Binary
object ParquetPredicateBuilder extends PredicateBuilder[FilterPredicate] {
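  /** Recursively translates an eel `Predicate` tree into the equivalent Parquet `FilterPredicate`:
    * `and`/`or`/`not` nodes are combined via `FilterApi`, and each leaf predicate dispatches on the
    * runtime type of its value (String, Long, Boolean, Float, Int or Double) to the matching typed
    * column operator.
    */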
override def build(predicate: Predicate): FilterPredicate = {
predicate match {
case OrPredicate(predicates) => predicates.map(build).reduceLeft((a, b) => FilterApi.or(a, b))
case AndPredicate(predicates) => predicates.map(build).reduceLeft((a, b) => FilterApi.and(a, b))
case NotPredicate(inner) => FilterApi.not(build(inner))
case NotEqualsPredicate(name: String, value: String) => FilterApi.notEq(FilterApi.binaryColumn(name), Binary.fromConstantByteArray(value.toString().getBytes))
case NotEqualsPredicate(name: String, value: Long) => FilterApi.notEq(FilterApi.longColumn(name), java.lang.Long.valueOf(value))
case NotEqualsPredicate(name: String, value: Boolean) => FilterApi.notEq(FilterApi.booleanColumn(name), java.lang.Boolean.valueOf(value))
case NotEqualsPredicate(name: String, value: Float) => FilterApi.notEq(FilterApi.floatColumn(name), java.lang.Float.valueOf(value))
case NotEqualsPredicate(name: String, value: Int) => FilterApi.notEq(FilterApi.intColumn(name), java.lang.Integer.valueOf(value))
case NotEqualsPredicate(name: String, value: Double) => FilterApi.notEq(FilterApi.doubleColumn(name), java.lang.Double.valueOf(value))
case EqualsPredicate(name: String, value: String) => FilterApi.eq(FilterApi.binaryColumn(name), Binary.fromConstantByteArray(value.toString().getBytes))
case EqualsPredicate(name: String, value: Long) => FilterApi.eq(FilterApi.longColumn(name), java.lang.Long.valueOf(value))
case EqualsPredicate(name: String, value: Boolean) => FilterApi.eq(FilterApi.booleanColumn(name), java.lang.Boolean.valueOf(value))
case EqualsPredicate(name: String, value: Float) => FilterApi.eq(FilterApi.floatColumn(name), java.lang.Float.valueOf(value))
case EqualsPredicate(name: String, value: Int) => FilterApi.eq(FilterApi.intColumn(name), java.lang.Integer.valueOf(value))
case EqualsPredicate(name: String, value: Double) => FilterApi.eq(FilterApi.doubleColumn(name), java.lang.Double.valueOf(value))
case LtPredicate(name, value: Double) => FilterApi.lt(FilterApi.doubleColumn(name), java.lang.Double.valueOf(value))
case LtPredicate(name, value: Float) => FilterApi.lt(FilterApi.floatColumn(name), java.lang.Float.valueOf(value))
case LtPredicate(name, value: Int) => FilterApi.lt(FilterApi.intColumn(name), java.lang.Integer.valueOf(value))
case LtPredicate(name, value: Long) => FilterApi.lt(FilterApi.longColumn(name), java.lang.Long.valueOf(value))
case LtePredicate(name, value: Double) => FilterApi.ltEq(FilterApi.doubleColumn(name), java.lang.Double.valueOf(value))
case LtePredicate(name, value: Float) => FilterApi.ltEq(FilterApi.floatColumn(name), java.lang.Float.valueOf(value))
case LtePredicate(name, value: Int) => FilterApi.ltEq(FilterApi.intColumn(name), java.lang.Integer.valueOf(value))
case LtePredicate(name, value: Long) => FilterApi.ltEq(FilterApi.longColumn(name), java.lang.Long.valueOf(value))
case GtPredicate(name, value: Double) => FilterApi.gt(FilterApi.doubleColumn(name), java.lang.Double.valueOf(value))
case GtPredicate(name, value: Float) => FilterApi.gt(FilterApi.floatColumn(name), java.lang.Float.valueOf(value))
case GtPredicate(name, value: Int) => FilterApi.gt(FilterApi.intColumn(name), java.lang.Integer.valueOf(value))
case GtPredicate(name, value: Long) => FilterApi.gt(FilterApi.longColumn(name), java.lang.Long.valueOf(value))
case GtePredicate(name, value: Double) => FilterApi.gtEq(FilterApi.doubleColumn(name), java.lang.Double.valueOf(value))
case GtePredicate(name, value: Float) => FilterApi.gtEq(FilterApi.floatColumn(name), java.lang.Float.valueOf(value))
case GtePredicate(name, value: Int) => FilterApi.gtEq(FilterApi.intColumn(name), java.lang.Integer.valueOf(value))
case GtePredicate(name, value: Long) => FilterApi.gtEq(FilterApi.longColumn(name), java.lang.Long.valueOf(value))
}
}
}
|
stheppi/eel
|
eel-components/src/main/scala/io/eels/component/parquet/ParquetPredicateBuilder.scala
|
Scala
|
apache-2.0
| 4,232 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.tf.loaders
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator
class DigammaSpec extends UnaryOpBaseSpec {
override def doBefore(): Unit = {
super.doBefore()
RandomGenerator.RNG.setSeed(1L)
}
override def getOpName: String = "Digamma"
override def getInput: Tensor[_] = Tensor[Float](4, 32, 32, 3).rand()
override def getDelta: Double = 1e-3
}
|
yiheng/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/utils/tf/loaders/DigammaSpec.scala
|
Scala
|
apache-2.0
| 1,060 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.sql.Timestamp
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.scalatest.Assertions._
import org.scalatest.BeforeAndAfterEach
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.{SparkException, TaskContext, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Column
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest, UnaryExecNode}
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types.StringType
class ScriptTransformationSuite extends SparkPlanTest with SQLTestUtils with TestHiveSingleton
with BeforeAndAfterEach {
import spark.implicits._
private val noSerdeIOSchema = HiveScriptIOSchema(
inputRowFormat = Seq.empty,
outputRowFormat = Seq.empty,
inputSerdeClass = None,
outputSerdeClass = None,
inputSerdeProps = Seq.empty,
outputSerdeProps = Seq.empty,
recordReaderClass = None,
recordWriterClass = None,
schemaLess = false
)
private val serdeIOSchema = noSerdeIOSchema.copy(
inputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName),
outputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName)
)
private var defaultUncaughtExceptionHandler: Thread.UncaughtExceptionHandler = _
private val uncaughtExceptionHandler = new TestUncaughtExceptionHandler
protected override def beforeAll(): Unit = {
super.beforeAll()
defaultUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
}
protected override def afterAll(): Unit = {
super.afterAll()
Thread.setDefaultUncaughtExceptionHandler(defaultUncaughtExceptionHandler)
}
override protected def afterEach(): Unit = {
super.afterEach()
uncaughtExceptionHandler.cleanStatus()
}
test("cat without SerDe") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = child,
ioschema = noSerdeIOSchema
),
rowsDf.collect())
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("cat with LazySimpleSerDe") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = child,
ioschema = serdeIOSchema
),
rowsDf.collect())
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("script transformation should not swallow errors from upstream operators (no serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[TestFailedException] {
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = ExceptionInjectingOperator(child),
ioschema = noSerdeIOSchema
),
rowsDf.collect())
}
assert(e.getMessage().contains("intentional exception"))
// Before SPARK-25158, uncaughtExceptionHandler will catch IllegalArgumentException
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("script transformation should not swallow errors from upstream operators (with serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[TestFailedException] {
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = ExceptionInjectingOperator(child),
ioschema = serdeIOSchema
),
rowsDf.collect())
}
assert(e.getMessage().contains("intentional exception"))
// Before SPARK-25158, uncaughtExceptionHandler will catch IllegalArgumentException
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-14400 script transformation should fail for bad script command") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[SparkException] {
val plan =
new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "some_non_existent_command",
output = Seq(AttributeReference("a", StringType)()),
child = rowsDf.queryExecution.sparkPlan,
ioschema = serdeIOSchema)
SparkPlanTest.executePlan(plan, hiveContext)
}
assert(e.getMessage.contains("Subprocess exited with status"))
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-24339 verify the result after pruning the unused columns") {
val rowsDf = Seq(
("Bob", 16, 176),
("Alice", 32, 164),
("David", 60, 192),
("Amy", 24, 180)).toDF("name", "age", "height")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("name").expr),
script = "cat",
output = Seq(AttributeReference("name", StringType)()),
child = child,
ioschema = serdeIOSchema
),
rowsDf.select("name").collect())
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-25990: TRANSFORM should handle different data types correctly") {
assume(TestUtils.testCommandAvailable("python"))
val scriptFilePath = getTestResourcePath("test_script.py")
withTempView("v") {
val df = Seq(
(1, "1", 1.0, BigDecimal(1.0), new Timestamp(1)),
(2, "2", 2.0, BigDecimal(2.0), new Timestamp(2)),
(3, "3", 3.0, BigDecimal(3.0), new Timestamp(3))
).toDF("a", "b", "c", "d", "e") // Note column d's data type is Decimal(38, 18)
df.createTempView("v")
val query = sql(
s"""
|SELECT
|TRANSFORM(a, b, c, d, e)
|USING 'python $scriptFilePath' AS (a, b, c, d, e)
|FROM v
""".stripMargin)
// In Hive 1.2, the string representation of a decimal omits trailing zeroes.
// But in Hive 2.3, it is always padded to 18 digits with trailing zeroes if necessary.
val decimalToString: Column => Column = if (HiveUtils.isHive23) {
c => c.cast("string")
} else {
c => c.cast("decimal(1, 0)").cast("string")
}
checkAnswer(query, identity, df.select(
'a.cast("string"),
'b.cast("string"),
'c.cast("string"),
decimalToString('d),
'e.cast("string")).collect())
}
}
test("SPARK-30973: TRANSFORM should wait for the termination of the script (no serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[SparkException] {
val plan =
new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "some_non_existent_command",
output = Seq(AttributeReference("a", StringType)()),
child = rowsDf.queryExecution.sparkPlan,
ioschema = noSerdeIOSchema)
SparkPlanTest.executePlan(plan, hiveContext)
}
assert(e.getMessage.contains("Subprocess exited with status"))
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-30973: TRANSFORM should wait for the termination of the script (with serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[SparkException] {
val plan =
new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "some_non_existent_command",
output = Seq(AttributeReference("a", StringType)()),
child = rowsDf.queryExecution.sparkPlan,
ioschema = serdeIOSchema)
SparkPlanTest.executePlan(plan, hiveContext)
}
assert(e.getMessage.contains("Subprocess exited with status"))
assert(uncaughtExceptionHandler.exception.isEmpty)
}
}
private case class ExceptionInjectingOperator(child: SparkPlan) extends UnaryExecNode {
override protected def doExecute(): RDD[InternalRow] = {
child.execute().map { x =>
assert(TaskContext.get() != null) // Make sure that TaskContext is defined.
Thread.sleep(1000) // This sleep gives the external process time to start.
throw new IllegalArgumentException("intentional exception")
}
}
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
}
|
spark-test/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
|
Scala
|
apache-2.0
| 10,348 |
package functional
import java.text.SimpleDateFormat
import play.api.test._
import play.api.test.Helpers._
import play.api.i18n.MessagesApi
import play.api.i18n.{Lang, Messages, MessagesImpl, MessagesProvider}
import java.time.Instant
import play.api.{Application => PlayApp}
import play.api.inject.guice.GuiceApplicationBuilder
import helpers.InjectorSupport
import play.api.db.Database
import org.specs2.mutable.Specification
import models._
import java.sql.Connection
import LocaleInfo._
import play.api.db._
import helpers.Helper._
import helpers.ViewHelpers
import java.util.concurrent.TimeUnit
import com.ruimo.scoins.Scoping._
import SeleniumHelpers.FirefoxJa
class OrderHistorySpec extends Specification with InjectorSupport {
case class Tran(
now: Long,
tranHeader: TransactionLogHeader,
tranSiteHeader: Seq[TransactionLogSite],
transporter1: Transporter,
transporter2: Transporter,
transporterName1: TransporterName,
transporterName2: TransporterName,
address: Address,
itemPriceHistory: Seq[ItemPriceHistory]
)
"Order history" should {
"Show login user's order history" in new WithBrowser(
WebDriverFactory(CHROME), appl()
){
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val user = loginWithTestUser(browser)
val tran = createTransaction(lang, user)
browser.goTo(
controllers.routes.OrderHistory.showOrderHistory() + "?lang=" + lang.code
)
browser.webDriver.getTitle === Messages("commonTitle", Messages("order.history.title"))
browser.find(".orderHistoryInnerTable1").size === 2
doWith(browser.find(".orderHistoryInnerTable1")) { b =>
b.find(".transactionTime td").text ===
"%1$tY/%1$tm/%1$td %1$tH:%1$tM".format(tran.now)
b.find(".tranNo").text === tran.tranHeader.id.get.toString
val user = inject[StoreUserRepo].apply(tran.tranHeader.userId)
b.find(".buyerName").text === user.firstName + " " + user.lastName
if (b.find(".subtotal").index(0).text == ViewHelpers.toAmount(tran.tranSiteHeader(0).totalAmount)) {
b.find(".subtotal").index(1).text === ViewHelpers.toAmount(tran.tranSiteHeader(1).totalAmount)
}
else {
b.find(".subtotal").index(0).text === ViewHelpers.toAmount(tran.tranSiteHeader(1).totalAmount)
b.find(".subtotal").index(1).text === ViewHelpers.toAmount(tran.tranSiteHeader(0).totalAmount)
}
b.find(".outerTaxAmount").text === ViewHelpers.toAmount(0)
if (b.find(".total").index(0).text == ViewHelpers.toAmount(tran.tranSiteHeader(0).totalAmount)) {
b.find(".total").index(1).text === ViewHelpers.toAmount(tran.tranSiteHeader(1).totalAmount)
}
else {
b.find(".total").index(0).text === ViewHelpers.toAmount(tran.tranSiteHeader(1).totalAmount)
b.find(".total").index(1).text === ViewHelpers.toAmount(tran.tranSiteHeader(0).totalAmount)
}
}
doWith(browser.find(".shippingAddressTable")) { b =>
b.find(".name").text === tran.address.firstName + " " + tran.address.lastName
b.find(".zip").text === tran.address.zip1 + " - " + tran.address.zip2
b.find(".prefecture").text === tran.address.prefecture.toString
b.find(".address1").text === tran.address.address1
b.find(".address2").text === tran.address.address2
b.find(".tel1").text === tran.address.tel1
b.find(".comment").text === tran.address.comment
}
doWith(browser.find(".orderHistoryInnerTable2")) { b =>
b.find(".status").text === Messages("transaction.status.ORDERED")
if (b.find(".shippingDate").index(0).text ==
new SimpleDateFormat(Messages("shipping.date.format")).format(
new java.util.Date(date("2013-02-03").toEpochMilli)
)
) {
b.find(".shippingDate").index(1).text ===
new SimpleDateFormat(Messages("shipping.date.format")).format(
new java.util.Date(date("2013-02-04").toEpochMilli)
)
}
else {
b.find(".shippingDate").index(0).text ===
new SimpleDateFormat(Messages("shipping.date.format")).format(
new java.util.Date(date("2013-02-04").toEpochMilli)
)
b.find(".shippingDate").index(1).text ===
new SimpleDateFormat(Messages("shipping.date.format")).format(
new java.util.Date(date("2013-02-03").toEpochMilli)
)
}
}
val (tran0, tran1) = if (
browser.find(".orderHistoryInnerTable3").find("td.itemName").index(0).text == "植木1"
) (0, 1) else (1, 0)
doWith(browser.find(".orderHistoryInnerTable3")) { b =>
b.size === 2
b.get(tran0).find("td.unitPrice").index(0).text === "100円"
b.get(tran0).find("td.quantity").index(0).text === "3"
b.get(tran0).find("td.price").index(0).find(".body").text === "300円"
b.get(tran0).find("td.itemName").index(1).text === "植木3"
b.get(tran0).find("td.unitPrice").index(1).text === "300円"
b.get(tran0).find("td.quantity").index(1).text === "7"
b.get(tran0).find("td.price").index(1).find(".body").text === "2,100円"
b.get(tran0).find("td.subtotalBody").find(".body").text === "2,400円"
b.get(tran1).find("td.unitPrice").text === "200円"
b.get(tran1).find("td.quantity").text === "5"
b.get(tran1).find("td.price").find(".body").text === "1,000円"
b.get(tran1).find("td.itemName").index(0).text === "植木2"
b.get(tran1).find("td.subtotalBody").find(".body").text === "1,000円"
}
val (box0, box1) = if (
browser.find(".orderHistoryInnerTable4").index(0).find("td.boxName").text == "site-box1"
) (0, 1) else (1, 0)
doWith(browser.find(".orderHistoryInnerTable4").index(box0)) { b =>
b.find("td.boxName").text === "site-box1"
b.find("td.boxPrice").text === "123円"
b.find("td.subtotalBody").text === "123円"
}
doWith(browser.find(".orderHistoryInnerTable4").index(box1)) { b =>
b.find("td.boxName").text === "site-box2"
b.find("td.boxPrice").text === "468円"
b.find("td.subtotalBody").text === "468円"
}
doWith(browser.find(".orderHistoryInnerTable1").get(1)) { b =>
b.find(".transactionTime").text ===
"%1$tY/%1$tm/%1$td %1$tH:%1$tM".format(tran.now)
b.find(".tranNo").text === tran.tranHeader.id.get.toString
val user = inject[StoreUserRepo].apply(tran.tranHeader.userId)
b.find(".buyerName").text === user.firstName + " " + user.lastName
}
browser.find(".orderHistoryInnerTable1").index(tran1).find(".subtotal").text === ViewHelpers.toAmount(1468)
browser.find(".orderHistoryInnerTable1").index(tran0).find(".subtotal").text === ViewHelpers.toAmount(2523)
browser.find(".orderHistoryInnerTable1").find(".outerTaxAmount").text === ViewHelpers.toAmount(0)
browser.find(".orderHistoryInnerTable1").index(tran1).find(".total").text === ViewHelpers.toAmount(1468)
browser.find(".orderHistoryInnerTable1").index(tran0).find(".total").text === ViewHelpers.toAmount(2523)
doWith(browser.find(".shippingAddressTable").get(1)) { b =>
b.find(".name").text === tran.address.firstName + " " + tran.address.lastName
b.find(".zip").text === tran.address.zip1 + " - " + tran.address.zip2
b.find(".prefecture").text === tran.address.prefecture.toString
b.find(".address1").text === tran.address.address1
b.find(".address2").text === tran.address.address2
b.find(".tel1").text === tran.address.tel1
b.find(".comment").text === tran.address.comment
}
doWith(browser.find(".orderHistoryInnerTable2").index(tran1)) { b =>
b.find(".status").text === Messages("transaction.status.ORDERED")
b.find(".shippingDate").text ===
new SimpleDateFormat(Messages("shipping.date.format")).format(
new java.util.Date(date("2013-02-04").toEpochMilli)
)
}
doWith(browser.find(".orderHistoryInnerTable2").index(tran0)) { b =>
b.find(".status").text === Messages("transaction.status.ORDERED")
b.find(".shippingDate").text ===
new SimpleDateFormat(Messages("shipping.date.format")).format(
new java.util.Date(date("2013-02-03").toEpochMilli)
)
}
doWith(browser.find(".orderHistoryInnerTable3").index(tran1)) { b =>
b.find("td.itemName").text === "植木2"
b.find("td.unitPrice").text === "200円"
b.find("td.quantity").text === "5"
b.find("td.price").find(".body").text === "1,000円"
b.find("td.subtotalBody").find(".body").text === "1,000円"
}
doWith(browser.find(".orderHistoryInnerTable3").index(tran0)) { b =>
b.find("td.itemName").index(0).text === "植木1"
b.find("td.unitPrice").index(0).text === "100円"
b.find("td.quantity").index(0).text === "3"
b.find("td.price").index(0).find(".body").text === "300円"
b.find("td.subtotalBody").find(".body").text === "2,400円"
}
doWith(browser.find(".orderHistoryInnerTable4").index(tran1)) { b =>
b.find("td.boxName").text === "site-box2"
b.find("td.boxPrice").text === "468円"
b.find("td.subtotalBody").text === "468円"
}
doWith(browser.find(".orderHistoryInnerTable4").index(tran0)) { b =>
b.find("td.boxName").text === "site-box1"
b.find("td.boxPrice").text === "123円"
b.find("td.subtotalBody").text === "123円"
}
}
}
"Show login user's order history list" in new WithBrowser(
WebDriverFactory(CHROME), appl()
) {
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val user = loginWithTestUser(browser)
val tran = createTransaction(lang, user)
browser.goTo(
controllers.routes.OrderHistory.showOrderHistoryList() + "?lang=" + lang.code
)
browser.await().atMost(5, TimeUnit.SECONDS).untilPage().isLoaded()
browser.webDriver.getTitle === Messages("commonTitle", Messages("order.history.list.title"))
doWith(browser.find(".orderHistoryTable")) { b =>
b.find(".transactionId").index(0).text === tran.tranHeader.id.get.toString
b.find(".transactionDate").index(0).text === "%1$tY/%1$tm/%1$td %1$tH:%1$tM".format(tran.now)
b.find(".siteName").index(0).text === "商店1"
b.find(".price").index(0).text === "2,523円"
b.find(".transactionId").index(1).text === tran.tranHeader.id.get.toString
b.find(".transactionDate").index(1).text === "%1$tY/%1$tm/%1$td %1$tH:%1$tM".format(tran.now)
b.find(".siteName").index(1).text === "商店2"
b.find(".price").index(1).text === "1,468円"
}
browser.find(".transactionId").index(0).find("a").click()
browser.await().atMost(5, TimeUnit.SECONDS).untilPage().isLoaded()
browser.webDriver.getTitle === Messages("commonTitle", Messages("order.history.title"))
browser.find(".subtotal").index(0).text === "2,523円"
browser.find(".subtotal").index(1).text === "1,468円"
}
}
"Can put an item that is bought before into shopping cart" in new WithBrowser(
WebDriverFactory(CHROME), appl()
) {
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val user = loginWithTestUser(browser)
val tran = createTransaction(lang, user)
browser.goTo(
controllers.routes.Purchase.clear() + "?lang=" + lang.code
)
browser.goTo(
controllers.routes.OrderHistory.showOrderHistory() + "?lang=" + lang.code
)
browser.webDriver.getTitle === Messages("commonTitle", Messages("order.history.title"))
browser.find(".orderHistoryInnerTable3").size === 2
if (browser.find(".orderHistoryInnerTable3").index(0).find("td.itemName").text == "植木1") {
browser.find(".orderHistoryInnerTable3").index(0).find("button").get(0).click()
}
else {
browser.find(".orderHistoryInnerTable3").index(1).find("button").get(0).click()
}
browser.waitUntil(
failFalse(browser.find(".ui-dialog-buttonset").first().displayed())
)
browser.find(".ui-dialog-titlebar").find("span.ui-dialog-title").text === Messages("shopping.cart")
doWith(browser.find("#cartDialogContent")) { b =>
b.find("td.itemName").text === "植木1"
b.find("td.siteName").text === "商店1"
b.find("td.unitPrice").text === "100円"
b.find("td.quantity").text === "3"
b.find("td.price").text === "300円"
}
browser.find(".ui-dialog-buttonset").find("button").get(0).click()
browser.waitUntil(
failFalse(! browser.find(".ui-dialog-buttonset").first().displayed())
)
if (browser.find(".orderHistoryInnerTable3").index(0).find("td.itemName").text == "植木1") {
browser.find(".orderHistoryInnerTable3").index(0).find("button").get(2).click()
}
else {
browser.find(".orderHistoryInnerTable3").index(1).find("button").get(2).click()
}
browser.waitUntil(
failFalse(browser.find(".ui-dialog-buttonset").first().displayed())
)
browser.find(".ui-dialog-titlebar").find("span.ui-dialog-title").text === Messages("shopping.cart")
doWith(browser.find("#cartDialogCurrentContent")) { b =>
b.find("td.itemName").text === "植木1"
b.find("td.siteName").text === "商店1"
b.find("td.unitPrice").text === "100円"
b.find("td.quantity").text === "6"
b.find("td.price").text === "600円"
b.find("td.itemName").get(1).text === "植木3"
b.find("td.siteName").get(1).text === "商店1"
b.find("td.unitPrice").get(1).text === "300円"
b.find("td.quantity").get(1).text === "7"
b.find("td.price").get(1).text === "2,100円"
}
browser.find(".ui-dialog-buttonset").find("button").get(0).click()
browser.waitUntil(
failFalse(! browser.find(".ui-dialog-buttonset").first().displayed())
)
if (browser.find(".orderHistoryInnerTable3").index(0).find("td.itemName").text == "植木1") {
browser.find(".orderHistoryInnerTable3").index(1).find("button").get(0).click()
}
else {
browser.find(".orderHistoryInnerTable3").index(0).find("button").get(0).click()
}
browser.waitUntil(
failFalse(browser.find(".ui-dialog-buttonset").first().displayed())
)
browser.find(".ui-dialog-titlebar").find("span.ui-dialog-title").text === Messages("shopping.cart")
doWith(browser.find("#cartDialogCurrentContent")) { b =>
b.find("td.itemName").text === "植木1"
b.find("td.siteName").text === "商店1"
b.find("td.unitPrice").text === "100円"
b.find("td.quantity").text === "6"
b.find("td.price").text === "600円"
b.find("td.itemName").get(1).text === "植木3"
b.find("td.siteName").get(1).text === "商店1"
b.find("td.unitPrice").get(1).text === "300円"
b.find("td.quantity").get(1).text === "7"
b.find("td.price").get(1).text === "2,100円"
b.find("td.itemName").get(2).text === "植木2"
b.find("td.siteName").get(2).text === "商店2"
b.find("td.unitPrice").get(2).text === "200円"
b.find("td.quantity").get(2).text === "5"
b.find("td.price").get(2).text === "1,000円"
}
}
}
"Can put an item that is bought before into shopping cart and expired." in new WithBrowser(
WebDriverFactory(CHROME), appl()
) {
inject[Database].withConnection { implicit conn =>
val currencyInfo = inject[CurrencyRegistry]
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
implicit val lang = Lang("ja")
implicit val storeUserRepo = inject[StoreUserRepo]
val Messages = inject[MessagesApi]
implicit val mp: MessagesProvider = new MessagesImpl(lang, Messages)
val user = loginWithTestUser(browser)
val tran = createTransaction(lang, user)
browser.goTo(
controllers.routes.Purchase.clear() + "?lang=" + lang.code
)
browser.goTo(
controllers.routes.OrderHistory.showOrderHistory() + "?lang=" + lang.code
)
browser.webDriver.getTitle === Messages("commonTitle", Messages("order.history.title"))
browser.find(".orderHistoryInnerTable3").size === 2
// Expire 植木1
doWith(tran.itemPriceHistory.head) { iph =>
inject[ItemPriceHistoryRepo].update(
iph.id.get, iph.taxId, iph.currency.id, iph.unitPrice, iph.listPrice, iph.costPrice,
Instant.ofEpochMilli(System.currentTimeMillis - 10000)
)
}
if (browser.find(".orderHistoryInnerTable3").index(0).find("td.itemName").text == "植木1") {
browser.find(".orderHistoryInnerTable3").index(0).find("button").get(0).click()
}
else {
browser.find(".orderHistoryInnerTable3").index(1).find("button").get(0).click()
}
browser.await().atMost(30, TimeUnit.SECONDS).untilPage().isLoaded()
browser.waitUntil(
failFalse(browser.webDriver.getTitle == Messages("commonTitle", Messages("itemExpiredTitle")))
)
browser.webDriver.getTitle === Messages("commonTitle", Messages("itemExpiredTitle"))
browser.find(".expiredItemRow").size === 1
browser.find(".expiredItemRow .siteName").text === "商店1"
browser.find(".expiredItemRow .itemName").text === "植木1"
browser.find("#removeExpiredItemsButton").click()
browser.find(".shoppingCartEmpty").text === Messages("shopping.cart.empty")
}
}
}
def createTransaction(lang: Lang, user: StoreUser)(implicit conn: Connection, app: PlayApp): Tran = {
val localeInfo = inject[LocaleInfoRepo]
import localeInfo.{En, Ja}
val currencyInfo = inject[CurrencyRegistry]
implicit val storeUserRepo = inject[StoreUserRepo]
implicit val taxRepo = inject[TaxRepo]
val tax = taxRepo.createNew
val taxHis = inject[TaxHistoryRepo].createNew(tax, TaxType.INNER_TAX, BigDecimal("5"), date("9999-12-31"))
val site1 = inject[SiteRepo].createNew(Ja, "商店1")
val site2 = inject[SiteRepo].createNew(Ja, "商店2")
val cat1 = inject[CategoryRepo].createNew(
Map(Ja -> "植木", En -> "Plant")
)
val item1 = inject[ItemRepo].createNew(cat1)
val item2 = inject[ItemRepo].createNew(cat1)
val item3 = inject[ItemRepo].createNew(cat1)
inject[SiteItemRepo].createNew(site1, item1)
inject[SiteItemRepo].createNew(site2, item2)
inject[SiteItemRepo].createNew(site1, item3)
inject[SiteItemNumericMetadataRepo].createNew(site1.id.get, item1.id.get, SiteItemNumericMetadataType.SHIPPING_SIZE, 1L)
inject[SiteItemNumericMetadataRepo].createNew(site2.id.get, item2.id.get, SiteItemNumericMetadataType.SHIPPING_SIZE, 1L)
inject[SiteItemNumericMetadataRepo].createNew(site1.id.get, item3.id.get, SiteItemNumericMetadataType.SHIPPING_SIZE, 1L)
val itemName1 = inject[ItemNameRepo].createNew(item1, Map(Ja -> "植木1"))
val itemName2 = inject[ItemNameRepo].createNew(item2, Map(Ja -> "植木2"))
val itemName3 = inject[ItemNameRepo].createNew(item3, Map(Ja -> "植木3"))
val itemDesc1 = inject[ItemDescriptionRepo].createNew(item1, site1, "desc1")
val itemDesc2 = inject[ItemDescriptionRepo].createNew(item2, site2, "desc2")
val itemDesc3 = inject[ItemDescriptionRepo].createNew(item3, site1, "desc3")
val itemPrice1 = inject[ItemPriceRepo].createNew(item1, site1)
val itemPrice2 = inject[ItemPriceRepo].createNew(item2, site2)
val itemPrice3 = inject[ItemPriceRepo].createNew(item3, site1)
val itemPriceHis1 = inject[ItemPriceHistoryRepo].createNew(
itemPrice1, tax, currencyInfo.Jpy, BigDecimal("100"), None, BigDecimal("90"), date("9999-12-31")
)
val itemPriceHis2 = inject[ItemPriceHistoryRepo].createNew(
itemPrice2, tax, currencyInfo.Jpy, BigDecimal("200"), None, BigDecimal("190"), date("9999-12-31")
)
val itemPriceHis3 = inject[ItemPriceHistoryRepo].createNew(
itemPrice3, tax, currencyInfo.Jpy, BigDecimal("300"), None, BigDecimal("290"), date("9999-12-31")
)
val shoppingCartItem1 = inject[ShoppingCartItemRepo].addItem(user.id.get, site1.id.get, item1.id.get.id, 3)
val shoppingCartItem2 = inject[ShoppingCartItemRepo].addItem(user.id.get, site2.id.get, item2.id.get.id, 5)
val shoppingCartItem3 = inject[ShoppingCartItemRepo].addItem(user.id.get, site1.id.get, item3.id.get.id, 7)
val addr1 = Address.createNew(
countryCode = CountryCode.JPN,
firstName = "firstName1",
lastName = "lastName1",
zip1 = "zip1",
zip2 = "zip2",
prefecture = JapanPrefecture.東京都,
address1 = "address1-1",
address2 = "address1-2",
tel1 = "tel1-1",
comment = "comment1"
)
val trans1 = inject[TransporterRepo].createNew
val trans2 = inject[TransporterRepo].createNew
val transName1 = inject[TransporterNameRepo].createNew(
trans1.id.get, Ja, "トマト運輸"
)
val transName2 = inject[TransporterNameRepo].createNew(
trans2.id.get, Ja, "ヤダワ急便"
)
val box1 = inject[ShippingBoxRepo].createNew(site1.id.get, 1L, 3, "site-box1")
val box2 = inject[ShippingBoxRepo].createNew(site2.id.get, 1L, 2, "site-box2")
val fee1 = inject[ShippingFeeRepo].createNew(box1.id.get, CountryCode.JPN, JapanPrefecture.東京都.code)
val fee2 = inject[ShippingFeeRepo].createNew(box2.id.get, CountryCode.JPN, JapanPrefecture.東京都.code)
val feeHis1 = inject[ShippingFeeHistoryRepo].createNew(
fee1.id.get, tax.id.get, BigDecimal(123), None, date("9999-12-31")
)
val feeHis2 = inject[ShippingFeeHistoryRepo].createNew(
fee2.id.get, tax.id.get, BigDecimal(234), None, date("9999-12-31")
)
val now = System.currentTimeMillis
val shippingTotal1 = inject[ShippingFeeHistoryRepo].feeBySiteAndItemClass(
CountryCode.JPN, JapanPrefecture.東京都.code,
ShippingFeeEntries().add(site1, 1L, 3).add(site2, 1L, 4),
Instant.ofEpochMilli(now)
)
val shippingDate1 = ShippingDate(
Map(
site1.id.get -> ShippingDateEntry(site1.id.get, date("2013-02-03")),
site2.id.get -> ShippingDateEntry(site2.id.get, date("2013-02-04"))
)
)
val (cartTotal: ShoppingCartTotal, errors: Seq[ItemExpiredException]) =
inject[ShoppingCartItemRepo].listItemsForUser(Ja, LoginSession(user, None, 0))
val tranId = inject[TransactionPersister].persist(
Transaction(
user.id.get, currencyInfo.Jpy, cartTotal, Some(addr1), shippingTotal1, shippingDate1, Instant.ofEpochMilli(now)
)
)
val tranList = TransactionLogHeader.list()
val tranSiteList = inject[TransactionLogSiteRepo].list()
Tran(
now,
tranList(0),
tranSiteList,
transporter1 = trans1,
transporter2 = trans2,
transporterName1 = transName1,
transporterName2 = transName2,
addr1,
itemPriceHis1::itemPriceHis2::itemPriceHis3::Nil
)
}
}
|
ruimo/store2
|
test/functional/OrderHistorySpec.scala
|
Scala
|
apache-2.0
| 25,081 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.models.application.debts
import iht.testhelpers.{AssetsWithAllSectionsSetToNoBuilder, CommonBuilder, TestHelper}
import org.scalatestplus.mockito.MockitoSugar
import common.CommonPlaySpec
class AllLiabilitiesTest extends CommonPlaySpec with MockitoSugar {
private def buildProperty(id: Option[String], value: Option[BigDecimal]) = {
CommonBuilder.buildProperty.copy(
id = id,
address = Some(CommonBuilder.DefaultUkAddress),
propertyType = TestHelper.PropertyTypeDeceasedHome,
typeOfOwnership = TestHelper.TypesOfOwnershipDeceasedOnly,
tenure = TestHelper.TenureFreehold,
value = value
)
}
"areAllDebtsSectionsAnsweredNo" must {
"returns true when all sections answered no in debts" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails
appDetails.allLiabilities.map(_.areAllDebtsSectionsAnsweredNo) shouldBe Some(true)
}
"returns false when all but 1 sections answered no in debts" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy (
funeralExpenses = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = None)))
))
appDetails.allLiabilities.map(_.areAllDebtsSectionsAnsweredNo) shouldBe Some(false)
}
}
"areAllDebtsExceptMortgagesCompleted" must {
"display true when all debts section are complete" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails
appDetails.allLiabilities.flatMap(_.areAllDebtsExceptMortgagesCompleted) shouldBe Some(true)
}
"display false when all but one of debts section are complete" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy (
funeralExpenses = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = None)))
))
appDetails.allLiabilities.flatMap(_.areAllDebtsExceptMortgagesCompleted) shouldBe Some(false)
}
"display false when it has only one completed debt section" in {
val appDetails = CommonBuilder.buildApplicationDetails copy (
allLiabilities = Some(CommonBuilder.buildAllLiabilities copy (
funeralExpenses = Some(CommonBuilder.buildBasicEstateElementLiabilities.copy(
isOwned = Some(false), value = None)),
trust = None,
debtsOutsideUk = None,
jointlyOwned = None,
other = None)
))
appDetails.allLiabilities.flatMap(_.areAllDebtsExceptMortgagesCompleted) shouldBe Some(false)
}
"display false when all of the debts section are not complete" in {
val allLiabilities = Some(CommonBuilder.buildAllLiabilities copy (
funeralExpenses = Some(CommonBuilder.buildBasicEstateElementLiabilities.copy(
isOwned = Some(true), value = Some(BigDecimal(1000000)))),
trust = Some(CommonBuilder.buildBasicEstateElementLiabilities.copy(
isOwned = Some(true), value = Some(BigDecimal(1000000)))),
debtsOutsideUk = Some(CommonBuilder.buildBasicEstateElementLiabilities.copy(
isOwned = Some(true), Some(BigDecimal(1000000)))),
jointlyOwned = Some(CommonBuilder.buildBasicEstateElementLiabilities.copy(
isOwned = None, value = None)),
other = Some(CommonBuilder.buildBasicEstateElementLiabilities.copy(
isOwned = Some(true), Some(BigDecimal(1000000))))))
val appDetails = CommonBuilder.buildApplicationDetails copy(
allLiabilities = allLiabilities,
allAssets = Some(CommonBuilder.buildAllAssets.copy(properties = Some(CommonBuilder.buildProperties)))
)
appDetails.areAllDebtsCompleted shouldBe Some(false)
}
}
"doesAnyDebtSectionHaveAValue" must {
"return true when one debts section has a value" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy (
funeralExpenses = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = Some(BigDecimal(22)))))
))
appDetails.allLiabilities.map(_.doesAnyDebtSectionHaveAValue) shouldBe Some(true)
}
"return true when only mortgages section has a value" in {
val propertyList = List(buildProperty(Some("1"), Some(BigDecimal(100))),
buildProperty(Some("2"), Some(BigDecimal(1000))))
val mortgage1 = CommonBuilder.buildMortgage.copy(
id = "1", value = Some(BigDecimal(5000)), isOwned = Some(true))
val mortgageList = List(mortgage1)
val appDetails = CommonBuilder.buildApplicationDetails.copy(
propertyList = propertyList,
allAssets = Some(
CommonBuilder.buildAllAssets.copy(properties =
Some(CommonBuilder.buildProperties.copy(isOwned = Some(true))))),
allLiabilities = Some(CommonBuilder.buildAllLiabilities.copy(
mortgages = Some(CommonBuilder.buildMortgageEstateElement.copy(isOwned = Some(true), mortgageList))))
)
appDetails.allLiabilities.map(_.doesAnyDebtSectionHaveAValue) shouldBe Some(true)
}
"return false when no debts section has a value" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy(
funeralExpenses = None,
trust = None,
debtsOutsideUk = None,
jointlyOwned = None,
other = None,
mortgages = None
)
))
appDetails.allLiabilities.map(_.doesAnyDebtSectionHaveAValue) shouldBe Some(false)
}
}
"totalValue" must {
"return the correct value" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy(
funeralExpenses = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = Some(BigDecimal(22)))),
trust = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = Some(BigDecimal(122)))),
debtsOutsideUk = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = Some(BigDecimal(222)))),
jointlyOwned = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = Some(BigDecimal(322)))),
other = Some(BasicEstateElementLiabilities(isOwned = Some(true), value = Some(BigDecimal(422))))
)
))
appDetails.allLiabilities.map(_.totalValue()) shouldBe Some(BigDecimal(1110))
}
}
"mortgageValue" must {
"return the correct value" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy(
mortgages = Some(MortgageEstateElement(isOwned = Some(true),
mortgageList = List( Mortgage("", Some(434), Some(true)),
Mortgage("", Some(2331), Some(true)))))
)
))
appDetails.allLiabilities.map(_.mortgageValue) shouldBe Some(BigDecimal(2765))
}
}
"isEmpty" must {
"return true if there is no values for all liabilities fields" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy(
funeralExpenses = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
trust = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
debtsOutsideUk = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
jointlyOwned = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
other = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
mortgages = None
)
))
appDetails.allLiabilities.map(_.isEmpty) shouldBe Some(true)
}
"return false if there is any value in any of liabilities field" in {
val appDetails = AssetsWithAllSectionsSetToNoBuilder.buildApplicationDetails copy (
allLiabilities = Some(AssetsWithAllSectionsSetToNoBuilder.buildAllLiabilities copy(
funeralExpenses = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
trust = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
debtsOutsideUk = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
jointlyOwned = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
other = Some(BasicEstateElementLiabilities(isOwned = Some(false), value = None)),
mortgages = Some(MortgageEstateElement(isOwned = Some(true),
mortgageList = List( Mortgage("", Some(434), Some(true)),
Mortgage("", Some(2331), Some(true)))))
)
))
appDetails.allLiabilities.map(_.isEmpty) shouldBe Some(false)
}
}
}
|
hmrc/iht-frontend
|
test/iht/models/application/debts/AllLiabilitiesTest.scala
|
Scala
|
apache-2.0
| 9,876 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.events.examples
import uk.gov.hmrc.play.events.Measurable
case class ExampleMetricEvent(source: String,
name: String,
data: Map[String, String]) extends Measurable
object ExampleMetricEvent {
def apply(fileId: String, fileType: String) =
new ExampleMetricEvent(
source = "TestApp",
name = "NumberOfCreatedFilings",
data = Map (
"File ID" -> fileId,
"File Type" -> fileType
))
}
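// Hedged usage sketch (not part of the original file): shows how the factory `apply`
// above might be invoked; the file id and file type below are made-up values.
object ExampleMetricEventUsage {
  val event: ExampleMetricEvent = ExampleMetricEvent(fileId = "file-123", fileType = "xml")
  // event.data == Map("File ID" -> "file-123", "File Type" -> "xml")
}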
|
hmrc/play-events
|
src/test/scala/uk/gov/hmrc/play/events/examples/ExampleMetricEvent.scala
|
Scala
|
apache-2.0
| 1,113 |
package com.datastax.spark.connector.rdd
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.cql.AsyncResultSet
import com.datastax.oss.driver.internal.core.cql.ResultSets
import com.datastax.spark.connector._
import com.datastax.spark.connector.util.maybeExecutingAs
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.datasource.JoinHelper
import com.datastax.spark.connector.rdd.reader._
import com.datastax.spark.connector.writer._
import com.google.common.util.concurrent.{FutureCallback, Futures, SettableFuture}
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
import org.apache.spark.metrics.InputMetricsUpdater
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success}
/**
 * An [[org.apache.spark.rdd.RDD RDD]] that performs a selecting join between the `left` RDD and the specified
 * Cassandra table. It issues individual selects to retrieve the rows from Cassandra and takes
 * advantage of RDDs that have been partitioned with the
 * [[com.datastax.spark.connector.rdd.partitioner.ReplicaPartitioner]].
*
* @tparam L item type on the left side of the join (any RDD)
* @tparam R item type on the right side of the join (fetched from Cassandra)
*/
class CassandraLeftJoinRDD[L, R] (
override val left: RDD[L],
val keyspaceName: String,
val tableName: String,
val connector: CassandraConnector,
val columnNames: ColumnSelector = AllColumns,
val joinColumns: ColumnSelector = PartitionKeyColumns,
val where: CqlWhereClause = CqlWhereClause.empty,
val limit: Option[CassandraLimit] = None,
val clusteringOrder: Option[ClusteringOrder] = None,
val readConf: ReadConf = ReadConf(),
manualRowReader: Option[RowReader[R]] = None,
override val manualRowWriter: Option[RowWriter[L]] = None)(
implicit
val leftClassTag: ClassTag[L],
val rightClassTag: ClassTag[R],
@transient val rowWriterFactory: RowWriterFactory[L],
@transient val rowReaderFactory: RowReaderFactory[R])
extends CassandraRDD[(L, Option[R])](left.sparkContext, left.dependencies)
with CassandraTableRowReaderProvider[R]
with AbstractCassandraJoin[L, Option[R]] {
override type Self = CassandraLeftJoinRDD[L, R]
override protected val classTag = rightClassTag
override lazy val rowReader: RowReader[R] = manualRowReader match {
case Some(rr) => rr
case None => rowReaderFactory.rowReader(tableDef, columnNames.selectFrom(tableDef))
}
override protected def copy(
columnNames: ColumnSelector = columnNames,
where: CqlWhereClause = where,
limit: Option[CassandraLimit] = limit,
clusteringOrder: Option[ClusteringOrder] = None,
readConf: ReadConf = readConf,
connector: CassandraConnector = connector
): Self = {
new CassandraLeftJoinRDD[L, R](
left = left,
keyspaceName = keyspaceName,
tableName = tableName,
connector = connector,
columnNames = columnNames,
joinColumns = joinColumns,
where = where,
limit = limit,
clusteringOrder = clusteringOrder,
readConf = readConf
)
}
override def cassandraCount(): Long = {
columnNames match {
case SomeColumns(_) =>
logWarning("You are about to count rows but an explicit projection has been specified.")
case _ =>
}
val counts =
new CassandraLeftJoinRDD[L, Long](
left = left,
connector = connector,
keyspaceName = keyspaceName,
tableName = tableName,
columnNames = SomeColumns(RowCountRef),
joinColumns = joinColumns,
where = where,
limit = limit,
clusteringOrder = clusteringOrder,
readConf = readConf
)
counts.map(_._2.getOrElse(0L)).reduce(_ + _)
}
def on(joinColumns: ColumnSelector): CassandraLeftJoinRDD[L, R] = {
new CassandraLeftJoinRDD[L, R](
left = left,
connector = connector,
keyspaceName = keyspaceName,
tableName = tableName,
columnNames = columnNames,
joinColumns = joinColumns,
where = where,
limit = limit,
clusteringOrder = clusteringOrder,
readConf = readConf
)
}
/**
   * Turns this CassandraLeftJoinRDD into a factory for converting other RDDs after it has been serialized.
   * This method exists for streaming operations: it lets us serialize a template JoinRDD
   * and then use that serializable template in the DStream closure. This gives us a fully serializable
   * leftJoinWithCassandra operation.
*/
private[connector] def applyToRDD(left: RDD[L]): CassandraLeftJoinRDD[L, R] = {
new CassandraLeftJoinRDD[L, R](
left,
keyspaceName,
tableName,
connector,
columnNames,
joinColumns,
where,
limit,
clusteringOrder,
readConf,
Some(rowReader),
Some(rowWriter)
)
}
private[rdd] def fetchIterator(
session: CqlSession,
bsb: BoundStatementBuilder[L],
rowMetadata: CassandraRowMetadata,
leftIterator: Iterator[L],
metricsUpdater: InputMetricsUpdater
): Iterator[(L, Option[R])] = {
import com.datastax.spark.connector.util.Threads.BlockingIOExecutionContext
val queryExecutor = QueryExecutor(session, readConf.parallelismLevel, None, None)
def pairWithRight(left: L): SettableFuture[Iterator[(L, Option[R])]] = {
val resultFuture = SettableFuture.create[Iterator[(L, Option[R])]]
val leftSide = Iterator.continually(left)
val stmt = bsb.bind(left)
.update(_.setPageSize(readConf.fetchSizeInRows))
.executeAs(readConf.executeAs)
queryExecutor.executeAsync(stmt).onComplete {
case Success(rs) =>
val resultSet = new PrefetchingResultSetIterator(rs)
val iteratorWithMetrics = resultSet.map(metricsUpdater.updateMetrics)
          /* This is far from an ideal place to rate limit: because these futures are
             buffered, we will most likely exceed our threshold. */
val throttledIterator = iteratorWithMetrics.map(maybeRateLimit)
val rightSide = resultSet.isEmpty match {
case true => Iterator.single(None)
case false => throttledIterator.map(r => Some(rowReader.read(r, rowMetadata)))
}
resultFuture.set(leftSide.zip(rightSide))
case Failure(throwable) =>
resultFuture.setException(throwable)
}
resultFuture
}
val queryFutures = leftIterator.map(left => {
requestsPerSecondRateLimiter.maybeSleep(1)
pairWithRight(left)
})
JoinHelper.slidingPrefetchIterator(queryFutures, readConf.parallelismLevel).flatten
}
}
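// Hedged usage sketch (not part of the original source). The keyspace/table names and the
// `leftJoinWithCassandraTable` entry point below are assumptions about how this RDD is
// typically constructed via the connector's implicit RDD functions:
//
//   import com.datastax.spark.connector._
//
//   val ids: RDD[Tuple1[Int]] = sc.parallelize(1 to 100).map(Tuple1(_))
//   val joined: CassandraLeftJoinRDD[Tuple1[Int], CassandraRow] =
//     ids.leftJoinWithCassandraTable("test_ks", "users")
//   // Each left element is paired with Some(row) when a matching partition key exists
//   // in Cassandra, and with None otherwise.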
|
datastax/spark-cassandra-connector
|
connector/src/main/scala/com/datastax/spark/connector/rdd/CassandraLeftJoinRDD.scala
|
Scala
|
apache-2.0
| 6,691 |
package cbt
import java.io._
object paths{
val cbtHome: File = new File(Option(System.getenv("CBT_HOME")).get)
val mavenCache: File = cbtHome ++ "/cache/maven"
val userHome: File = new File(Option(System.getProperty("user.home")).get)
val stage1: File = new File(Option(System.getenv("STAGE1")).get)
val stage2: File = cbtHome ++ "/stage2"
val nailgun: File = new File(Option(System.getenv("NAILGUN")).get)
private val target = Option(System.getenv("TARGET")).get.stripSuffix("/")
val stage1Target: File = stage1 ++ ("/" ++ target)
val stage2Target: File = stage2 ++ ("/" ++ target)
val nailgunTarget: File = nailgun ++ ("/" ++ target)
val sonatypeLogin: File = cbtHome ++ "/sonatype.login"
}
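// Illustrative note (not part of the original file): with, for example,
// CBT_HOME=/home/alice/cbt and TARGET=target/scala-2.11/classes/, the values above resolve
// roughly to
//   mavenCache   -> /home/alice/cbt/cache/maven
//   stage2Target -> /home/alice/cbt/stage2/target/scala-2.11/classes
// (`++` is cbt's File/String append helper; the concrete paths are made-up examples.)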
|
tobias-johansson/cbt
|
stage1/paths.scala
|
Scala
|
bsd-2-clause
| 717 |
package slick.jdbc.hikaricp
import java.sql.{Connection, Driver}
import com.typesafe.config.Config
import slick.jdbc.{JdbcDataSource, JdbcDataSourceFactory}
import slick.util.ConfigExtensionMethods._
/** A JdbcDataSource for a HikariCP connection pool.
* See `slick.jdbc.JdbcBackend#Database.forConfig` for documentation on the config parameters. */
class HikariCPJdbcDataSource(val ds: com.zaxxer.hikari.HikariDataSource, val hconf: com.zaxxer.hikari.HikariConfig)
extends JdbcDataSource {
def createConnection(): Connection = ds.getConnection()
def close(): Unit = ds.close()
override val maxConnections: Option[Int] = Some(ds.getMaximumPoolSize)
}
object HikariCPJdbcDataSource extends JdbcDataSourceFactory {
import com.zaxxer.hikari._
def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): HikariCPJdbcDataSource = {
val hconf = new HikariConfig()
// Essential settings
    // Use HikariCP `dataSourceClassName` as the main configuration and fall back to
    // `dataSourceClass`, `driverClassName` and finally `driver`.
c.getStringOpt("dataSourceClassName").orElse(c.getStringOpt("dataSourceClass")) match {
case Some(className) => hconf.setDataSourceClassName(className)
case None => c.getStringOpt("driverClassName").orElse(c.getStringOpt("driver")).foreach(hconf.setDriverClassName)
}
    // Use `jdbcUrl` and then `url` to configure the pool. According to HikariCP docs, when
// using this property with "old" drivers, you may also need to set the driverClassName
// property.
c.getStringOpt("jdbcUrl").orElse(c.getStringOpt("url")).foreach(hconf.setJdbcUrl)
c.getStringOpt("username").orElse(c.getStringOpt("user")).foreach(hconf.setUsername)
c.getStringOpt("password").foreach(hconf.setPassword)
c.getPropertiesOpt("properties").foreach(hconf.setDataSourceProperties)
// Frequently used pool configuration
c.getBooleanOpt("autoCommit").foreach(hconf.setAutoCommit)
val numThreads = c.getIntOr("numThreads", 20)
hconf.setConnectionTimeout(c.getMillisecondsOr("connectionTimeout", 1000))
hconf.setIdleTimeout(c.getMillisecondsOr("idleTimeout", 600000))
hconf.setMaxLifetime(c.getMillisecondsOr("maxLifetime", 1800000))
c.getStringOpt("connectionTestQuery").foreach(hconf.setConnectionTestQuery)
c.getStringOpt("connectionInitSql").foreach(hconf.setConnectionInitSql)
hconf.setMaximumPoolSize(c.getIntOpt("maximumPoolSize").orElse(c.getIntOpt("maxConnections")).getOrElse(numThreads))
hconf.setMinimumIdle(c.getIntOpt("minimumIdle").orElse(c.getIntOpt("minConnections")).getOrElse(numThreads))
hconf.setPoolName(c.getStringOr("poolName", name))
// Infrequently used
// `initializationFailFast` is deprecated and should be replaced by
// `initializationFailTimeout`. See HikariCP docs for more information:
// https://github.com/brettwooldridge/HikariCP#infrequently-used
    // But we still respect the value if it is configured.
c.getBooleanOpt("initializationFailFast").foreach(hconf.setInitializationFailFast)
// The default value for `initializationFailFast` was false, which means the pool
// will not fail to start if there is a problem when connecting to the database.
// To keep this behavior, we need to set `initializationFailTimeout` to -1 as
// documented by HikariCP.
hconf.setInitializationFailTimeout(c.getMillisecondsOr("initializationFailTimeout", -1))
c.getBooleanOpt("isolateInternalQueries").foreach(hconf.setIsolateInternalQueries)
c.getBooleanOpt("allowPoolSuspension").foreach(hconf.setAllowPoolSuspension)
c.getBooleanOpt("readOnly").foreach(hconf.setReadOnly)
c.getBooleanOpt("registerMbeans").foreach(hconf.setRegisterMbeans)
c.getStringOpt("catalog").foreach(hconf.setCatalog)
c.getStringOpt("connectionInitSql").foreach(hconf.setConnectionInitSql)
c.getStringOpt("transactionIsolation")
.orElse(c.getStringOpt("isolation"))
.map("TRANSACTION_" + _)
.foreach(hconf.setTransactionIsolation)
hconf.setValidationTimeout(c.getMillisecondsOr("validationTimeout", 1000))
hconf.setLeakDetectionThreshold(c.getMillisecondsOr("leakDetectionThreshold", 0))
c.getStringOpt("schema").foreach(hconf.setSchema)
val ds = new HikariDataSource(hconf)
new HikariCPJdbcDataSource(ds, hconf)
}
}
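// Hedged example (not part of the original source): a HOCON block exercising the keys read
// by `forConfig` above; the data source class, credentials and pool sizes are made-up values.
//
//   mydb {
//     dataSourceClassName = "org.postgresql.ds.PGSimpleDataSource"
//     properties = {
//       serverName   = "localhost"
//       databaseName = "mydb"
//     }
//     user           = "app"
//     password       = "secret"
//     numThreads     = 10
//     maxConnections = 10
//     minConnections = 2
//     registerMbeans = true
//   }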
|
kwark/slick
|
slick-hikaricp/src/main/scala/slick/jdbc/hikaricp/HikariCPJdbcDataSource.scala
|
Scala
|
bsd-2-clause
| 4,373 |
package org.vertx.scala.core
import org.vertx.java.core.{ SSLSupport => JSSLSupport }
import org.vertx.scala.{Self, AsJava}
trait SSLSupport extends Self
with AsJava {
override type J <: JSSLSupport[_]
/**
* If `ssl` is `true`, this signifies that any connections will be SSL connections.
* @return A reference to this, so multiple invocations can be chained together.
*/
def setSSL(ssl: Boolean): this.type = wrap(asJava.setSSL(ssl))
/**
*
* @return Is SSL enabled?
*/
def isSSL: Boolean = asJava.isSSL
/**
* Set the path to the SSL key store. This method should only be used in SSL
* mode, i.e. after [[org.vertx.scala.core.SSLSupport.setSSL(boolean)]] has been set to `true`.<p>
* The SSL key store is a standard Java Key Store, and will contain the client certificate. Client certificates are
* only required if the server requests client authentication.<p>
* @return A reference to this, so multiple invocations can be chained together.
*/
def setKeyStorePath(path: String): this.type = wrap(asJava.setKeyStorePath(path))
/**
*
* @return Get the key store path
*/
def getKeyStorePath: String = asJava.getKeyStorePath
/**
* Set the password for the SSL key store. This method should only be used in SSL mode, i.e. after
* [[org.vertx.scala.core.SSLSupport.setSSL(boolean)]] has been set to `true`.<p>
* @return A reference to this, so multiple invocations can be chained together.
*/
def setKeyStorePassword(pwd: String): this.type = wrap(asJava.setKeyStorePassword(pwd))
/**
*
* @return Get the key store password
*/
def getKeyStorePassword: String = asJava.getKeyStorePassword
/**
* Set the path to the SSL trust store. This method should only be used in SSL mode, i.e. after
* [[org.vertx.scala.core.SSLSupport.setSSL(boolean)]] has been set to `true`.<p>
* The trust store is a standard Java Key Store, and should contain the certificates of any servers that the client trusts.
* @return A reference to this, so multiple invocations can be chained together.
*/
def setTrustStorePath(path: String): this.type = wrap(asJava.setTrustStorePath(path))
/**
*
* @return Get the trust store path
*/
def getTrustStorePath: String = asJava.getTrustStorePath
/**
* Set the password for the SSL trust store. This method should only be used in SSL mode, i.e. after
* [[org.vertx.scala.core.SSLSupport.setSSL(boolean)]] has been set to `true`.<p>
* @return A reference to this, so multiple invocations can be chained together.
*/
def setTrustStorePassword(pwd: String): this.type = wrap(asJava.setTrustStorePassword(pwd))
/**
*
* @return Get trust store password
*/
def getTrustStorePassword: String = asJava.getTrustStorePassword
}
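// Hedged usage sketch (not part of the original source). Any concrete `SSLSupport`
// implementation (for example an HTTP client or server wrapper) can chain these setters,
// since each one returns `this.type`; the paths and passwords below are made-up values.
//
//   client
//     .setSSL(true)
//     .setKeyStorePath("/path/to/keystore.jks")
//     .setKeyStorePassword("changeit")
//     .setTrustStorePath("/path/to/truststore.jks")
//     .setTrustStorePassword("changeit")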
|
vert-x/mod-lang-scala
|
src/main/scala/org/vertx/scala/core/SSLSupport.scala
|
Scala
|
apache-2.0
| 2,794 |
package nb.bar.management.model
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
/**
* Created by Nadie on 29/05/16.
*/
@ScalaJSDefined
trait Product extends js.Object {
  val name: String
  val price: Float
  val productId: Int
// def getName(): String;
// def getPrice(): Float;
// def getId(): Long;
}
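// Hedged example (not part of the original file): one way a Scala.js codebase might provide
// a concrete implementation of the trait above; the class name and values are made-up.
//
//   @ScalaJSDefined
//   class SimpleProduct(val name: String, val price: Float, val productId: Int)
//     extends Product
//
//   val beer = new SimpleProduct("Beer", 2.5f, 1)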
|
gdabi/SamoApp
|
src/main/scala/nb/bar/management/model/Product.scala
|
Scala
|
gpl-3.0
| 364 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.infinity.server
import scala.concurrent.duration._
import org.scalatest.Assertions
import _root_.unfiltered.filter.Planify
import _root_.unfiltered.jetty.Http
import _root_.unfiltered.request._
import _root_.unfiltered.response._
import es.tid.cosmos.common.BearerToken
import es.tid.cosmos.infinity.common.credentials.{ClusterCredentials, Credentials, UserCredentials}
import es.tid.cosmos.infinity.common.permissions.UserProfile
class MockCosmosApi(port: Int) extends Assertions {
private var infinitySecret: String = "default"
private var groupMapping: Map[String, String] = Map.empty
private var profileMapping: Map[(String, String), String] = Map.empty
private var redirectTo: Option[String] = None
private var processingDelay: FiniteDuration = 0.seconds
private var unavailable: Boolean = false
def givenInfinitySecret(secret: String): Unit = {
infinitySecret = secret
}
def givenProfile(credentials: Credentials, profile: UserProfile): Unit = {
profileMapping += profileKey(credentials) -> formatProfile(profile)
}
def givenMalformedProfile(credentials: Credentials): Unit = {
profileMapping += profileKey(credentials) -> "malformed profile"
}
private def profileKey(credentials: Credentials) = credentials match {
case UserCredentials(key, secret) => (key, secret)
case ClusterCredentials(secret) => ("cluster", secret)
}
def givenUserGroups(handle: String, groups: Seq[String]): Unit = {
    val body = groups.mkString("{ \"groups\": [\"", "\", \"", "\"]}")
groupMapping += handle -> body
}
def givenTemporarilyUnavailableService(): Unit = {
unavailable = true
}
def givenMalformedGroupMapping(handle: String): Unit = {
groupMapping += handle -> "]malformed {response["
}
def givenRedirectionResponse(target: String): Unit = {
redirectTo = Some(target)
}
def givenResponseIsDelayed(delay: FiniteDuration): Unit = {
processingDelay = delay
}
def withServer[T](block: => T): T = {
val server = defineServer()
try {
server.start()
block
} finally {
server.stop().join()
}
}
private def defineServer() = {
object Handle extends Params.Extract("handle", Params.first)
object ApiKey extends Params.Extract("apiKey", Params.first)
object ApiSecret extends Params.Extract("apiSecret", Params.first)
object ClusterSecret extends Params.Extract("clusterSecret", Params.first)
Http.local(port).filter(Planify {
case req @ GET(Authorization(auth) & Path("/infinity/v1/auth")) => action(auth) {
val credentialsOpt = req match {
case Params(ApiKey(key) & ApiSecret(secret)) => Some(key -> secret)
case Params(ClusterSecret(secret)) => Some("cluster" -> secret)
case _ => None
}
if (credentialsOpt.isEmpty) Unauthorized
else (for {
credentials <- credentialsOpt
profile <- profileMapping.get(credentials)
} yield {
Ok ~> ResponseString(profile)
}).getOrElse(BadRequest)
}
case req @ GET(Authorization(auth) & Path("/infinity/v1/groups") & Params(Handle(handle))) =>
action(auth) {
groupMapping.get(handle) match {
case None => BadRequest ~> ResponseString("Handle not found")
case Some(response) => Ok ~> ResponseString(response)
}
}
case req => fail(s"Unexpected request $req")
})
}
private def action[T](authHeader: String)(block: => ResponseFunction[T]): ResponseFunction[T] =
if (unavailable) ServiceUnavailable
else authHeader match {
case BearerToken(token) if token == infinitySecret =>
if (redirectTo.isDefined) Redirect(redirectTo.get)
else {
Thread.sleep(processingDelay.toMillis)
block
}
case _ => Forbidden
}
private def formatProfile(profile: UserProfile): String = {
val originsField = profile.accessFrom.fold("") { origins =>
""" "origins":""" + origins.mkString("[\\"", "\\", \\"", "\\"],")
}
s"""
|{
| "user": "${profile.username}",
| "groups": ${profile.groups.mkString("[\"", "\", \"", "\"],")},
| $originsField
| "accessFromSharedCluster": ${profile.accessFromSharedCluster}
|}
""".stripMargin
}
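// For illustration only (values are hypothetical): a profile with username "jsmith",
// groups Seq("staff"), accessFrom = None and accessFromSharedCluster = false is
// rendered roughly as
//
//   {
//     "user": "jsmith",
//     "groups": ["staff"],
//     "accessFromSharedCluster": false
//   }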
}
|
telefonicaid/fiware-cosmos-platform
|
infinity/server/src/test/scala/es/tid/cosmos/infinity/server/MockCosmosApi.scala
|
Scala
|
apache-2.0
| 4,982 |
package io.estatico.effect.laws.imports
import org.scalacheck.Prop
trait LawInstances extends LawTypes {
implicit def toIsEqArrow[A](a: A): IsEqArrow[A] = new IsEqArrow(a)
implicit def lawsIsEqToProp[A : Eq](isEq: IsEq[A]): Prop = cats.laws.discipline.catsLawsIsEqToProp(isEq)
}
/** Re-export of cats' IsEqArrow */
final class IsEqArrow[A](val repr: A) extends AnyVal {
def <->(rhs: A): cats.laws.IsEq[A] = cats.laws.IsEqArrow(repr) <-> rhs
}
|
estatico/scala-effect
|
laws/src/main/scala/io/estatico/effect/laws/imports/LawInstances.scala
|
Scala
|
apache-2.0
| 454 |
package mimir.sql;
import java.sql.SQLException
import java.util
import mimir.Database
import mimir.algebra._
import mimir.provenance._
import mimir.optimizer.operator.{InlineProjections, PushdownSelections}
import mimir.util.SqlUtils
import com.typesafe.scalalogging.LazyLogging
import sparsity.statement.Statement
import sparsity.Name
import scala.collection.JavaConversions._
import sparsity.select.SelectBody
sealed abstract class TargetClause
// case class AnnotateTarget(invisSch:Seq[(ProjectArg, (String,Type), String)]) extends TargetClause
case class ProjectTarget(cols:Seq[ProjectArg]) extends TargetClause
case class AggregateTarget(gbCols:Seq[Var], aggCols:Seq[AggFunction]) extends TargetClause
case class AllTarget() extends TargetClause
/**
* Utility methods for converting from RA Operators back into JSqlParser's Select objects
*/
class RAToSql(db: Database)
extends LazyLogging
{
/**
* An optimizing rewrite to align the expected and real schemas of table operators
*
* RA Table operators are allowed to define their own naming conventions. This
* forces us into an ugly hack where we need to wrap each table access in a nested
* select. These naming rewrites can sometimes be pulled out into the parent object
* by wrapping the table operator in a project that handles the renaming. This rewrite
* does so.
*/
def standardizeTables(oper: Operator): Operator =
{
oper match {
case Table(name, source, tgtSch, tgtMetadata) => {
val realSch = db.catalog.tableSchema(source, name) match {
case Some(realSch) => realSch
case None => throw new SQLException(s"Unknown Table '$source.$name'");
}
val schMap = tgtSch.map(_._1).zip(realSch.map(_._1)).map (
{ case (tgt, real) => ProjectArg(tgt, Var(real)) }
)
val metadata = tgtMetadata.map( {
case (out, Var(in), t) => ((in, Var(in), t), ProjectArg(out, Var(in)))
case (out, RowIdVar(), t) => ((ID("ROWID"), RowIdVar(), t), ProjectArg(out, Var(ID("ROWID"))))
case (o, i, t) => throw new SQLException(s"Unsupported Metadata: $o <- $i:$t")
})
Project(
schMap ++ metadata.map(_._2),
Table(name, source, realSch, metadata.map(_._1))
)
}
case _ => oper.rebuild(oper.children.map(standardizeTables(_)))
}
}
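// Sketch of the rewrite above (the table and schemas are hypothetical, not taken from any
// real catalog): a Table whose target schema renames the catalog's columns is split into a
// renaming Project over a Table that uses the catalog's own column names, e.g.
//
//   Table(R, db, Seq(A -> TInt()), Seq())            // catalog schema: Seq(X -> TInt())
//     ==> Project(Seq(ProjectArg(A, Var(X))),
//                 Table(R, db, Seq(X -> TInt()), Seq()))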
def apply(oper: Operator) = convert(oper)
def apply(expr: Expression) = convert(expr)
def apply(expr: Expression, sources: Seq[(Name,Seq[Name])]) = convert(expr, sources)
/**
* [use case] Convert an operator tree into JSQLParser's SelectBody type.
*/
def convert(oper: Operator): SelectBody =
{
// The actual recursive conversion is factored out into a separate fn
// so that we can do a little preprocessing.
logger.debug(s"PRE-CONVERT: $oper")
// standardizeTables adds a new layer of projections that we may be
// able to optimize away.
val optimized =
InlineProjections(PushdownSelections(oper))
// println("OPTIMIZED: "+optimized)
// and then actually do the conversion
makeSelect(optimized)
}
def replaceUnion(
target:SelectBody,
union: (sparsity.select.Union.Type, SelectBody)
): SelectBody =
{
SelectBody(
distinct = target.distinct,
target = target.target,
from = target.from,
where = target.where,
groupBy = target.groupBy,
having = target.having,
orderBy = target.orderBy,
limit = target.limit,
offset = target.offset,
union = Some(union)
)
}
def assembleUnion(first: SelectBody, rest: Seq[SelectBody]): SelectBody =
{
if(rest.isEmpty){ return first }
first.union match {
case Some((t, subq)) =>
replaceUnion(first, (t, assembleUnion(subq, rest)))
case None =>
replaceUnion(first, (sparsity.select.Union.All, assembleUnion(rest.head, rest.tail)))
}
}
/**
* Step 1: Strip UNIONs off the top of the operator stack
*
* These get converted to JSqlParser UNIONs. Both branches invoke step 2
*/
def makeSelect(oper:Operator): SelectBody =
{
logger.trace(s"makeSelect: \\n$oper")
val unionClauses = OperatorUtils.extractUnionClauses(oper)
val unionSelects = unionClauses.map { makeSimpleSelect(_) }
assembleUnion(unionSelects.head, unionSelects.tail)
}
/**
* Step 2: Unwrap an operator stack into a PlainSelect
*
* The optimal case here is when operators are organized into
* the form:
* Limit(Sort(Project/Aggregate(Select(Join(...)))))
* If these are out of order, we'll need to wrap them in nested
* selects... but that's ok. We pick off as many of them as we
* can and then stick them into a plain select
*
* Note that operators are unwrapped outside in, so they need to be
* applied in reverse order of how they are evaluated.
*/
private def makeSimpleSelect(oper:Operator): SelectBody =
{
var head = oper
logger.debug("Assembling Plain Select:\\n"+oper)
// Limit clause is the final processing step, so we handle
// it first.
val (limitClause, offsetClause):(Option[Long], Option[Long]) =
head match {
case Limit(offset, limit, src) => {
logger.debug("Assembling Plain Select: Including a LIMIT")
////// Remember to strip the limit operator off //////
head = src
(limit, if(offset > 0) { Some(offset) } else { None })
}
case _ => (None, None)
}
// Sort comes after limit, but before Project/Select.
// We need to get the per-source column bindings before actually adding the
// clause, so save the clause until we have those.
//
// There's also the oddity that ORDER BY behaves differently for aggregate
// and non-aggregate queries. That is, for aggregate queries, ORDER BY uses
// the schema defined in the target clause. For non-aggregate queries,
// ORDER BY is *supposed* to use the schema of the source columns (although
// some databases allow you to use the target column names too). Because this
// means a different ordering Sort(Aggregate(...)) vs Project(Sort(...)), we
// simply assume that the projection can be inlined.
val sortColumns: Seq[SortColumn] =
head match {
case Sort(cols, src) => {
head = src;
logger.debug("Assembling Plain Select: Will include a SORT: "+cols);
cols
}
case _ => Seq()
}
// Project/Aggregate is next... Don't actually convert them yet, but
// pull them off the stack and save the arguments
//
// We also save a set of bindings to rewrite the Sort clause if needed
//
val (preRenderTarget:TargetClause, sortBindings:Map[ID,Expression]) =
head match {
case p@Project(cols, src) => {
logger.debug("Assembling Plain Select: Target is a flat projection")
head = src;
(ProjectTarget(cols), p.bindings)
}
case Aggregate(gbCols, aggCols, src) => {
logger.debug("Assembling Plain Select: Target is an aggregation")
head = src;
(AggregateTarget(gbCols, aggCols), Map())
}
case _ => {
logger.debug("Assembling Plain Select: Target involves no computation")
(AllTarget(), Map())
}
}
// Strip off the sources, select condition(s) and so forth
val (condition, froms) = extractSelectsAndJoins(head)
// Extract the synthesized table names
val schemas: Seq[(Name, Seq[Name])] =
froms.flatMap { SqlUtils.getSchemas(_, db) }
// Sanity check...
val extractedSchema = schemas.flatMap(_._2).toSet
val expectedSchema:Set[Name] = preRenderTarget match {
//case AnnotateTarget(invisScm) => head.columnNames.union(invisScm.map(invisCol => ExpressionUtils.getColumns(invisCol._1.expression))).toSet
case ProjectTarget(cols) =>
cols.flatMap { col => ExpressionUtils.getColumns(col.expression) }
.map { col => col.quoted }
.toSet
case AggregateTarget(gbCols, aggCols) =>
gbCols.map { col => col.name.quoted }
.toSet ++
aggCols.flatMap { agg => agg.args
.flatMap { arg => ExpressionUtils.getColumns(arg) } }
.map { col => col.quoted }
.toSet
case AllTarget() => head.columnNames
.map { col => col.quoted }
.toSet
}
if(!(expectedSchema -- extractedSchema).isEmpty){
throw new SQLException(s"Error Extracting Joins!\\nExpected: $expectedSchema\\nGot: $extractedSchema\\nMissing: ${expectedSchema -- extractedSchema}\\n$head\\n${froms.mkString("\\n")}")
}
// Add the WHERE clause if needed
val whereClause:Option[sparsity.expression.Expression] = condition match {
case BoolPrimitive(true) => None
case _ => {
logger.debug(s"Assembling Plain Select: Target has a WHERE ($condition)")
Some(convert(condition, schemas))
}
}
// Apply the ORDER BY clause if we found one earlier
// Remember that the clause may have been further transformed if we hit a
// projection instead of an aggregation.
val sortOrder = sortColumns.map { col =>
sparsity.select.OrderBy(
convert(Eval.inline(col.expression, sortBindings), schemas),
col.ascending
)
}
logger.debug(s"Assembling Plain Select: ORDER BY: "+sortOrder)
// Finally, generate the target clause
val (target, groupBy): (
Seq[sparsity.select.SelectTarget],
Option[Seq[sparsity.expression.Expression]]
) = preRenderTarget match {
case ProjectTarget(cols) => {
(
cols.map { col =>
sparsity.select.SelectExpression(
convert(col.expression, schemas),
Some(Name(col.name.id, true))
)
},
None
)
}
case AggregateTarget(gbCols, aggCols) => {
val gbConverted = gbCols.map { convert(_, schemas) }
val gbTargets = gbConverted.map { sparsity.select.SelectExpression(_) }
val aggTargets = aggCols.map( agg => {
sparsity.select.SelectExpression(
sparsity.expression.Function(
Name(agg.function.id, true),
( if(agg.function.id == "count") { None }
else { Some(agg.args.map { convert(_, schemas) }) }
),
agg.distinct
),
Some(Name(agg.alias.id, true))
)
})
(
(gbTargets ++ aggTargets),
(if(gbConverted.isEmpty) { None } else { Some(gbConverted) })
)
}
case AllTarget() => {
(
Seq(sparsity.select.SelectAll()),
None
)
}
}
return sparsity.select.SelectBody(
target = target,
limit = limitClause,
offset = offsetClause,
where = whereClause,
orderBy = sortOrder,
from = froms,
groupBy = groupBy
)
}
/**
* Step 3: Build a FromItem Tree
*
* Selects, Joins, Tables, etc.. can be stacked into an odd tree
* structure. This method simultaneously pulls up Selects, while
* converting Joins, Tables, etc... into the corresponding
* JSqlParser FromItem tree.
*
* If we get something that doesn't map to a FromItem, the conversion
* punts back up to step 1.
*/
private def extractSelectsAndJoins(oper: Operator):
(Expression, Seq[sparsity.select.FromElement]) =
{
oper match {
case Select(cond, source) =>
val (childCond, froms) =
extractSelectsAndJoins(source)
(
ExpressionUtils.makeAnd(cond, childCond),
froms
)
/*case Annotate(subj,invisScm) => {
/*subj match {
case Table(name, alias, sch, metadata) => {
extractSelectsAndJoins(Table(name, alias, sch, metadata.union(invisScm.map(f => (f._2._1, f._1.expression, f._2._2)))))
}
case _ => extractSelectsAndJoins(subj)
}*/
extractSelectsAndJoins(Project(invisScm.map( _._1), subj))
}*/
case Join(lhs, rhs) =>
val (lhsCond, lhsFroms) = extractSelectsAndJoins(lhs)
val (rhsCond, rhsFroms) = extractSelectsAndJoins(rhs)
(
ExpressionUtils.makeAnd(lhsCond, rhsCond),
lhsFroms ++ rhsFroms
)
case LeftOuterJoin(lhs, rhs, cond) =>
val lhsFrom = makeSubSelect(lhs)
val rhsFrom = makeSubSelect(rhs)
val schemas = SqlUtils.getSchemas(lhsFrom, db)++
SqlUtils.getSchemas(rhsFrom, db)
val condition = convert(cond, schemas)
val joinItem =
sparsity.select.FromJoin(
lhsFrom,
rhsFrom,
t = sparsity.select.Join.LeftOuter,
on = condition
)
(
BoolPrimitive(true),
Seq(joinItem)
)
case Table(name, source, tgtSch, metadata) =>
val realSch = db.catalog.tableSchema(source, name) match {
case Some(realSch) => realSch
case None => throw new SQLException(s"Unknown Table `$source`.`$name`");
}
// Since Mimir's RA tree structure has no real notion of aliasing,
// it's only really safe to inline tables directly into a query
// when tgtSch == realSch. Eventually, we should add some sort of
// rewrite that tacks on aliasing metadata... but for now let's see
// how much mileage we can get out of this simple check.
if(realSch.map(_._1). // Take names from the real schema
zip(tgtSch.map(_._1)). // Align with names from the target schema
forall( { case (real,tgt) => real.equals(tgt) } )
// Ensure that both are equivalent.
&& metadata.forall { // And make sure only standardized metadata are preserved
case (ID("ROWID"), RowIdVar(), _) => true
case _ => false
}
){
// If they are equivalent, then...
(
BoolPrimitive(true),
Seq(new sparsity.select.FromTable(None, Name(name.id, true), None))
)
} else {
// If they're not equivalent, revert to old behavior
(
BoolPrimitive(true),
Seq(makeSubSelect(standardizeTables(oper)))
)
}
case HardTable(schema,data) => {
val unionChain = data.foldRight(None:Option[sparsity.select.SelectBody]) {
case (row, nextSelectBody) =>
Some(
SelectBody(
target = schema.zip(row).map {
case ((col, _), v) =>
sparsity.select.SelectExpression(convert(v), Some(col.quoted))
},
union = nextSelectBody.map { (sparsity.select.Union.All, _) }
)
)
}
val query = unionChain match {
case Some(query) => query
case None =>
SelectBody(
target = schema.map { case (col, _) =>
sparsity.select.SelectExpression(sparsity.expression.NullPrimitive(), Some(col.quoted))
},
where = Some(sparsity.expression.Comparison(
sparsity.expression.LongPrimitive(1),
sparsity.expression.Comparison.Neq,
sparsity.expression.LongPrimitive(1)
))
)
}
// might need to do something to play nice with oracle here like setFromItem(new Table(null, "dual"))
(BoolPrimitive(true), Seq(sparsity.select.FromSelect(query, Name("SINGLETON"))))
}
case View(name, query, annotations) =>
logger.warn("Inlined view when constructing SQL: RAToSQL will not use materialized views")
extractSelectsAndJoins(query)
case AdaptiveView(schema, name, query, annotations) =>
logger.warn("Inlined view when constructing SQL: RAToSQL will not use materialized views")
extractSelectsAndJoins(query)
case _ => (BoolPrimitive(true), Seq(makeSubSelect(oper)))
}
}
/**
* Punt an Operator conversion back to step 1 and make a SubSelect
*
* If Step 3 hits something it can't convert directly to a FromItem,
* we restart the conversion process by going back to step 1 to wrap
* the operator in a nested Select.
*
* The nested select (SubSelect) needs to be assigned an alias, which
* we assign using the (guaranteed to be unique) first element of the
* schema.
*/
def makeSubSelect(oper: Operator) =
sparsity.select.FromSelect(
makeSelect(oper),
Name("SUBQ_"+oper.columnNames.head, true)
)
/**
* Make sure that the schemas of union elements follow the same order
*/
private def alignUnionOrders(clauses: Seq[Operator]): Seq[Operator] =
{
val targetSchema = clauses.head.columnNames
clauses.map { clause =>
if(clause.columnNames.equals(targetSchema)){
clause
} else {
Project(
targetSchema.map( col => ProjectArg(col, Var(col)) ),
clause
)
}
}
}
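// Illustrative example (column names are hypothetical): if the first union clause emits
// columns (A, B) and a later clause emits (B, A), the later clause is wrapped as
// Project(Seq(ProjectArg(A, Var(A)), ProjectArg(B, Var(B))), clause) so that every
// clause produces (A, B) in the same order.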
def convert(e: Expression): sparsity.expression.Expression = {
convert(e, Seq())
}
def convert(
e: Expression,
sources: Seq[(Name,Seq[Name])]
): sparsity.expression.Expression = {
e match {
case IntPrimitive(v) => sparsity.expression.LongPrimitive(v)
case StringPrimitive(v) => sparsity.expression.StringPrimitive(v)
case FloatPrimitive(v) => sparsity.expression.DoublePrimitive(v)
case RowIdPrimitive(v) => sparsity.expression.StringPrimitive(v)
case TypePrimitive(t) => sparsity.expression.StringPrimitive(t.toString())
case BoolPrimitive(true) =>
sparsity.expression.Comparison(
sparsity.expression.LongPrimitive(1),
sparsity.expression.Comparison.Eq,
sparsity.expression.LongPrimitive(1)
)
case BoolPrimitive(false) =>
sparsity.expression.Comparison(
sparsity.expression.LongPrimitive(1),
sparsity.expression.Comparison.Neq,
sparsity.expression.LongPrimitive(1)
)
case NullPrimitive() => sparsity.expression.NullPrimitive()
case DatePrimitive(y,m,d) =>
sparsity.expression.Function(Name("DATE"),
Some(Seq(sparsity.expression.StringPrimitive("%04d-%02d-%02d".format(y, m, d))))
)
case Comparison(op, l, r) =>
sparsity.expression.Comparison(convert(l, sources), op, convert(r, sources))
case Arithmetic(op, l, r) =>
sparsity.expression.Arithmetic(convert(l, sources), op, convert(r, sources))
case Var(n) =>
convertColumn(n, sources)
case JDBCVar(t) =>
sparsity.expression.JDBCVar()
case Conditional(_, _, _) => {
val (whenClauses, elseClause) = ExpressionUtils.foldConditionalsToCase(e)
sparsity.expression.CaseWhenElse(
None,
whenClauses.map { case (when, then) => (
convert(when, sources),
convert(then, sources)
)},
convert(elseClause, sources)
)
}
case IsNullExpression(subexp) =>
sparsity.expression.IsNull(convert(subexp, sources))
case Not(subexp) =>
sparsity.expression.Not(convert(subexp, sources))
case mimir.algebra.Function(ID("cast"), Seq(body_arg, TypePrimitive(t))) =>
sparsity.expression.Cast(
convert(body_arg, sources),
Name(t.toString, true)
)
case mimir.algebra.Function(ID("cast"), _) =>
throw new SQLException("Invalid Cast: "+e)
case mimir.algebra.Function(fname, fargs) =>
sparsity.expression.Function(
fname.quoted,
Some(fargs.map { convert(_, sources) })
)
}
}
private def convertColumn(
n:ID,
sources: Seq[(Name,Seq[Name])]
): sparsity.expression.Column =
{
val src = sources.find {
case (_, vars) => vars.exists { n.equals(_) }
}
if(src.isEmpty)
throw new SQLException("Could not find appropriate source for '"+n+"' in "+sources)
sparsity.expression.Column(Name(n.id, true), Some(src.get._1))
}
}
|
UBOdin/mimir
|
src/main/scala/mimir/sql/RAToSql.scala
|
Scala
|
apache-2.0
| 20,607 |
package dsmoq.controllers
import org.scalatra.ActionResult
import org.scalatra.NotFound
import org.scalatra.Ok
import org.scalatra.ScalatraServlet
/**
* Servlet class mapped to "/".
* Provides routing to the View files.
*/
class ResourceController extends ScalatraServlet {
/**
* File extensions that are handled
*/
object Ext {
val Js = """.*\.js$""".r
val SourceMap = """.*\.map$""".r
val Json = """.*\.json$""".r
val Css = """.*\.css$""".r
val Html = """.*\.html$""".r
val Woff = """.*\.woff""".r
val Ttf = """.*\.ttf""".r
val Otf = """.*\.otf""".r
val Eot = """.*\.eot""".r
val Jpeg = """.*\.jpe?g$""".r
val Png = """.*\.png$""".r
val Gif = """.*\.gif$""".r
val Zip = """.*\.zip""".r
val Txt = """.*\.txt""".r
val Csv = """.*\.csv""".r
}
/**
* Converts a file name into a file.
*
* @param filename the file name
* @return the actual file object
*/
def resource(filename: String): java.io.File = {
new java.io.File(
"../client/www/" + filename
// servletContext.getResource("filename").getFile
)
}
get("/*") {
contentType = "text/html"
resource("index.html")
}
get("/resources/(.*)$".r) {
returnResource(params("captures"))
}
post("/resources/(.*)$".r) {
returnResource(params("captures"))
}
delete("/resources/(.*)$".r) {
returnResource(params("captures"))
}
put("/resources/(.*)$".r) {
returnResource(params("captures"))
}
/**
* Converts a file name into an ActionResult.
*
* @param filename the file name
* @return
* Ok(File) with the actual file object, if the Content-Type can be determined from the file name
* NotFound if the Content-Type cannot be determined from the file name
*/
def returnResource(filename: String): ActionResult = {
contextTypeForName(filename) match {
case None => NotFound()
case Some(x) => {
contentType = x
Ok(resource(filename))
}
}
}
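// Example (illustrative, not part of the original file): GET /resources/js/app.js is
// captured by the "/resources/(.*)$" route, contextTypeForName("js/app.js") yields
// Some("application/javascript"), and the servlet responds Ok with ../client/www/js/app.js.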
/**
* Determines the Content-Type from the file name.
*
* @param filename the file name
* @return the Content-Type string
*/
def contextTypeForName(filename: String): Option[String] = {
filename match {
case Ext.Js() => Some("application/javascript")
case Ext.SourceMap() => Some("application/json")
case Ext.Json() => Some("application/json")
case Ext.Css() => Some("text/css")
case Ext.Html() => Some("text/html")
case Ext.Ttf() => Some("application/x-font-ttf")
case Ext.Otf() => Some("application/x-font-opentype")
case Ext.Woff() => Some("application/font-woff")
case Ext.Eot() => Some("application/vnd.ms-fontobject")
case Ext.Jpeg() => Some("image/jpeg")
case Ext.Png() => Some("image/png")
case Ext.Gif() => Some("image/gif")
case Ext.Zip() => Some("application/zip")
case Ext.Txt() => Some("text/plain")
case Ext.Csv() => Some("text/csv")
case _ => None
}
}
}
|
nkawa/dsmoq
|
server/apiServer/src/main/scala/dsmoq/controllers/ResourceController.scala
|
Scala
|
apache-2.0
| 3,106 |
/* Copyright (c) 2015 Andrée Ekroth.
* Distributed under the MIT License (MIT).
* See accompanying file LICENSE or copy at
* http://opensource.org/licenses/MIT
*/
package com.github.ekroth
package spotify
/** Commands corresponding to the Spotify Web API v1. */
trait Commands {
self: Caching with Extensions =>
import scala.collection.immutable.Seq
import scala.concurrent.{ ExecutionContext, Future }
import scalaz._
import Scalaz._
import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.{ Authorization, OAuth2BearerToken, RawHeader }
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.{ HttpRequest, Uri }
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model.HttpMethods.{ GET, POST }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import spray.json._
import DefaultJsonProtocol._
import errorhandling._
private[spotify] val accountsBaseUri = Uri.Empty.withScheme(Uri.httpScheme(true)).withHost("accounts.spotify.com")
private[spotify] val baseUri = Uri.Empty.withScheme(Uri.httpScheme(true)).withHost("api.spotify.com").withPath(Path / "v1")
private[spotify] val tokenUri = accountsBaseUri.withPath(Path / "api" / "token")
private[spotify] val meUri = baseUri.withPath(Path / "me")
val spotifyMaxOffset = Int.MaxValue
val spotifyMaxLimit = 50
/** Use tagged Strings in order to force the user to use the provided Scope-values.
* The trait is private, and therefore the user may only use the values defined in
* the `Scope` object. Importantly, this is a purely compile-time requirement, and the
* runtime overhead is zero. This is kind of cool. */
private[spotify] trait ScopeTag
private[spotify] type Scope = String @@ ScopeTag
/** Scopes for user access. */
object Scope {
private[spotify] def apply(s: String): Scope = Tag.of[ScopeTag](s)
private[spotify] def unwrap(s: Scope): String = Tag.unwrap(s)
val playlistModifyPrivate: Scope = Scope("playlist-modify-private")
val playlistModifyPublic: Scope = Scope("playlist-modify-public")
val playlistReadPrivate: Scope = Scope("playlist-read-private")
val streaming: Scope = Scope("streaming")
val userFollowModify: Scope = Scope("user-follow-modify")
val userFollowRead: Scope = Scope("user-follow-read")
val userLibraryModify: Scope = Scope("user-library-modify")
val userLibraryRead: Scope = Scope("user-library-read")
val userReadBirthdate: Scope = Scope("user-read-birthdate")
val userReadEmail: Scope = Scope("user-read-email")
val userReadPrivate: Scope = Scope("user-read-private")
val all: Seq[Scope] = Seq(
playlistModifyPrivate,
playlistModifyPublic,
playlistReadPrivate,
streaming,
userFollowModify,
userFollowRead,
userLibraryModify,
userLibraryRead,
userReadBirthdate,
userReadEmail,
userReadPrivate
)
}
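// Illustrative note (not in the original file): because Scope is a tagged String whose
// constructor is private[spotify], callers can only use the values defined above, e.g.
//   redirectUri(None, Scope.streaming)      // compiles
//   redirectUri(None, "streaming")          // does not compile: a plain String is not a Scope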
/** Create a redirect URL.
*
* The server generates a state variable, selects some scopes and
* redirects the user to this URL. The user is then redirected back
* to the redirect URL set in `spotify.Credentials`.
*
* @param state Optional state variable.
* @param scopes Scopes.
*/
def redirectUri(state: Option[String], scopes: Scope*)(implicit srv: Credentials): Uri = {
val base = accountsBaseUri.withPath(Path / "authorize").withQuery(
("response_type", "code"),
("client_id", srv.clientId),
("redirect_uri", srv.redirectUri))
val withState = state.toList.map(s => ("state", s))
val withScopes = scopes.toList.map(s => ("scope", Scope.unwrap(s)))
base.withQuery((withState ++ withScopes): _*)
}
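// Usage sketch (hypothetical values; the exact shape of Credentials is assumed):
//
//   implicit val creds: Credentials = ???   // clientId, clientSecret, redirectUri
//   val loginUri = redirectUri(Some("opaque-csrf-state"),
//     Scope.userReadEmail, Scope.playlistReadPrivate)
//   // Redirect the user's browser to loginUri; Spotify sends the user back to the
//   // configured redirect URI with a `code` parameter that is then passed to userAuth.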
private[spotify] def get[T : JsonFormat](uri: Uri, token: Token, inner: Option[String])
(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext): ResultF[T] = Result.async {
(for {
resp <- Http().singleRequest(HttpRequest(
GET,
uri = uri,
headers = Seq(Authorization(OAuth2BearerToken(token.accessToken)))))
jsonResp <- Unmarshal(resp.entity).to[JsValue]
} yield {
val js = inner.map { x =>
val JsObject(fields) = jsonResp.asJsObject
fields(x)
}.getOrElse(jsonResp)
js.convertTo[T].right
}).recover {
case x: Exception => SpotifyError.Thrown(x).left
case x => SpotifyError.Unknown(s"During `get`: $x").left
}
}
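// Sketch of the `inner` parameter (response body is illustrative): some endpoints wrap
// their pager in an outer object, e.g. { "artists": { "items": [...], "limit": 50, ... } },
// so callers pass inner = Some("artists") to unwrap that field before converting the JSON
// to Paging[...]; endpoints that return the pager at the top level pass inner = None.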
/** Get the current user's private profile. */
def currentUserProfile(user: UserAuth)(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext): ResultF[UserPrivate] =
get[UserPrivate](meUri, user, None)
/** Get the current user's liked tracks. */
def currentUserTracks(user: UserAuth, limit: Int = spotifyMaxLimit)
(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext): ResultF[Pager[SavedTrack]] = {
requireBounds(1, limit, spotifyMaxLimit, "limit")
get[Paging[SavedTrack]](
meUri.withPath(Path / "tracks").withQuery(("limit", limit.toString)),
user,
None).map(_.withExt())
}
def currentUserFollowedArtists(user: UserAuth, limit: Int = spotifyMaxLimit)
(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext): ResultF[Pager[ArtistFull]] = {
requireBounds(1, limit, spotifyMaxLimit, "limit")
get[Paging[ArtistFull]](
meUri.withPath(Path / "following").withQuery(("type", "artist"), ("limit", limit.toString)),
user,
Some("artists")).map(_.withExt(Some("artists")))
}
def currentUserIsFollowing(user: UserAuth, ids: Seq[String])
(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext): ResultF[Seq[(String, Boolean)]] = {
requireBounds(1, ids.size, spotifyMaxLimit, "ids")
get[Seq[Boolean]](
meUri.withPath(Path / "following" / "contains").withQuery(("type", "artist"), ("ids", ids.mkString(","))),
user,
None).map(x => ids.zip(x))
}
def searchArtist(client: ClientAuth, query: String, limit: Int = spotifyMaxLimit)
(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext): ResultF[Pager[ArtistFull]] = {
requireBounds(1, limit, spotifyMaxLimit, "limit")
get[Paging[ArtistFull]](
baseUri.withPath(Path / "search").withQuery(("type", "artist"), ("q", query)),
client,
Some("artists")).map(_.withExt(Some("artists")))
}
/** Refresh user token.
*
* This requires that the user has been authorized before and is
* available in the Cache. The cache is refreshed if the user
* authorization refresh is successful.
*/
def userRefresh(authCode: AuthCode)(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext, srv: Credentials): ResultF[Option[UserAuth]] = Result.async {
loadUser(authCode) match {
case Some(user) => {
(for {
post <- Http().singleRequest(HttpRequest(
POST,
uri = tokenUri,
headers = Seq(
RawHeader("grant_type", "refresh_token"),
RawHeader("refresh_token", user.refreshToken),
RawHeader("client_id", srv.clientId),
RawHeader("client_secret", srv.clientSecret))))
jsonResp <- Unmarshal(post.entity).to[JsValue]
} yield {
val JsObject(fields) = jsonResp.asJsObject
val JsString(accessToken) = fields("access_token")
val JsNumber(expiresIn) = fields("expires_in")
val refreshedUser = user.copy(accessToken = accessToken, expires = (System.currentTimeMillis / 1000 + expiresIn.toLong))
saveUser(refreshedUser)
refreshedUser.some.right
}).recover {
case x: Exception => SpotifyError.Thrown(x).left
case x => SpotifyError.Unknown(s"During `userRefresh`: $x").left
}
}
case None => Future.successful(None.right)
}
}
/** Refresh client token.
*
* This doesn't exist; it is equivalent to `clientAuth`.
*/
def clientRefresh(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext, srv: Credentials): ResultF[ClientAuth] = clientAuth
/** Authorize the client.
*
* Authorize the client and update the cache.
*/
def clientAuth(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext, srv: Credentials): ResultF[ClientAuth] = Result.async {
(for {
post <- Http().singleRequest(HttpRequest(
POST,
uri = tokenUri,
headers = Seq(
RawHeader("grant_type", "client_credentials"),
RawHeader("client_id", srv.clientId),
RawHeader("client_secret", srv.clientSecret))))
jsonResp <- Unmarshal(post.entity).to[JsValue]
} yield {
val JsObject(fields) = jsonResp.asJsObject
val JsString(accessToken) = fields("access_token")
val JsNumber(expiresIn) = fields("expires_in")
val client = ClientAuth(accessToken, expiresIn.toLong)
saveClient(client)
client.right
}).recover {
case x: Exception => SpotifyError.Thrown(x).left
case x => SpotifyError.Unknown(s"During `clientAuth`: $x").left
}
}
/** Authorize user using the authorization code.
*
* Make a POST request to the spotify "/api/token".
* The `UserAuth` is saved in the cache with key `authCode`.
*/
def userAuth(authCode: AuthCode)(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext, srv: Credentials): ResultF[UserAuth] = Result.async {
(for {
post <- Http().singleRequest(HttpRequest(
POST,
uri = tokenUri,
headers = Seq(
RawHeader("grant_type", "authorization_code"),
RawHeader("code", authCode),
RawHeader("client_id", srv.clientId),
RawHeader("client_secret", srv.clientSecret))))
jsonResp <- Unmarshal(post.entity).to[JsValue]
} yield {
val JsObject(fields) = jsonResp.asJsObject
val JsString(accessToken) = fields("access_token")
val JsNumber(expiresIn) = fields("expires_in")
val JsString(refreshToken) = fields("refresh_token")
val user = UserAuth(authCode, accessToken, expiresIn.toLong, refreshToken)
saveUser(user)
user.right
}).recover {
case x: Exception => SpotifyError.Thrown(x).left
case x => SpotifyError.Unknown(s"During `userAuth`: $x").left
}
}
/** Get client authorization and refresh if expired. This requires that the client has authorized before. */
def getClient(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext, srv: Credentials): ResultF[ClientAuth] = {
loadClient() match {
case Some(client) if client.isExpired => clientRefresh
case Some(client) => Result.okF(client)
case None => clientAuth
}
}
/** Get user and refresh as needed. This requires that the user has authorized before. */
def getUser(authCode: String)(implicit sys: ActorSystem, fm: Materializer, ec: ExecutionContext, srv: Credentials): ResultF[Option[UserAuth]] = {
loadUser(authCode) match {
case Some(user) if user.isExpired => userRefresh(authCode)
case Some(user) => Result.okF(user.some)
case None => Result.okF(None)
}
}
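// Illustrative composition sketch (assumes ResultF supports map/flatMap through the
// errorhandling layer imported above):
//
//   val search: ResultF[Pager[ArtistFull]] =
//     for {
//       client <- getClient
//       result <- searchArtist(client, "Miles Davis")
//     } yield result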
}
|
ekroth/play-spotify
|
src/main/scala/com/github/ekroth/spotify/Commands.scala
|
Scala
|
mit
| 11,282 |
/**
*
* Copyright (C) 2017 University of Bamberg, Software Technologies Research Group
* <https://www.uni-bamberg.de/>, <http://www.swt-bamberg.de/>
*
* This file is part of the Data Structure Investigator (DSI) project, which received financial support by the
* German Research Foundation (DFG) under grant no. LU 1748/4-1, see
* <http://www.swt-bamberg.de/dsi/>.
*
* DSI is licensed under the GNU GENERAL PUBLIC LICENSE (Version 3), see
* the LICENSE file at the project's top-level directory for details or consult <http://www.gnu.org/licenses/>.
*
* DSI is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or any later version.
*
* DSI is a RESEARCH PROTOTYPE and distributed WITHOUT ANY
* WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* The following people contributed to the conception and realization of the present DSI distribution (in
* alphabetic order by surname):
*
* - Jan H. Boockmann
* - Gerald Lüttgen
* - Thomas Rupprecht
* - David H. White
*
*/
/**
* @author DSI
*
* IDsOliEntryPointCreator.scala created on Jan 28, 2015
*
* Description: Interface for DSI's entry point tag creator component
*/
package entrypoint
/**
* @author DSI
*
*/
trait IDsOliEntryPointCreator {
def createEPTs() : DsOliFeatureSet
}
|
uniba-swt/DSIsrc
|
src/entrypoint/IDsOliEntryPointCreator.scala
|
Scala
|
gpl-3.0
| 1,525 |