code stringlengths 5-1M | repo_name stringlengths 5-109 | path stringlengths 6-208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5-1M
---|---|---|---|---|---|
package uk.gov.digital.ho.proving.financialstatus.api.validation
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.i18n.LocaleContextHolder
import org.springframework.context.support.ResourceBundleMessageSource
import org.springframework.stereotype.Component
@Component
class ServiceMessages @Autowired()(val messageSource: ResourceBundleMessageSource) {
val INVALID_ACCOUNT_NUMBER: String = getMessage("invalid.account.number")
val INVALID_SORT_CODE: String = getMessage("invalid.sort.code")
val INVALID_MINIMUM_VALUE: String = getMessage("invalid.minimum.value")
val CONNECTION_TIMEOUT: String = getMessage("connection.timeout")
val CONNECTION_REFUSED: String = getMessage("connection.refused")
val UNKNOWN_CONNECTION_EXCEPTION: String = getMessage("unknown.connection.exception")
val INVALID_FROM_DATE: String = getMessage("invalid.from.date")
val INVALID_TO_DATE: String = getMessage("invalid.to.date")
val INVALID_DOB_DATE: String = getMessage("invalid.dob.date")
val INVALID_USER_ID: String = getMessage("invalid.user.id")
val INVALID_TUITION_FEES: String = getMessage("invalid.tuition.fees")
val INVALID_TUITION_FEES_PAID: String = getMessage("invalid.tuition.fees.paid")
val INVALID_ACCOMMODATION_FEES_PAID: String = getMessage("invalid.accommodation.fees.paid")
val INVALID_DEPENDANTS: String = getMessage("invalid.dependants.value")
val INVALID_DEPENDANTS_NOTALLOWED: String = getMessage("invalid.dependants.notallowed")
val INVALID_IN_LONDON: String = getMessage("invalid.in.london.value")
val INVALID_IN_DEPENDANTS_ONLY: String = getMessage("invalid.dependants.only.value")
val INVALID_SORT_CODE_VALUE = "000000"
val INVALID_ACCOUNT_NUMBER_VALUE = "00000000"
val INVALID_COURSE_LENGTH: String = getMessage("invalid.course.length")
val RESOURCE_NOT_FOUND: String = getMessage("resource.not.found")
val PATH_ERROR_MISSING_VALUE: String = getMessage("path.error.missing.value")
val INVALID_COURSE_START_DATE: String = getMessage("invalid.course.start.date")
val INVALID_COURSE_END_DATE: String = getMessage("invalid.course.end.date")
val INVALID_CONTINUATION_END_DATE: String = getMessage("invalid.continuation.end.date")
val INVALID_COURSE_START_DATE_VALUE: String = getMessage("invalid.course.start.date.value")
val INVALID_COURSE_END_DATE_VALUE: String = getMessage("invalid.course.end.date.value")
val INVALID_ORIGINAL_COURSE_START_DATE_VALUE: String = getMessage("invalid.original.course.start.date.value")
val INVALID_USER_PROFILE: String = getMessage("unable.to.retrieve.user.profile")
val INVALID_DATES: String = getMessage("invalid.dates")
def INVALID_STUDENT_TYPE(params: String*): String = getMessage("invalid.student.type", params)
def INVALID_COURSE_TYPE(params: String*): String = getMessage("invalid.course.type", params)
def INVALID_APPLICANT_TYPE(params: String*): String = getMessage("invalid.applicant.type", params)
def INVALID_VARIANT_TYPE(params: String*): String = getMessage("invalid.variant.type", params)
def INVALID_TIER_TYPE(params: String*): String = getMessage("invalid.tier.type", params)
def INVALID_NUM_OF_DEPENDANTS(params: String*): String = getMessage("invalid.num.of_dependants", params)
def INVALID_ARGUMENT(params: String*): String = getMessage("invalid.argument", params)
def USER_CONSENT_NOT_GIVEN(params: String*): String = getMessage("user.consent.not.given", params)
def NO_RECORDS_FOR_ACCOUNT(params: String*): String = getMessage("no.records.for.account", params)
def MISSING_PARAMETER(params: String*): String = getMessage("missing.parameter", params)
def PARAMETER_CONVERSION_ERROR(params: String*): String = getMessage("parameter.conversion.error", params)
val UNEXPECTED_ERROR: String = getMessage("unexpected.error")
val OK: String = "OK"
val REST_MISSING_PARAMETER: String = getMessage("rest.missing.parameter")
val REST_INVALID_PARAMETER_TYPE: String = getMessage("rest.invalid.parameter.type")
val REST_INVALID_PARAMETER_FORMAT: String = getMessage("rest.invalid.parameter.format")
val REST_INVALID_PARAMETER_VALUE: String = getMessage("rest.invalid.parameter.value")
val REST_INTERNAL_ERROR: String = getMessage("rest.internal.error")
val REST_API_SERVER_ERROR: String = getMessage("rest.api.server.error")
val REST_API_CLIENT_ERROR: String = getMessage("rest.api.client.error")
val REST_API_CONNECTION_ERROR: String = getMessage("rest.api.connection.error")
def getMessage(message: String): String = messageSource.getMessage(message, Nil.toArray[Object], LocaleContextHolder.getLocale)
def getMessage[T](message: String, params: Seq[T]): String = getMessage(message, params.map(_.asInstanceOf[Object]).toArray[Object])
def getMessage(message: String, params: Array[Object]): String = messageSource.getMessage(message, params, LocaleContextHolder.getLocale)
}
| UKHomeOffice/pttg-fs-api | src/main/scala/uk/gov/digital/ho/proving/financialstatus/api/validation/ServiceMessages.scala | Scala | mit | 4,903 |
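A minimal usage sketch for the ServiceMessages class above (not part of the UKHomeOffice repository). It assumes a messages.properties bundle on the classpath that defines every key referenced by the class, since all the vals resolve eagerly at construction time; the bundle name and example key values are assumptions.
import org.springframework.context.support.ResourceBundleMessageSource
import uk.gov.digital.ho.proving.financialstatus.api.validation.ServiceMessages

object ServiceMessagesSketch extends App {
  val source = new ResourceBundleMessageSource()
  source.setBasename("messages") // hypothetical bundle name

  val messages = new ServiceMessages(source)
  // Plain keys resolve to fixed strings; parameterised keys are formatted
  // by Spring's MessageSource using {0}, {1}, ... placeholders.
  println(messages.INVALID_SORT_CODE)
  println(messages.INVALID_STUDENT_TYPE("nondoctorate", "general, doctorate"))
}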
package org.abhijitsarkar.moviedb
import java.net.URL
import akka.http.scaladsl.marshalling.{Marshal, Marshaller, ToEntityMarshaller}
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpEntity, HttpResponse, StatusCode}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.util.FastFuture
import akka.stream.ActorAttributes
import akka.stream.scaladsl.{Keep, Sink, Source}
import cats.data.{EitherT, OptionT}
import cats.implicits._
import org.abhijitsarkar.moviedb.ExcelMovieParser.parseMovies
import org.abhijitsarkar.moviedb.MovieProtocol._
import spray.json._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Try, Success => Successful}
/**
* @author Abhijit Sarkar
*/
trait MovieController extends MovieService {
// (StatusCode, T) can be marshaled, if a ToEntityMarshaller[T] is available
// http://doc.akka.io/docs/akka-http/current/scala/http/common/marshalling.html
private implicit val movieMarshaller: ToEntityMarshaller[Movie] = Marshaller.oneOf(
Marshaller.withFixedContentType(`application/json`) { m =>
HttpEntity(`application/json`, m.toJson.compactPrint)
})
private def persistMovie(m: Movie, successCode: StatusCode): Future[HttpResponse] = persistMovies
.runWith(Source.single(Right(m)), Sink.head)
._2.flatten.transformWith {
case Successful(i) if (i == 1) => FastFuture.successful(HttpResponse(status = successCode))
case Failure(ex) => FastFuture.successful(HttpResponse(status = InternalServerError, entity = ex.getMessage))
}
private def transformResponse(e: EitherT[Future, String, Movie]): Future[HttpResponse] = {
e.value.transform(_ match {
case Right(m) => persistMovie(m, Created)
case Left(msg) => FastFuture.successful(HttpResponse(status = InternalServerError, entity = msg))
}, identity)
}.flatten
val routes = {
logRequestResult(getClass.getSimpleName) {
pathPrefix("movies") {
path(Segment) { id =>
get {
complete {
OptionT(findMovieById(id))
.semiflatMap(m => Marshal(OK -> m).to[HttpResponse])
.getOrElse(HttpResponse(status = NotFound))
}
} ~
delete {
complete {
OptionT(deleteMovie(id))
.map[StatusCode](_ => NoContent)
.getOrElse(NotFound)
}
} ~
put {
complete {
val m = EitherT(findMovieByImdbId(id))
m
.semiflatMap(_ => findMovieById(id))
.semiflatMap {
case Some(x) => persistMovie(x, NoContent)
case _ => transformResponse(m)
}
.getOrElseF(transformResponse(m))
}
}
} ~ (post & entity(as[String])) { url =>
complete {
Try(new URL(url)) match {
case Successful(u) => {
val src = Source.fromIterator(() => parseMovies(u).iterator)
src
.via(findMovieByTitleAndYear)
.via(persistMovies)
.completionTimeout(5.minutes)
.toMat(Sink.fold(FastFuture.successful(0))((acc, elem) => (acc |@| elem).map(_ + _)))(Keep.right)
// http://doc.akka.io/docs/akka/current/scala/dispatchers.html
// http://blog.akka.io/streams/2016/07/06/threading-and-concurrency-in-akka-streams-explained
// http://doc.akka.io/docs/akka/current/scala/stream/stream-parallelism.html
.withAttributes(ActorAttributes.dispatcher("blocking-io-dispatcher"))
.run.flatten
.onComplete {
case Successful(n) => logger.info(s"Created $n movies")
case Failure(t) => logger.error(t, "Failed to create movies")
}
Accepted
}
case Failure(t) => logger.error(t, "Bad URL"); BadRequest -> "Bad URL"
}
}
}
}
}
}
}
| asarkar/akka | movie-db/src/main/scala/org/abhijitsarkar/moviedb/MovieController.scala | Scala | gpl-3.0 | 4,250 |
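The GET route above leans on cats' OptionT to collapse a Future[Option[...]] into a single response. The self-contained sketch below shows the same pattern without the Akka HTTP plumbing; the names and values are illustrative and not from the movie-db project.
import cats.data.OptionT
import cats.implicits._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object OptionTSketch {
  def findById(id: String): Future[Option[String]] =
    Future.successful(if (id == "42") Some("Blade Runner") else None)

  // Present values map to a 200-style message, absent ones fall back to 404.
  def lookup(id: String): Future[String] =
    OptionT(findById(id))
      .map(title => s"200 OK: $title")
      .getOrElse("404 Not Found")
}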
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.module.extpowermeter.rapl
import java.util.UUID
import org.powerapi.core.power._
import org.powerapi.core.target.Target
import org.powerapi.core.{ExternalPMeter, MessageBus, OSHelper}
import org.powerapi.module.extpowermeter.ExtPowerMeterChannel.{publishRAPLPowerReport, subscribeRAPLRawPowerReport, unsubscribeRAPLRawPowerReport}
import org.powerapi.module.extpowermeter.ExtPowerMeterSensor
/**
* RAPL's sensor.
*
* @author <a href="mailto:[email protected]">Maxime Colmant</a>
*/
class RAPLSensor(eventBus: MessageBus, muid: UUID, target: Target, osHelper: OSHelper, pMeter: ExternalPMeter)
extends ExtPowerMeterSensor(eventBus, muid, target,
subscribeRAPLRawPowerReport, unsubscribeRAPLRawPowerReport, publishRAPLPowerReport,
osHelper, pMeter, 0.W)
| Spirals-Team/powerapi | powerapi-core/src/main/scala/org/powerapi/module/extpowermeter/rapl/RAPLSensor.scala | Scala | agpl-3.0 | 1,679 |
package temportalist.origin.api.common.tile
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.tileentity.TileEntity
import net.minecraft.util.EnumFacing
import net.minecraftforge.common.capabilities.Capability
import net.minecraftforge.items.{CapabilityItemHandler, ItemStackHandler}
/**
*
* Created by TheTemportalist on 6/7/2016.
*
* @author TheTemportalist
*/
trait ITileInventory extends TileEntity {
private val inventory = new ItemStackHandler(this.getSlots)
def getSlots: Int
final def getInventory: ItemStackHandler = this.inventory
final def serializeInventory: NBTTagCompound = {
this.inventory.serializeNBT()
}
final def deserializeInventory(nbt: NBTTagCompound): Unit ={
this.inventory.deserializeNBT(nbt)
}
override def hasCapability(capability: Capability[_], facing: EnumFacing): Boolean = {
if (capability == CapabilityItemHandler.ITEM_HANDLER_CAPABILITY) true
else super.hasCapability(capability, facing)
}
override def getCapability[T](capability: Capability[T], facing: EnumFacing): T = {
if (capability == CapabilityItemHandler.ITEM_HANDLER_CAPABILITY)
CapabilityItemHandler.ITEM_HANDLER_CAPABILITY.cast(this.inventory)
else super.getCapability(capability, facing)
}
}
| TheTemportalist/Origin | src/api/scala/temportalist/origin/api/common/tile/ITileInventory.scala | Scala | apache-2.0 | 1,246 |
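A hypothetical tile entity built on the trait above. The class name, slot count and NBT key are illustrative, and the writeToNBT/readFromNBT signatures assume the 1.9/1.10-era Forge mappings this file appears to target.
import net.minecraft.nbt.NBTTagCompound
import temportalist.origin.api.common.tile.ITileInventory

class TileSmallChest extends ITileInventory {
  override def getSlots: Int = 9

  // Persist the backing ItemStackHandler alongside the tile's own data.
  override def writeToNBT(compound: NBTTagCompound): NBTTagCompound = {
    val tag = super.writeToNBT(compound)
    tag.setTag("inventory", this.serializeInventory)
    tag
  }

  override def readFromNBT(compound: NBTTagCompound): Unit = {
    super.readFromNBT(compound)
    this.deserializeInventory(compound.getCompoundTag("inventory"))
  }
}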
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.anormdb
import com.twitter.zipkin.common.{Service, DependencyLink, Dependencies}
import com.twitter.algebird.Moments
import com.twitter.util.Time
import com.twitter.util.Await
import com.twitter.conversions.time._
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AnormAggregatesTest extends FunSuite {
test("store and get dependencies") {
val db = new DB(new DBConfig("sqlite-memory", new DBParams(dbName = "zipkinAggregatesTest1")))
val con = db.install()
val aggregates = new AnormAggregates(db, Some(con))
val dl1 = new DependencyLink(new Service("parent1"), new Service("child1"), Moments(18))
val dl2 = new DependencyLink(new Service("parent2"), new Service("child2"), Moments(42))
val dep1 = new Dependencies(Time.fromSeconds(1), Time.fromSeconds(2), List(dl1, dl2))
Await.result(aggregates.storeDependencies(dep1))
val agg1 = Await.result(aggregates.getDependencies(Some(dep1.startTime), Some(dep1.endTime))) // Inclusive, start to end
val agg2 = Await.result(aggregates.getDependencies(Some(Time.fromSeconds(0)), Some(Time.now))) // All time
val agg3 = Await.result(aggregates.getDependencies(Some(Time.fromSeconds(0)), None)) // 0 to +1.day
val agg4 = Await.result(aggregates.getDependencies(Some(Time.fromSeconds(0)), Some(Time.fromSeconds(1) + 1.millisecond))) // end inside the dependency
val agg5 = Await.result(aggregates.getDependencies(Some(Time.fromSeconds(1) + 1.millisecond), Some(Time.fromSeconds(2) - 1.millisecond))) // start and end inside the dependency
val agg6 = Await.result(aggregates.getDependencies(Some(Time.fromSeconds(1) + 1.millisecond), Some(Time.fromSeconds(3)))) // start inside the dependency
assert(agg1.links === dep1.links)
assert(agg2.links === dep1.links)
assert(agg3.links === dep1.links)
assert(agg4.links.isEmpty)
assert(agg5.links.isEmpty)
assert(agg6.links.isEmpty)
con.close()
}
}
| travisbrown/zipkin | zipkin-anormdb/src/test/scala/com/twitter/zipkin/storage/anormdb/AnormAggregatesTest.scala | Scala | apache-2.0 | 2,645 |
package com.mesosphere.cosmos.model.thirdparty.marathon
import com.mesosphere.cosmos.model.AppId
import com.mesosphere.universe.{ReleaseVersion, PackageDetailsVersion}
case class MarathonApp(
id: AppId,
labels: Map[String, String],
uris: List[String], /*TODO: uri type*/
cpus: Double,
mem: Double,
instances: Int,
cmd: Option[String],
container: Option[MarathonAppContainer]
) {
def packageName: Option[String] = labels.get(MarathonApp.nameLabel)
def packageReleaseVersion: Option[ReleaseVersion] = labels.get(MarathonApp.releaseLabel).map(ReleaseVersion)
def packageVersion: Option[PackageDetailsVersion] = labels.get(MarathonApp.versionLabel).map(PackageDetailsVersion)
def packageRepository: Option[String] = labels.get(MarathonApp.repositoryLabel)
}
object MarathonApp {
val frameworkNameLabel = "DCOS_PACKAGE_FRAMEWORK_NAME"
val isFrameworkLabel = "DCOS_PACKAGE_IS_FRAMEWORK"
val metadataLabel = "DCOS_PACKAGE_METADATA"
val nameLabel = "DCOS_PACKAGE_NAME"
val registryVersionLabel = "DCOS_PACKAGE_REGISTRY_VERSION"
val releaseLabel = "DCOS_PACKAGE_RELEASE"
val repositoryLabel = "DCOS_PACKAGE_SOURCE"
val versionLabel = "DCOS_PACKAGE_VERSION"
val commandLabel = "DCOS_PACKAGE_COMMAND"
}
| movicha/cosmos | cosmos-model/src/main/scala/com/mesosphere/cosmos/model/thirdparty/marathon/MarathonApp.scala | Scala | apache-2.0 | 1,240 |
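Illustrative only: a hand-built MarathonApp showing how the label accessors above behave. The label values are made up, and AppId is assumed to wrap a plain app path string.
object MarathonAppLabelsSketch {
  val app = MarathonApp(
    id = AppId("/cassandra"),
    labels = Map(
      MarathonApp.nameLabel -> "cassandra",
      MarathonApp.versionLabel -> "0.2.0-1",
      MarathonApp.releaseLabel -> "4",
      MarathonApp.repositoryLabel -> "https://universe.mesosphere.com/repo"
    ),
    uris = Nil,
    cpus = 1.0,
    mem = 1024.0,
    instances = 1,
    cmd = None,
    container = None
  )

  // Each accessor is a plain lookup into the DCOS_PACKAGE_* labels.
  assert(app.packageName.contains("cassandra"))
  assert(app.packageVersion.isDefined && app.packageReleaseVersion.isDefined)
}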
package io.fintrospect.renderers
import java.net.URL
import com.twitter.finagle.http.Method.Get
import com.twitter.finagle.http.path.Path
import com.twitter.finagle.http.{Request, Response, Status}
import io.fintrospect.formats.Xml.ResponseBuilder._
import io.fintrospect.util.ExtractionError
import io.fintrospect.{Security, ServerRoute}
class SiteMapModuleRenderer(baseUrl: URL) extends ModuleRenderer {
override def badRequest(badParameters: Seq[ExtractionError]): Response = BadRequest(badParameters.toString())
override def notFound(request: Request): Response = HttpResponse(Status.NotFound).build()
override def description(basePath: Path, security: Security, routes: Seq[ServerRoute[_, _]]): Response = {
def buildUrl(route: ServerRoute[_, _]) =
<url>
<loc>
{baseUrl + route.describeFor(basePath)}
</loc>
</url>
Ok(<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{routes.filter(_.method == Get).map(buildUrl)}
</urlset>)
}
}
| daviddenton/fintrospect | core/src/main/scala/io/fintrospect/renderers/SiteMapModuleRenderer.scala | Scala | apache-2.0 | 1,017 |
import com.amazonaws.services.{ dynamodbv2 => aws }
object ProjectionType {
val All = aws.model.ProjectionType.ALL
val Include = aws.model.ProjectionType.INCLUDE
val KeysOnly = aws.model.ProjectionType.KEYS_ONLY
}
| hirokikonishi/awscala | aws/dynamo/src/main/scala/ProjectionType.scala | Scala | apache-2.0 | 224 |
package common
import composition.TestHarness
import org.scalatest.concurrent.Eventually.PatienceConfig
import org.scalatest.selenium.WebBrowser.{click, cookie, cookies, delete, go}
import pages.BeforeYouStartPageSteps
import pages.CaptureCertificateDetailsPageSteps
import pages.ConfirmBusinessPageSteps
import pages.SetupBusinessDetailsPageSteps
import pages.VehicleLookupPageSteps
import pages.VrmLockedPageSteps
import pages.vrm_assign.ConfirmBusinessPage
import pages.vrm_assign.VehicleLookupPage
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory._
import uk.gov.dvla.vehicles.presentation.common.helpers.webbrowser.WebBrowserDriver
final class CommonStepDefs(
beforeYouStart: BeforeYouStartPageSteps,
vehicleLookup: VehicleLookupPageSteps,
vrmLocked: VrmLockedPageSteps,
captureCertificateDetails: CaptureCertificateDetailsPageSteps,
setupBusinessDetails: SetupBusinessDetailsPageSteps,
confirmBusiness: ConfirmBusinessPageSteps
)(implicit webDriver: WebBrowserDriver)
extends helpers.AcceptanceTestHelper with TestHarness {
def `start the Assign service` = {
beforeYouStart.`go to BeforeYouStart page`.
`is displayed`
delete all cookies
beforeYouStart.`click 'Start now' button`
vehicleLookup.`is displayed`
this
}
def `quit the browser` = {
webDriver.quit()
this
}
def `perform vehicle lookup (trader acting)`(replacementVRN: String,
registrationNumber: String,
docRefNumber: String,
postcode: String) = {
vehicleLookup.
enter(replacementVRN, registrationNumber, docRefNumber, postcode).
`keeper is not acting`.
`find vehicle`
this
}
def `goToVehicleLookupPage` = {
go to VehicleLookupPage
this
}
def `enterCertificateDetails` = {
captureCertificateDetails.`is displayed`
captureCertificateDetails.`enter certificate details`("1", "11111", "111111", "ABC123")
captureCertificateDetails.`submit details`
this
}
def `provide business details` = {
setupBusinessDetails.
`is displayed`.
`enter business details`
confirmBusiness.`is displayed`
click on ConfirmBusinessPage.confirm
this
}
def `check tracking cookie is fresh` = {
val c = cookie(TrackingIdCookieName)
try {
// The java method returns void or throws, so to make it testable you should wrap it in a try-catch.
c.underlying.validate()
} catch {
case e: Throwable => fail(s"Cookie should be valid and not have thrown exception: $e")
}
this
}
}
| dvla/vrm-assign-online | acceptance-tests/src/test/scala/common/CommonStepDefs.scala | Scala | mit | 2,875 |
package extensions
import play.api.mvc._
import play.api.mvc.Results._
import jp.t2v.lab.play2.auth._
import models.Role
import models.Role._
import models.Account
import scala.concurrent._
import scala.reflect._
import controllers.MovieSearchController
trait AuthConfigImpl extends AuthConfig {
/**
* A type that is used to identify a user.`String`, `Int`, `Long` and so on.
*/
type Id = Int
/**
* A type that represents a user in your application. `User`, `Account` and so on.
*/
type User = Account
/**
* A type that is defined by every action for authorization.
* This sample uses the following trait:
*
* sealed trait Role
* case object Administrator extends Role
* case object NormalUser extends Role
*/
type Authority = Role
/**
* A `ClassTag` is used to retrieve an id from the Cache API.
* Use something like this:
*/
val idTag: ClassTag[Id] = classTag[Id]
/**
* The session timeout in seconds
*/
val sessionTimeoutInSeconds: Int = 3600
/**
* A function that returns a `User` object from an `Id`.
* You can alter the procedure to suit your application.
*/
def resolveUser(id: Id)(implicit ctx: ExecutionContext): Future[Option[User]] = Future.successful(Account.findById(id))
/**
* Where to redirect the user after a successful login.
*/
def loginSucceeded(request: RequestHeader)(implicit ctx: ExecutionContext): Future[Result] =
Future.successful(Redirect(controllers.routes.ChatRoomController.listGroups))
/**
* Where to redirect the user after logging out
*/
def logoutSucceeded(request: RequestHeader)(implicit ctx: ExecutionContext): Future[Result] =
Future.successful(Redirect(controllers.routes.ApplicationController.index))
/**
* If the user is not logged in and tries to access a protected resource then redirect them as follows:
*/
def authenticationFailed(request: RequestHeader)(implicit ctx: ExecutionContext): Future[Result] =
Future.successful(Redirect(controllers.routes.ApplicationController.index))
/**
   * If authorization failed (the user lacks the required authority) respond as follows:
*/
override def authorizationFailed(request: RequestHeader, user: User, authority: Option[Authority])(implicit context: ExecutionContext): Future[Result] = {
Future.successful(Forbidden("no permission"))
}
/**
* A function that determines what `Authority` a user has.
* You should alter this procedure to suit your application.
*/
def authorize(user: User, authority: Authority)(implicit ctx: ExecutionContext): Future[Boolean] = Future.successful {
(user.role, authority) match {
case (Administrator, _) => true
case (NormalUser, NormalUser) => true
case _ => false
}
}
/**
* (Optional)
* You can custom SessionID Token handler.
* Default implementation use Cookie.
*/
override lazy val tokenAccessor = new CookieTokenAccessor(
/*
* Whether use the secure option or not use it in the cookie.
* Following code is default.
*/
cookieSecureOption = play.api.Play.isProd(play.api.Play.current),
cookieMaxAge = Some(sessionTimeoutInSeconds)
)
}
| tech4242/scala-backchannel-app | app/extensions/AuthConfigImpl.scala | Scala | bsd-3-clause | 3,232 |
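A sketch of a controller consuming this configuration, loosely following the play2-auth README. The controller name and action body are hypothetical, and the AuthElement/StackAction/loggedIn names assume the play2-auth version this project appears to use.
import extensions.AuthConfigImpl
import jp.t2v.lab.play2.auth.AuthElement
import models.Role.NormalUser
import play.api.mvc.Controller

class ProtectedController extends Controller with AuthElement with AuthConfigImpl {
  // Only users whose Role passes `authorize(_, NormalUser)` reach the body;
  // everyone else gets the authenticationFailed/authorizationFailed response.
  def index = StackAction(AuthorityKey -> NormalUser) { implicit request =>
    val account = loggedIn // the Account returned by resolveUser
    Ok(s"Hello, user ${account.id}")
  }
}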
@xrefCyclist class A
@xrefCyclist class B
class C {
@introspectCyclist def x = 2
}
| scalamacros/paradise | tests/src/test/scala/annotations/neg/macro-annotation-cyclic-a/Test_2.scala | Scala | bsd-3-clause | 86 |
package leo.modules.external
import leo.LeoTestSuite
import leo.datastructures.Term.mkAtom
import leo.datastructures._
import leo.modules.HOLSignature._
import leo.modules.output.{SZS_CounterSatisfiable, SZS_Theorem}
/**
* Created by mwisnie on 1/17/17.
*/
class ExternalProverTest extends LeoTestSuite {
def createSmallProblem(implicit sig : Signature) : Set[AnnotatedClause] = {
val p = mkAtom(sig.addUninterpreted("p", o))
val l1 = Literal(p, true)
val l2 = Literal(p, false)
Set(
AnnotatedClause(Clause(l1), ClauseAnnotation.NoAnnotation),
AnnotatedClause(Clause(l2), ClauseAnnotation.NoAnnotation)
)
}
def createSmallCSAProblem(implicit sig : Signature) : Set[AnnotatedClause] = {
val p = mkAtom(sig.addUninterpreted("p", o))
val l = Literal(p,false)
Set(AnnotatedClause(Clause(l), ClauseAnnotation.NoAnnotation))
}
test("Prover Creation") {
try {
val p = ExternalProver.createLeo2("/home/mwisnie/prover/leo2/bin/leo")
assert(p.name == "leo2")
} catch {
case e : NoSuchMethodException => println("Check your export of leo2 : 'export leo = /...'")
case e : Exception => fail(e.getMessage)
}
}
test("Prover Creation by name") {
try {
val p = ExternalProver.createProver("leo2","/home/mwisnie/prover/leo2/bin/leo")
assert(p.name == "leo2")
} catch {
case e : NoSuchMethodException => println("Check your export of leo2 : 'export leo = /...'")
case e : Exception => fail(e.getMessage)
}
}
test("Translate problem"){
val p = ExternalProver.createLeo2("/home/mwisnie/prover/leo2/bin/leo")
implicit val s = getFreshSignature
val example = createSmallProblem
val trans = p.translateProblem(example.map(_.cl), Capabilities.THF)
println(s"Translate problem : \n${trans.mkString("\n")}")
}
test("Call prover (THM)"){
val p = ExternalProver.createLeo2("/home/mwisnie/prover/leo2/bin/leo")
implicit val s = getFreshSignature
val example = createSmallProblem
val fres = p.call(example, example.map(_.cl), s, Capabilities.THF, 1000)
// Test the non-blocking waiting for the result.
while(!fres.isCompleted) {
try{
Thread.sleep(50)
println("Look for result")
} catch{ case e : InterruptedException => ()}
}
fres.value match {
case Some(res) =>
println(s"Got result ${res.szsStatus()} with exitvalue ${res.exitValue}")
assert(res.szsStatus == SZS_Theorem, s"Expected ${SZS_Theorem.apply} but got ${res.szsStatus()}")
case None => fail("Got no result from the prover")
}
}
test("Call prover (CSA)") {
val p = ExternalProver.createLeo2("/home/mwisnie/prover/leo2/bin/leo")
implicit val s = getFreshSignature
val example = createSmallCSAProblem
val fres = p.call(example, example.map(_.cl), s, Capabilities.THF, 1000)
// Test the non-blocking waiting for the result.
while(!fres.isCompleted) {
try{
Thread.sleep(50)
println("Look for result")
} catch{ case e : InterruptedException => ()}
}
fres.value match {
case Some(res) =>
println(s"Got result ${res.szsStatus()} with exitvalue ${res.exitValue}")
assert(res.szsStatus == SZS_CounterSatisfiable, s"Expected ${SZS_CounterSatisfiable.apply} but got ${res.szsStatus()}")
case None => fail("Got no result from the prover")
}
}
}
| lex-lex/Leo-III | src/test/scala/leo/modules/external/ExternalProverTest.scala | Scala | bsd-3-clause | 3,431 |
package jigg.nlp.ccg.parser
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import jigg.nlp.ccg.lexicon.{CandAssignedSentence, PoS}
import scala.collection.mutable.ArrayBuffer
trait Context {
def sentence: CandAssignedSentence
def state: State
def word(i:Int): Int = sentence.word(i).id
def pos(i:Int): Int = sentence.pos(i).id // if you want to change the definition of pos, please override
val s0 = state.s0
val s1 = state.s1
val s2 = state.s2
val s3 = state.s3
val q0:Option[Int] = if (state.j < sentence.size) Some(state.j) else None
val q1:Option[Int] = if (state.j+1 < sentence.size) Some(state.j+1) else None
val q2:Option[Int] = if (state.j+2 < sentence.size) Some(state.j+2) else None
  val q3:Option[Int] = if (state.j+3 < sentence.size) Some(state.j+3) else None
}
@SerialVersionUID(1L)
trait FeatureExtractor extends Serializable {
def addFeatures(c:Context, features:ArrayBuffer[UF]): Unit
}
@SerialVersionUID(8433695275219196660L)
class ZhangExtractor extends FeatureExtractor {
import FeatureTypes._
import FeatureTypes.{ZhangTemplate => TMP}
def addFeatures(ctx:Context, features:ArrayBuffer[UF]) = {
case class Item(w: Char, p: Short, c: Short)
@inline def w(item: Item):Char = item.w
@inline def p(item: Item):Short = item.p
@inline def c(item: Item):Short = item.c
def getItemsAt(s:WrappedCategory) = Item(ctx.word(s.head).toChar, ctx.pos(s.head).toShort, s.cat.toShort)
def wordPoSAt(i:Int) = Item(ctx.word(i).toChar, ctx.pos(i).toShort, 0.toShort)
val s0: Option[Item] = ctx.s0 map { getItemsAt(_) }
val s1 = ctx.s1 map { getItemsAt(_) }
val s2 = ctx.s2 map { getItemsAt(_) }
val s3 = ctx.s3 map { getItemsAt(_) }
val q0 = ctx.q0 map { wordPoSAt(_) }
val q1 = ctx.q1 map { wordPoSAt(_) }
val q2 = ctx.q2 map { wordPoSAt(_) }
val q3 = ctx.q3 map { wordPoSAt(_) }
val s0l = ctx.state.s0l map { getItemsAt(_) }
val s0r = ctx.state.s0r map { getItemsAt(_) }
val s0u = ctx.state.s0u map { getItemsAt(_) }
val s0h = ctx.state.s0h map { getItemsAt(_) }
val s1l = ctx.state.s1l map { getItemsAt(_) }
val s1r = ctx.state.s1r map { getItemsAt(_) }
val s1u = ctx.state.s1u map { getItemsAt(_) }
s0 foreach { S0 =>
features += WP(w(S0), p(S0), TMP.wS0_pS0)
features += C(c(S0), TMP.cS0)
features += PC(p(S0), c(S0), TMP.pS0_cS0)
features += WC(w(S0), c(S0), TMP.wS0_cS0)
}
s1 foreach { S1 =>
features += WP(w(S1), p(S1), TMP.wS1_pS1)
features += C(c(S1), TMP.cS1)
features += PC(p(S1), c(S1), TMP.pS1_cS1)
features += WC(w(S1), c(S1), TMP.wS1_cS1)
}
s2 foreach { S2 =>
features += PC(p(S2), c(S2), TMP.pS2_cS2)
features += WC(w(S2), c(S2), TMP.wS2_cS2)
}
s3 foreach { S3 =>
features += PC(p(S3), c(S3), TMP.pS3_cS3)
features += WC(w(S3), c(S3), TMP.wS3_cS3)
}
q0 foreach { Q0 => features += WP(w(Q0), p(Q0), TMP.wQ0_pQ0) }
q1 foreach { Q1 => features += WP(w(Q1), p(Q1), TMP.wQ1_pQ1) }
q2 foreach { Q2 => features += WP(w(Q2), p(Q2), TMP.wQ2_pQ2) }
q3 foreach { Q3 => features += WP(w(Q3), p(Q3), TMP.wQ3_pQ3) }
s0l foreach { S0L =>
features += PC(p(S0L), c(S0L), TMP.pS0L_cS0L)
features += WC(w(S0L), c(S0L), TMP.wS0L_cS0L)
}
s0r foreach { S0R =>
features += PC(p(S0R), c(S0R), TMP.pS0R_cS0R)
features += WC(w(S0R), c(S0R), TMP.wS0R_cS0R)
}
s0u foreach { S0U =>
features += PC(p(S0U), c(S0U), TMP.pS0U_cS0U)
features += WC(w(S0U), c(S0U), TMP.wS0U_cS0U)
}
s1l foreach { S1L =>
features += PC(p(S1L), c(S1L), TMP.pS1L_cS1L)
features += WC(w(S1L), c(S1L), TMP.wS1L_cS1L)
}
s1r foreach { S1R =>
features += PC(p(S1R), c(S1R), TMP.pS1R_cS1R)
features += WC(w(S1R), c(S1R), TMP.wS1R_cS1R)
}
s1u foreach { S1U =>
features += PC(p(S1U), c(S1U), TMP.pS1U_cS1U)
features += WC(w(S1U), c(S1U), TMP.wS1U_cS1U)
}
s1 foreach { S1 => s0 foreach { S0 =>
features += WCWC(w(S0), c(S0), w(S1), c(S1), TMP.wS0_cS0_wS1_cS1)
features += WC(w(S1), c(S0), TMP.wS1_cS0)
features += WC(w(S0), c(S1), TMP.wS0_cS1)
features += CC(c(S0), c(S1), TMP.cS0_cS1)
}}
s0 foreach { S0 => q0 foreach { Q0 =>
features += WCWP(w(S0), c(S0), w(Q0), p(Q0), TMP.wS0_cS0_wQ0_pQ0)
features += WPC(w(Q0), p(Q0), c(S0), TMP.wQ0_pQ0_cS0)
features += WPC(w(S0), p(Q0), c(S0), TMP.wS0_pQ0_cS0)
features += PC(p(Q0), c(S0), TMP.pQ0_cS0)
}}
s1 foreach { S1 => q0 foreach { Q0 =>
features += WCWP(w(S1), c(S1), w(Q0), p(Q0), TMP.wS1_cS1_wQ0_pQ0)
features += WPC(w(Q0), p(Q0), c(S1), TMP.wQ0_pQ0_cS1)
features += WPC(w(S1), p(Q0), c(S1), TMP.wS1_pQ0_cS1)
features += PC(p(Q0), c(S1), TMP.pQ0_cS1)
}}
s1 foreach { S1 => s0 foreach { S0 => q0 foreach { Q0 =>
features += WPCC(w(S0), p(Q0), c(S0), c(S1), TMP.wS0_pQ0_cS0_cS1)
features += WPCC(w(S1), p(Q0), c(S0), c(S1), TMP.wS1_pQ0_cS0_cS1)
features += WPCC(w(Q0), p(Q0), c(S0), c(S1), TMP.wQ0_pQ0_cS0_cS1)
features += PCC(p(Q0), c(S0), c(S1), TMP.pQ0_cS0_cS1)
features += PPP(p(S0), p(S1), p(Q0), TMP.pS0_pS1_pQ0)
}}}
s0 foreach { S0 => q0 foreach { Q0 => q1 foreach { Q1 =>
features += WPPC(w(S0), p(Q0), p(Q1), c(S0), TMP.wS0_pQ0_pQ1_cS0)
features += WPPC(w(Q0), p(Q0), p(Q1), c(S0), TMP.wQ0_pQ0_pQ1_cS0)
features += WPPC(w(Q1), p(S0), p(Q1), c(S0), TMP.wQ1_pQ0_pQ1_cS0)
features += PPC(p(Q0), p(Q1), c(S0), TMP.pQ0_pQ1_cS0)
features += PPP(p(S0), p(Q0), p(Q1), TMP.pS0_pQ0_pQ1)
}}}
s2 foreach { S2 => s1 foreach { S1 => s0 foreach { S0 =>
features += WCCC(w(S0), c(S0), c(S1), c(S2), TMP.wS0_cS0_cS1_cS2)
features += WCCC(w(S1), c(S0), c(S1), c(S2), TMP.wS1_cS0_cS1_cS2)
features += WCCC(w(S2), c(S0), c(S1), c(S2), TMP.wS2_cS0_cS1_cS2)
features += CCC(c(S0), c(S1), c(S2), TMP.cS0_cS1_cS2)
features += PPP(p(S0), p(S1), p(S2), TMP.pS0_pS1_pS2)
}}}
s0l foreach { S0L => s0h foreach { S0H => s0 foreach { S0 =>
features += CCC(c(S0), c(S0H), c(S0L), TMP.cS0_cS0H_cS0L)
}}}
s0r foreach { S0R => s0h foreach { S0H => s0 foreach { S0 =>
features += CCC(c(S0), c(S0H), c(S0R), TMP.cS0_cS0H_cS0R)
}}}
s0r foreach { S0R => q0 foreach { Q0 => s0 foreach { S0 =>
features += PCC(p(Q0), c(S0), c(S0R), TMP.pQ0_cS0_cS0R)
features += WCC(w(Q0), c(S0), c(S0R), TMP.wQ0_cS0_cS0R)
}}}
s0l foreach { S0L => s1 foreach { S1 => s0 foreach { S0 =>
features += CCC(c(S0), c(S0L), c(S1), TMP.cS0_cS0L_cS1)
features += WCC(w(S1), c(S0), c(S0L), TMP.wS1_cS0_cS0L)
}}}
s1r foreach { S1R => s1 foreach { S1 => s0 foreach { S0 =>
features += CCC(c(S0), c(S1), c(S1R), TMP.cS0_cS1_cS1R)
features += WCC(w(S0), c(S1), c(S1R), TMP.wS0_cS1_cS1R)
}}}
}
}
class BasicFinishedExtractor extends FeatureExtractor {
import FeatureTypes._
import FeatureTypes.{FinishTemplate => TMP}
def addFeatures(ctx:Context, features:ArrayBuffer[UF]) = {
def p(s: WrappedCategory) = ctx.pos(s.head).toShort
def c(s: WrappedCategory) = s.category.id.toShort
ctx.s0 match {
case Some(s0) =>
features += C(c(s0), TMP.cS0)
features += PC(p(s0), c(s0), TMP.pS0_cS0)
case None =>
}
ctx.s1 match {
case Some(s1) => features += C(c(s1), TMP.cS1)
case None => features += Empty(TMP.no_cS1)
}
ctx.s2 match {
case Some(s2) => features += C(c(s2), TMP.cS2)
case None => features += Empty(TMP.no_cS2)
}
}
}
trait FeatureExtractorsBase extends Serializable {
def methods: Seq[FeatureExtractor]
def pos2id: (PoS=>Int) = { pos => pos.id }
def finishMethods: Seq[FeatureExtractor] = Seq(new BasicFinishedExtractor())
class ContextWithCustomPosLevel(
override val sentence:CandAssignedSentence,
override val state:State) extends Context {
override def pos(i:Int) = pos2id(sentence.pos(i))
}
def context(sentence:CandAssignedSentence, state:State): Context =
new ContextWithCustomPosLevel(sentence, state)
//var features = new ArrayBuffer[UF]
def extractUnlabeledFeatures(sentence:CandAssignedSentence, state:State) =
extractUnlabeledHelper(sentence, state, methods)
def extractFeaturesFromFinishedTree(sentence: CandAssignedSentence, state: State) =
extractUnlabeledHelper(sentence, state, finishMethods)
def extractUnlabeledHelper(sentence:CandAssignedSentence, state:State, extractors: Seq[FeatureExtractor]): Seq[UF] = {
val features = new ArrayBuffer[UF]
features += FeatureTypes.Bias()
val ctx = context(sentence, state)
extractors.foreach { _.addFeatures(ctx, features) }
features
}
}
class FeatureExtractors(
override val methods: Seq[FeatureExtractor],
override val pos2id: (PoS=>Int)) extends FeatureExtractorsBase
| mynlp/jigg | src/main/scala/jigg/nlp/ccg/parser/ShiftReduceFeatureExtractors.scala | Scala | apache-2.0 | 9,433 |
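Minimal wiring sketch for the classes above; sentence and state construction is omitted because those types are defined elsewhere in jigg.
import jigg.nlp.ccg.parser.{FeatureExtractors, ZhangExtractor}

object FeatureExtractorsSketch {
  // Zhang-style templates plus the default finish-state features,
  // using each PoS token's id directly as the feature's pos value.
  val extractors = new FeatureExtractors(Seq(new ZhangExtractor), pos => pos.id)

  // def features(sentence: CandAssignedSentence, state: State): Seq[UF] =
  //   extractors.extractUnlabeledFeatures(sentence, state)
}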
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
import scala.math.Ordering
import scala.collection.mutable
import scala.collection.JavaConverters._
import Compat.SortedSetCompat
private[util] class NavigableView[E](original: NavigableSet[E],
inner: () => mutable.SortedSet[Box[E]],
lowerBound: Option[E], lowerInclusive: Boolean,
upperBound: Option[E], upperInclusive: Boolean)
extends AbstractCollection[E] with NavigableSet[E] with SortedSet[E] {
def size(): Int =
iterator.asScala.size
override def contains(o: Any): Boolean =
inner().contains(Box(o.asInstanceOf[E]))
override def add(e: E): Boolean = {
val comp = comparator()
lowerBound.foreach { bound =>
val cmp = comp.compare(e, bound)
if (cmp < 0 || (!lowerInclusive && cmp==0))
throw new IllegalArgumentException()
}
upperBound.foreach { bound =>
val cmp = comp.compare(e, bound)
if (cmp > 0 || (!upperInclusive && cmp==0))
throw new IllegalArgumentException()
}
original.add(e)
}
override def remove(o: Any): Boolean =
original.remove(o)
private def _iterator(iter: scala.collection.Iterator[E]): Iterator[E] = {
new Iterator[E] {
private var last: Option[E] = None
def hasNext(): Boolean = iter.hasNext
def next(): E = {
last = Some(iter.next())
last.get
}
def remove(): Unit = {
if (last.isEmpty) {
throw new IllegalStateException()
} else {
last.foreach(original.remove(_))
last = None
}
}
}
}
def iterator(): Iterator[E] =
_iterator(inner().iterator.map(_.inner))
def descendingIterator(): Iterator[E] =
_iterator(iterator.asScala.toList.reverse.iterator)
override def removeAll(c: Collection[_]): Boolean = {
val iter = c.iterator()
var changed = false
while (iter.hasNext)
changed = remove(iter.next) || changed
changed
}
override def addAll(c: Collection[_ <: E]): Boolean =
original.addAll(c)
def lower(e: E): E =
headSet(e, false).asScala.lastOption.getOrElse(null.asInstanceOf[E])
def floor(e: E): E =
headSet(e, true).asScala.lastOption.getOrElse(null.asInstanceOf[E])
def ceiling(e: E): E =
tailSet(e, true).asScala.headOption.getOrElse(null.asInstanceOf[E])
def higher(e: E): E =
tailSet(e, false).asScala.headOption.getOrElse(null.asInstanceOf[E])
def pollFirst(): E = {
val polled = inner().headOption
if (polled.isDefined) {
val elem = polled.get.inner
remove(elem)
elem
} else null.asInstanceOf[E]
}
def pollLast(): E = {
val polled = inner().lastOption
if (polled.isDefined) {
val elem = polled.get.inner
remove(elem)
elem
} else null.asInstanceOf[E]
}
def comparator(): Comparator[E] = {
new Comparator[E] {
val ordering = inner().ordering
def compare(a: E, b: E): Int =
ordering.compare(Box(a), Box(b))
}
}
def first(): E = {
val iter = iterator()
if (iter.hasNext) iter.next
else null.asInstanceOf[E]
}
def last(): E = {
val iter = iterator()
var result = null.asInstanceOf[E]
while (iter.hasNext)
result = iter.next()
result
}
def subSet(fromElement: E, fromInclusive: Boolean, toElement: E,
toInclusive: Boolean): NavigableSet[E] = {
val innerNow = inner()
val boxedFrom = Box(fromElement)
val boxedTo = Box(toElement)
val subSetFun = { () =>
val toTs =
if (toInclusive) innerNow.rangeTo(boxedTo)
else innerNow.rangeUntil(boxedTo)
if (fromInclusive) toTs.rangeFrom(boxedFrom)
else toTs.rangeFrom(boxedFrom).clone() -= boxedFrom
}
new NavigableView(this, subSetFun,
Some(fromElement), fromInclusive,
Some(toElement), toInclusive)
}
def headSet(toElement: E, inclusive: Boolean): NavigableSet[E] = {
val innerNow = inner()
val boxed = Box(toElement)
val headSetFun =
if (inclusive) () => innerNow.rangeTo(boxed)
else () => innerNow.rangeUntil(boxed)
new NavigableView(this, headSetFun,
None, true,
Some(toElement), inclusive)
}
def tailSet(fromElement: E, inclusive: Boolean): NavigableSet[E] = {
val innerNow = inner()
val boxed = Box(fromElement)
val tailSetFun =
if (inclusive) () => innerNow.rangeFrom(boxed)
else () => innerNow.rangeFrom(boxed).clone() -= boxed
new NavigableView(this, tailSetFun,
Some(fromElement), inclusive,
None, true)
}
def subSet(fromElement: E, toElement: E): NavigableSet[E] =
subSet(fromElement, true, toElement, false)
def headSet(toElement: E): NavigableSet[E] =
headSet(toElement, false)
def tailSet(fromElement: E): NavigableSet[E] =
tailSet(fromElement, true)
def descendingSet(): NavigableSet[E] = {
val descSetFun = { () =>
val innerNow = inner()
val retSet = new mutable.TreeSet[Box[E]]()(innerNow.ordering.reverse)
retSet ++= innerNow
retSet
}
new NavigableView(this, descSetFun, None, true, None, true)
}
}
| nicolasstucki/scala-js | javalib/src/main/scala/java/util/NavigableView.scala | Scala | apache-2.0 | 5,390 |
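For reference (not part of the Scala.js sources), these are the JDK navigation semantics the view above has to reproduce, shown against java.util.TreeSet.
object NavigableSemanticsSketch extends App {
  val set = new java.util.TreeSet[Integer]()
  java.util.Collections.addAll(set, Int.box(1), Int.box(3), Int.box(5), Int.box(7))

  assert(set.lower(5) == 3)            // greatest element strictly below 5
  assert(set.ceiling(4) == 5)          // least element >= 4
  assert(set.headSet(5, true).last == 5)
  assert(set.pollFirst() == 1)         // removes and returns the first element
  assert(set.size == 3)
}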
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.api.config.ExecutionConfigOptions.UpsertMaterialize
import org.apache.flink.table.catalog.{ObjectIdentifier, ResolvedCatalogTable}
import org.apache.flink.table.connector.sink.DynamicTableSink
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.abilities.sink.SinkAbilitySpec
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
import org.apache.flink.table.planner.plan.nodes.calcite.Sink
import org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSinkSpec
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecSink
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, InputProperty}
import org.apache.flink.table.planner.plan.utils.{ChangelogPlanUtils, FlinkRelOptUtil, RelDescriptionWriterImpl}
import org.apache.flink.table.planner.utils.JavaScalaConversionUtil.toScala
import org.apache.flink.types.RowKind
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.hint.RelHint
import org.apache.calcite.util.ImmutableBitSet
import java.io.{PrintWriter, StringWriter}
import java.util
import scala.collection.JavaConversions._
/**
 * Stream physical RelNode to write data into an external sink defined by a
* [[DynamicTableSink]].
*/
class StreamPhysicalSink(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
hints: util.List[RelHint],
tableIdentifier: ObjectIdentifier,
catalogTable: ResolvedCatalogTable,
tableSink: DynamicTableSink,
abilitySpecs: Array[SinkAbilitySpec])
extends Sink(cluster, traitSet, inputRel, hints, tableIdentifier, catalogTable, tableSink)
with StreamPhysicalRel {
override def requireWatermark: Boolean = false
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new StreamPhysicalSink(
cluster,
traitSet,
inputs.get(0),
hints,
tableIdentifier,
catalogTable,
tableSink,
abilitySpecs)
}
override def translateToExecNode(): ExecNode[_] = {
val inputChangelogMode = ChangelogPlanUtils.getChangelogMode(
getInput.asInstanceOf[StreamPhysicalRel]).get
val tableSinkSpec = new DynamicTableSinkSpec(
tableIdentifier,
catalogTable,
util.Arrays.asList(abilitySpecs: _*))
tableSinkSpec.setTableSink(tableSink)
val tableConfig = FlinkRelOptUtil.getTableConfigFromContext(this)
tableSinkSpec.setReadableConfig(tableConfig.getConfiguration)
val primaryKeys = toScala(catalogTable.getResolvedSchema
.getPrimaryKey).map(_.getColumns).map(toScala[String]).getOrElse(Seq())
val upsertMaterialize = tableConfig.getConfiguration.get(
ExecutionConfigOptions.TABLE_EXEC_SINK_UPSERT_MATERIALIZE) match {
case UpsertMaterialize.FORCE => primaryKeys.nonEmpty
case UpsertMaterialize.NONE => false
case UpsertMaterialize.AUTO =>
val insertOnly = tableSink
.getChangelogMode(inputChangelogMode)
.containsOnly(RowKind.INSERT)
if (!insertOnly && primaryKeys.nonEmpty) {
val columnNames = catalogTable.getResolvedSchema.getColumnNames
val pks = ImmutableBitSet.of(primaryKeys.map(columnNames.indexOf): _*)
val fmq = FlinkRelMetadataQuery.reuseOrCreate(getCluster.getMetadataQuery)
val uniqueKeys = fmq.getUniqueKeys(getInput)
val changeLogUpsertKeys = fmq.getUpsertKeys(getInput)
if (uniqueKeys != null &&
uniqueKeys.exists(pks.contains) &&
!(changeLogUpsertKeys != null &&
changeLogUpsertKeys.exists(pks.contains))) {
true
} else {
false
}
} else {
false
}
}
new StreamExecSink(
tableSinkSpec,
inputChangelogMode,
InputProperty.DEFAULT,
FlinkTypeFactory.toLogicalRowType(getRowType),
upsertMaterialize,
getDescriptionWithUpsert(upsertMaterialize)
)
}
/**
   * The inputChangelogMode can only be obtained in the translateToExecNode phase.
*/
def getDescriptionWithUpsert(upsertMaterialize: Boolean): String = {
val sw = new StringWriter
val pw = new PrintWriter(sw)
val relWriter = new RelDescriptionWriterImpl(pw)
this.explainTerms(relWriter)
relWriter.itemIf("upsertMaterialize", "true", upsertMaterialize)
relWriter.done(this)
sw.toString
}
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalSink.scala | Scala | apache-2.0 | 5,446 |
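The upsertMaterialize decision above reduces to a containment check between key sets. The standalone paraphrase below replaces the Flink planner types (ImmutableBitSet, ChangelogMode, config enum) with plain Scala sets and strings, purely for illustration.
object UpsertMaterializeSketch {
  // mode mirrors ExecutionConfigOptions.UpsertMaterialize: FORCE | NONE | AUTO.
  def needsMaterialize(
      mode: String,
      primaryKey: Set[Int],
      sinkIsInsertOnly: Boolean,
      inputUniqueKeys: Set[Set[Int]],
      inputUpsertKeys: Set[Set[Int]]): Boolean = mode match {
    case "FORCE" => primaryKey.nonEmpty
    case "NONE"  => false
    case "AUTO"  =>
      !sinkIsInsertOnly && primaryKey.nonEmpty &&
        inputUniqueKeys.exists(_.subsetOf(primaryKey)) &&  // some unique key is covered by the PK
        !inputUpsertKeys.exists(_.subsetOf(primaryKey))    // ... but no upsert key is
  }
}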
package definiti.core.end2end
import definiti.common.ast._
import definiti.common.program.Ok
import definiti.core.ProgramResultMatchers._
import definiti.common.utils.ASTUtils._
class AliasTypeSpec extends EndToEndSpec {
import AliasTypeSpec._
"Project.generatePublicAST" should "generate the AST with an alias containing generics" in {
val output = processFile("aliasTypes.ListAlias")
output shouldBe ok[Root]
}
it should "generate the AST with an inline verification" in {
val expected = Ok(inlineVerification)
val output = processFile("aliasTypes.inline-verification")
output should beResult[Root](expected)
}
it should "invalid the AST when the inline verification is invalid" in {
val output = processFile("aliasTypes.invalid-inline-verification")
output shouldBe ko[Root]
}
}
object AliasTypeSpec {
val inlineVerificationFile = "src/test/resources/samples/aliasTypes/inline-verification.def"
val inlineVerification = root(
AliasType(
name = "ListAlias",
fullName = "ListAlias",
genericTypes = Seq("A"),
parameters = Seq.empty,
alias = TypeDeclaration(
typeName = "List",
genericTypes = Seq(TypeDeclaration("A", Seq.empty, Seq.empty, Location(inlineVerificationFile, 1, 26, 1, 27))),
parameters = Seq.empty,
location = Location(inlineVerificationFile, 1, 21, 1, 28)
),
inherited = Seq.empty,
verifications = Seq(AtomicTypeVerification(
message = LiteralMessage("The list should not be empty", Location(inlineVerificationFile, 3, 5, 3, 35)),
function = DefinedFunction(
parameters = Seq(ParameterDefinition(
name = "list",
typeReference = TypeReference("ListAlias", Seq(TypeReference("A"))),
location = Location(inlineVerificationFile, 4, 6, 4, 10)
)),
body = MethodCall(
expression = Reference(
name = "list",
returnType = TypeReference("ListAlias", Seq(TypeReference("A"))),
location = Location(inlineVerificationFile, 5, 7, 5, 11)
),
method = "nonEmpty",
parameters = Seq.empty,
generics = Seq.empty,
returnType = TypeReference("Boolean"),
location = Location(inlineVerificationFile, 5, 7, 5, 22)
),
genericTypes = Seq.empty,
location = Location(inlineVerificationFile, 4, 5, 6, 6)
),
location = Location(inlineVerificationFile, 2, 3, 7, 4)
)),
comment = None,
location = Location(inlineVerificationFile, 1, 1, 8, 2)
)
)
}
| definiti/definiti-core | src/test/scala/definiti/core/end2end/AliasTypeSpec.scala | Scala | mit | 2,643 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io._
import java.lang.ref.{ReferenceQueue => JReferenceQueue, WeakReference}
import java.nio.ByteBuffer
import java.nio.channels.Channels
import java.util.Collections
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Random
import scala.util.control.NonFatal
import com.codahale.metrics.{MetricRegistry, MetricSet}
import org.apache.commons.io.IOUtils
import org.apache.spark._
import org.apache.spark.executor.DataReadMethod
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.internal.config.Network
import org.apache.spark.memory.{MemoryManager, MemoryMode}
import org.apache.spark.metrics.source.Source
import org.apache.spark.network._
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.client.StreamCallbackWithID
import org.apache.spark.network.shuffle._
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo
import org.apache.spark.network.util.TransportConf
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.serializer.{SerializerInstance, SerializerManager}
import org.apache.spark.shuffle.{ShuffleManager, ShuffleWriteMetricsReporter}
import org.apache.spark.storage.memory._
import org.apache.spark.unsafe.Platform
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
/* Class for returning a fetched block and associated metrics. */
private[spark] class BlockResult(
val data: Iterator[Any],
val readMethod: DataReadMethod.Value,
val bytes: Long)
/**
* Abstracts away how blocks are stored and provides different ways to read the underlying block
* data. Callers should call [[dispose()]] when they're done with the block.
*/
private[spark] trait BlockData {
def toInputStream(): InputStream
/**
* Returns a Netty-friendly wrapper for the block's data.
*
* Please see `ManagedBuffer.convertToNetty()` for more details.
*/
def toNetty(): Object
def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer
def toByteBuffer(): ByteBuffer
def size: Long
def dispose(): Unit
}
private[spark] class ByteBufferBlockData(
val buffer: ChunkedByteBuffer,
val shouldDispose: Boolean) extends BlockData {
override def toInputStream(): InputStream = buffer.toInputStream(dispose = false)
override def toNetty(): Object = buffer.toNetty
override def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer = {
buffer.copy(allocator)
}
override def toByteBuffer(): ByteBuffer = buffer.toByteBuffer
override def size: Long = buffer.size
override def dispose(): Unit = {
if (shouldDispose) {
buffer.dispose()
}
}
}
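// Illustrative only (not Spark code): the BlockData contract in use. The block's
// bytes can be consumed as a stream, and dispose() should be called afterwards so
// that a ByteBufferBlockData created with shouldDispose = true frees its chunks.
//
//   val data = new ByteBufferBlockData(chunkedBuffer, shouldDispose = true)
//   try {
//     val in = data.toInputStream()
//     // ... read the block ...
//   } finally {
//     data.dispose()
//   }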
/**
* Manager running on every node (driver and executors) which provides interfaces for putting and
* retrieving blocks both locally and remotely into various stores (memory, disk, and off-heap).
*
* Note that [[initialize()]] must be called before the BlockManager is usable.
*/
private[spark] class BlockManager(
executorId: String,
rpcEnv: RpcEnv,
val master: BlockManagerMaster,
val serializerManager: SerializerManager,
val conf: SparkConf,
memoryManager: MemoryManager,
mapOutputTracker: MapOutputTracker,
shuffleManager: ShuffleManager,
val blockTransferService: BlockTransferService,
securityManager: SecurityManager,
externalShuffleClient: Option[ExternalShuffleClient])
extends BlockDataManager with BlockEvictionHandler with Logging {
// same as `conf.get(config.SHUFFLE_SERVICE_ENABLED)`
private[spark] val externalShuffleServiceEnabled: Boolean = externalShuffleClient.isDefined
private val remoteReadNioBufferConversion =
conf.get(Network.NETWORK_REMOTE_READ_NIO_BUFFER_CONVERSION)
val diskBlockManager = {
// Only perform cleanup if an external service is not serving our shuffle files.
val deleteFilesOnStop =
!externalShuffleServiceEnabled || executorId == SparkContext.DRIVER_IDENTIFIER
new DiskBlockManager(conf, deleteFilesOnStop)
}
// Visible for testing
private[storage] val blockInfoManager = new BlockInfoManager
private val futureExecutionContext = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonCachedThreadPool("block-manager-future", 128))
// Actual storage of where blocks are kept
private[spark] val memoryStore =
new MemoryStore(conf, blockInfoManager, serializerManager, memoryManager, this)
private[spark] val diskStore = new DiskStore(conf, diskBlockManager, securityManager)
memoryManager.setMemoryStore(memoryStore)
// Note: depending on the memory manager, `maxMemory` may actually vary over time.
// However, since we use this only for reporting and logging, what we actually want here is
// the absolute maximum value that `maxMemory` can ever possibly reach. We may need
// to revisit whether reporting this value as the "max" is intuitive to the user.
private val maxOnHeapMemory = memoryManager.maxOnHeapStorageMemory
private val maxOffHeapMemory = memoryManager.maxOffHeapStorageMemory
private val externalShuffleServicePort = StorageUtils.externalShuffleServicePort(conf)
var blockManagerId: BlockManagerId = _
// Address of the server that serves this executor's shuffle files. This is either an external
// service, or just our own Executor's BlockManager.
private[spark] var shuffleServerId: BlockManagerId = _
// Client to read other executors' shuffle files. This is either an external service, or just the
// standard BlockTransferService to directly connect to other Executors.
private[spark] val shuffleClient = externalShuffleClient.getOrElse(blockTransferService)
// Max number of failures before this block manager refreshes the block locations from the driver
private val maxFailuresBeforeLocationRefresh =
conf.get(config.BLOCK_FAILURES_BEFORE_LOCATION_REFRESH)
private val slaveEndpoint = rpcEnv.setupEndpoint(
"BlockManagerEndpoint" + BlockManager.ID_GENERATOR.next,
new BlockManagerSlaveEndpoint(rpcEnv, this, mapOutputTracker))
// Pending re-registration action being executed asynchronously or null if none is pending.
// Accesses should synchronize on asyncReregisterLock.
private var asyncReregisterTask: Future[Unit] = null
private val asyncReregisterLock = new Object
// Field related to peer block managers that are necessary for block replication
@volatile private var cachedPeers: Seq[BlockManagerId] = _
private val peerFetchLock = new Object
private var lastPeerFetchTimeNs = 0L
private var blockReplicationPolicy: BlockReplicationPolicy = _
// A DownloadFileManager used to track all the files of remote blocks which are above the
// specified memory threshold. Files will be deleted automatically based on weak reference.
// Exposed for test
private[storage] val remoteBlockTempFileManager =
new BlockManager.RemoteBlockDownloadFileManager(this)
private val maxRemoteBlockToMem = conf.get(config.MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM)
/**
* Abstraction for storing blocks from bytes, whether they start in memory or on disk.
*
* @param blockSize the decrypted size of the block
*/
private abstract class BlockStoreUpdater[T](
blockSize: Long,
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[T],
tellMaster: Boolean,
keepReadLock: Boolean) {
/**
* Reads the block content into the memory. If the update of the block store is based on a
* temporary file this could lead to loading the whole file into a ChunkedByteBuffer.
*/
protected def readToByteBuffer(): ChunkedByteBuffer
protected def blockData(): BlockData
protected def saveToDiskStore(): Unit
private def saveDeserializedValuesToMemoryStore(inputStream: InputStream): Boolean = {
try {
val values = serializerManager.dataDeserializeStream(blockId, inputStream)(classTag)
memoryStore.putIteratorAsValues(blockId, values, classTag) match {
case Right(_) => true
case Left(iter) =>
// If putting deserialized values in memory failed, we will put the bytes directly
// to disk, so we don't need this iterator and can close it to free resources
// earlier.
iter.close()
false
}
} finally {
IOUtils.closeQuietly(inputStream)
}
}
private def saveSerializedValuesToMemoryStore(bytes: ChunkedByteBuffer): Boolean = {
val memoryMode = level.memoryMode
memoryStore.putBytes(blockId, blockSize, memoryMode, () => {
if (memoryMode == MemoryMode.OFF_HEAP && bytes.chunks.exists(!_.isDirect)) {
bytes.copy(Platform.allocateDirectBuffer)
} else {
bytes
}
})
}
/**
* Put the given data according to the given level in one of the block stores, replicating
* the values if necessary.
*
* If the block already exists, this method will not overwrite it.
*
* If keepReadLock is true, this method will hold the read lock when it returns (even if the
* block already exists). If false, this method will hold no locks when it returns.
*
* @return true if the block was already present or if the put succeeded, false otherwise.
*/
def save(): Boolean = {
doPut(blockId, level, classTag, tellMaster, keepReadLock) { info =>
val startTimeNs = System.nanoTime()
// Since we're storing bytes, initiate the replication before storing them locally.
// This is faster as data is already serialized and ready to send.
val replicationFuture = if (level.replication > 1) {
Future {
// This is a blocking action and should run in futureExecutionContext which is a cached
// thread pool.
replicate(blockId, blockData(), level, classTag)
}(futureExecutionContext)
} else {
null
}
if (level.useMemory) {
// Put it in memory first, even if it also has useDisk set to true;
// We will drop it to disk later if the memory store can't hold it.
val putSucceeded = if (level.deserialized) {
saveDeserializedValuesToMemoryStore(blockData().toInputStream())
} else {
saveSerializedValuesToMemoryStore(readToByteBuffer())
}
if (!putSucceeded && level.useDisk) {
logWarning(s"Persisting block $blockId to disk instead.")
saveToDiskStore()
}
} else if (level.useDisk) {
saveToDiskStore()
}
val putBlockStatus = getCurrentBlockStatus(blockId, info)
val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid
if (blockWasSuccessfullyStored) {
// Now that the block is in either the memory or disk store,
// tell the master about it.
info.size = blockSize
if (tellMaster && info.tellMaster) {
reportBlockStatus(blockId, putBlockStatus)
}
addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus)
}
logDebug(s"Put block ${blockId} locally took ${Utils.getUsedTimeNs(startTimeNs)}")
if (level.replication > 1) {
// Wait for asynchronous replication to finish
try {
ThreadUtils.awaitReady(replicationFuture, Duration.Inf)
} catch {
case NonFatal(t) =>
throw new Exception("Error occurred while waiting for replication to finish", t)
}
}
if (blockWasSuccessfullyStored) {
None
} else {
Some(blockSize)
}
}.isEmpty
}
}
/**
* Helper for storing a block from bytes already in memory.
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*/
private case class ByteBufferBlockStoreUpdater[T](
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[T],
bytes: ChunkedByteBuffer,
tellMaster: Boolean = true,
keepReadLock: Boolean = false)
extends BlockStoreUpdater[T](bytes.size, blockId, level, classTag, tellMaster, keepReadLock) {
override def readToByteBuffer(): ChunkedByteBuffer = bytes
/**
* The ByteBufferBlockData wrapper is not disposed of to avoid releasing buffers that are
* owned by the caller.
*/
override def blockData(): BlockData = new ByteBufferBlockData(bytes, false)
override def saveToDiskStore(): Unit = diskStore.putBytes(blockId, bytes)
}
/**
* Helper for storing a block based on bytes already in a local temp file.
*/
private case class TempFileBasedBlockStoreUpdater[T](
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[T],
tmpFile: File,
blockSize: Long,
tellMaster: Boolean = true,
keepReadLock: Boolean = false)
extends BlockStoreUpdater[T](blockSize, blockId, level, classTag, tellMaster, keepReadLock) {
override def readToByteBuffer(): ChunkedByteBuffer = {
val allocator = level.memoryMode match {
case MemoryMode.ON_HEAP => ByteBuffer.allocate _
case MemoryMode.OFF_HEAP => Platform.allocateDirectBuffer _
}
blockData().toChunkedByteBuffer(allocator)
}
override def blockData(): BlockData = diskStore.getBytes(tmpFile, blockSize)
override def saveToDiskStore(): Unit = diskStore.moveFileToBlock(tmpFile, blockSize, blockId)
override def save(): Boolean = {
val res = super.save()
tmpFile.delete()
res
}
}
/**
* Initializes the BlockManager with the given appId. This is not performed in the constructor as
* the appId may not be known at BlockManager instantiation time (in particular for the driver,
* where it is only learned after registration with the TaskScheduler).
*
* This method initializes the BlockTransferService and ShuffleClient, registers with the
* BlockManagerMaster, starts the BlockManagerWorker endpoint, and registers with a local shuffle
* service if configured.
*/
def initialize(appId: String): Unit = {
blockTransferService.init(this)
externalShuffleClient.foreach { shuffleClient =>
shuffleClient.init(appId)
}
blockReplicationPolicy = {
val priorityClass = conf.get(config.STORAGE_REPLICATION_POLICY)
val clazz = Utils.classForName(priorityClass)
val ret = clazz.getConstructor().newInstance().asInstanceOf[BlockReplicationPolicy]
logInfo(s"Using $priorityClass for block replication policy")
ret
}
val id =
BlockManagerId(executorId, blockTransferService.hostName, blockTransferService.port, None)
val idFromMaster = master.registerBlockManager(
id,
maxOnHeapMemory,
maxOffHeapMemory,
slaveEndpoint)
blockManagerId = if (idFromMaster != null) idFromMaster else id
shuffleServerId = if (externalShuffleServiceEnabled) {
logInfo(s"external shuffle service port = $externalShuffleServicePort")
BlockManagerId(executorId, blockTransferService.hostName, externalShuffleServicePort)
} else {
blockManagerId
}
// Register Executors' configuration with the local shuffle service, if one should exist.
if (externalShuffleServiceEnabled && !blockManagerId.isDriver) {
registerWithExternalShuffleServer()
}
logInfo(s"Initialized BlockManager: $blockManagerId")
}
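// Illustrative sketch (not part of the original source): the expected life cycle is to construct
// the BlockManager and then call initialize() once the application id is known, e.g. from
// executor/SparkEnv startup code. The identifiers below are hypothetical.
//
//   val bm: BlockManager = ...      // created as part of SparkEnv setup
//   bm.initialize(conf.getAppId)    // registers with the master and, if enabled,
//                                   // with the local external shuffle service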
def shuffleMetricsSource: Source = {
import BlockManager._
if (externalShuffleServiceEnabled) {
new ShuffleMetricsSource("ExternalShuffle", shuffleClient.shuffleMetrics())
} else {
new ShuffleMetricsSource("NettyBlockTransfer", shuffleClient.shuffleMetrics())
}
}
private def registerWithExternalShuffleServer(): Unit = {
logInfo("Registering executor with local external shuffle service.")
val shuffleConfig = new ExecutorShuffleInfo(
diskBlockManager.localDirs.map(_.toString),
diskBlockManager.subDirsPerLocalDir,
shuffleManager.getClass.getName)
val MAX_ATTEMPTS = conf.get(config.SHUFFLE_REGISTRATION_MAX_ATTEMPTS)
val SLEEP_TIME_SECS = 5
for (i <- 1 to MAX_ATTEMPTS) {
try {
// Synchronous and will throw an exception if we cannot connect.
shuffleClient.asInstanceOf[ExternalShuffleClient].registerWithShuffleServer(
shuffleServerId.host, shuffleServerId.port, shuffleServerId.executorId, shuffleConfig)
return
} catch {
case e: Exception if i < MAX_ATTEMPTS =>
logError(s"Failed to connect to external shuffle server, will retry ${MAX_ATTEMPTS - i}"
+ s" more times after waiting $SLEEP_TIME_SECS seconds...", e)
Thread.sleep(SLEEP_TIME_SECS * 1000L)
case NonFatal(e) =>
throw new SparkException("Unable to register with external shuffle server due to : " +
e.getMessage, e)
}
}
}
/**
* Report all blocks to the BlockManagerMaster again. This may be necessary if we are dropped
* by the master and come back, or if we become capable of recovering blocks on disk after
* an executor crash.
*
* This function deliberately fails silently if the master returns false (indicating that
* the slave needs to re-register). The error condition will be detected again by the next
* heartbeat attempt or new block registration, and another attempt to re-register all blocks
* will be made then.
*/
private def reportAllBlocks(): Unit = {
logInfo(s"Reporting ${blockInfoManager.size} blocks to the master.")
for ((blockId, info) <- blockInfoManager.entries) {
val status = getCurrentBlockStatus(blockId, info)
if (info.tellMaster && !tryToReportBlockStatus(blockId, status)) {
logError(s"Failed to report $blockId to master; giving up.")
return
}
}
}
/**
* Re-register with the master and report all blocks to it. This will be called by the heartbeat
* thread if our heartbeat to the block manager master indicates that we were not registered.
*
* Note that this method must be called without any BlockInfo locks held.
*/
def reregister(): Unit = {
// TODO: We might need to rate limit re-registering.
logInfo(s"BlockManager $blockManagerId re-registering with master")
master.registerBlockManager(blockManagerId, maxOnHeapMemory, maxOffHeapMemory, slaveEndpoint)
reportAllBlocks()
}
/**
* Re-register with the master sometime soon.
*/
private def asyncReregister(): Unit = {
asyncReregisterLock.synchronized {
if (asyncReregisterTask == null) {
asyncReregisterTask = Future[Unit] {
// This is a blocking action and should run in futureExecutionContext which is a cached
// thread pool
reregister()
asyncReregisterLock.synchronized {
asyncReregisterTask = null
}
}(futureExecutionContext)
}
}
}
/**
* For testing. Wait for any pending asynchronous re-registration; otherwise, do nothing.
*/
def waitForAsyncReregister(): Unit = {
val task = asyncReregisterTask
if (task != null) {
try {
ThreadUtils.awaitReady(task, Duration.Inf)
} catch {
case NonFatal(t) =>
throw new Exception("Error occurred while waiting for async. reregistration", t)
}
}
}
/**
* Interface to get local block data. Throws an exception if the block cannot be found or
* cannot be read successfully.
*/
override def getBlockData(blockId: BlockId): ManagedBuffer = {
if (blockId.isShuffle) {
shuffleManager.shuffleBlockResolver.getBlockData(blockId.asInstanceOf[ShuffleBlockId])
} else {
getLocalBytes(blockId) match {
case Some(blockData) =>
new BlockManagerManagedBuffer(blockInfoManager, blockId, blockData, true)
case None =>
// If this block manager receives a request for a block that it doesn't have then it's
// likely that the master has outdated block statuses for this block. Therefore, we send
// an RPC so that this block is marked as being unavailable from this block manager.
reportBlockStatus(blockId, BlockStatus.empty)
throw new BlockNotFoundException(blockId.toString)
}
}
}
/**
* Put the block locally, using the given storage level.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*/
override def putBlockData(
blockId: BlockId,
data: ManagedBuffer,
level: StorageLevel,
classTag: ClassTag[_]): Boolean = {
putBytes(blockId, new ChunkedByteBuffer(data.nioByteBuffer()), level)(classTag)
}
override def putBlockDataAsStream(
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[_]): StreamCallbackWithID = {
val (_, tmpFile) = diskBlockManager.createTempLocalBlock()
val channel = new CountingWritableChannel(
Channels.newChannel(serializerManager.wrapForEncryption(new FileOutputStream(tmpFile))))
logTrace(s"Streaming block $blockId to tmp file $tmpFile")
new StreamCallbackWithID {
override def getID: String = blockId.name
override def onData(streamId: String, buf: ByteBuffer): Unit = {
while (buf.hasRemaining) {
channel.write(buf)
}
}
override def onComplete(streamId: String): Unit = {
logTrace(s"Done receiving block $blockId, now putting into local blockManager")
// Note this is all happening inside the netty thread as soon as it reads the end of the
// stream.
channel.close()
val blockSize = channel.getCount
TempFileBasedBlockStoreUpdater(blockId, level, classTag, tmpFile, blockSize).save()
}
override def onFailure(streamId: String, cause: Throwable): Unit = {
// the framework handles the connection itself, we just need to do local cleanup
channel.close()
tmpFile.delete()
}
}
}
/**
* Get the BlockStatus for the block identified by the given ID, if it exists.
* NOTE: This is mainly for testing.
*/
def getStatus(blockId: BlockId): Option[BlockStatus] = {
blockInfoManager.get(blockId).map { info =>
val memSize = if (memoryStore.contains(blockId)) memoryStore.getSize(blockId) else 0L
val diskSize = if (diskStore.contains(blockId)) diskStore.getSize(blockId) else 0L
BlockStatus(info.level, memSize = memSize, diskSize = diskSize)
}
}
/**
* Get the ids of existing blocks that match the given filter. Note that this will
* query the blocks stored in the disk block manager (that the block manager
* may not know of).
*/
def getMatchingBlockIds(filter: BlockId => Boolean): Seq[BlockId] = {
// The `toArray` is necessary here in order to force the list to be materialized so that we
// don't try to serialize a lazy iterator when responding to client requests.
(blockInfoManager.entries.map(_._1) ++ diskBlockManager.getAllBlocks())
.filter(filter)
.toArray
.toSeq
}
/**
* Tell the master about the current storage status of a block. This will send a block update
* message reflecting the current status, *not* the desired storage level in its block info.
* For example, a block with MEMORY_AND_DISK set might have fallen out to be only on disk.
*
* droppedMemorySize exists to account for when the block is dropped from memory to disk (so
* it is still valid). This ensures that the update in the master will compensate for the
* increase in memory on the slave.
*/
private def reportBlockStatus(
blockId: BlockId,
status: BlockStatus,
droppedMemorySize: Long = 0L): Unit = {
val needReregister = !tryToReportBlockStatus(blockId, status, droppedMemorySize)
if (needReregister) {
logInfo(s"Got told to re-register updating block $blockId")
// Re-registering will report our new block for free.
asyncReregister()
}
logDebug(s"Told master about block $blockId")
}
/**
* Actually send a UpdateBlockInfo message. Returns the master's response,
* which will be true if the block was successfully recorded and false if
* the slave needs to re-register.
*/
private def tryToReportBlockStatus(
blockId: BlockId,
status: BlockStatus,
droppedMemorySize: Long = 0L): Boolean = {
val storageLevel = status.storageLevel
val inMemSize = Math.max(status.memSize, droppedMemorySize)
val onDiskSize = status.diskSize
master.updateBlockInfo(blockManagerId, blockId, storageLevel, inMemSize, onDiskSize)
}
/**
* Return the updated storage status of the block with the given ID. More specifically, if
* the block is dropped from memory and possibly added to disk, return the new storage level
* and the updated in-memory and on-disk sizes.
*/
private def getCurrentBlockStatus(blockId: BlockId, info: BlockInfo): BlockStatus = {
info.synchronized {
info.level match {
case null =>
BlockStatus.empty
case level =>
val inMem = level.useMemory && memoryStore.contains(blockId)
val onDisk = level.useDisk && diskStore.contains(blockId)
val deserialized = if (inMem) level.deserialized else false
val replication = if (inMem || onDisk) level.replication else 1
val storageLevel = StorageLevel(
useDisk = onDisk,
useMemory = inMem,
useOffHeap = level.useOffHeap,
deserialized = deserialized,
replication = replication)
val memSize = if (inMem) memoryStore.getSize(blockId) else 0L
val diskSize = if (onDisk) diskStore.getSize(blockId) else 0L
BlockStatus(storageLevel, memSize, diskSize)
}
}
}
/**
* Get locations of an array of blocks.
*/
private def getLocationBlockIds(blockIds: Array[BlockId]): Array[Seq[BlockManagerId]] = {
val startTimeNs = System.nanoTime()
val locations = master.getLocations(blockIds).toArray
logDebug(s"Got multiple block location in ${Utils.getUsedTimeNs(startTimeNs)}")
locations
}
/**
* Cleanup code run in response to a failed local read.
* Must be called while holding a read lock on the block.
*/
private def handleLocalReadFailure(blockId: BlockId): Nothing = {
releaseLock(blockId)
// Remove the missing block so that its unavailability is reported to the driver
removeBlock(blockId)
throw new SparkException(s"Block $blockId was not found even though it's read-locked")
}
/**
* Get block from local block manager as an iterator of Java objects.
*/
def getLocalValues(blockId: BlockId): Option[BlockResult] = {
logDebug(s"Getting local block $blockId")
blockInfoManager.lockForReading(blockId) match {
case None =>
logDebug(s"Block $blockId was not found")
None
case Some(info) =>
val level = info.level
logDebug(s"Level for block $blockId is $level")
val taskContext = Option(TaskContext.get())
if (level.useMemory && memoryStore.contains(blockId)) {
val iter: Iterator[Any] = if (level.deserialized) {
memoryStore.getValues(blockId).get
} else {
serializerManager.dataDeserializeStream(
blockId, memoryStore.getBytes(blockId).get.toInputStream())(info.classTag)
}
// We need to capture the current taskId in case the iterator completion is triggered
// from a different thread which does not have TaskContext set; see SPARK-18406 for
// discussion.
val ci = CompletionIterator[Any, Iterator[Any]](iter, {
releaseLock(blockId, taskContext)
})
Some(new BlockResult(ci, DataReadMethod.Memory, info.size))
} else if (level.useDisk && diskStore.contains(blockId)) {
val diskData = diskStore.getBytes(blockId)
val iterToReturn: Iterator[Any] = {
if (level.deserialized) {
val diskValues = serializerManager.dataDeserializeStream(
blockId,
diskData.toInputStream())(info.classTag)
maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
} else {
val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
.map { _.toInputStream(dispose = false) }
.getOrElse { diskData.toInputStream() }
serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
}
}
val ci = CompletionIterator[Any, Iterator[Any]](iterToReturn, {
releaseLockAndDispose(blockId, diskData, taskContext)
})
Some(new BlockResult(ci, DataReadMethod.Disk, info.size))
} else {
handleLocalReadFailure(blockId)
}
}
}
/**
* Get block from the local block manager as serialized bytes.
*/
def getLocalBytes(blockId: BlockId): Option[BlockData] = {
logDebug(s"Getting local block $blockId as bytes")
assert(!blockId.isShuffle, s"Unexpected ShuffleBlockId $blockId")
blockInfoManager.lockForReading(blockId).map { info => doGetLocalBytes(blockId, info) }
}
/**
* Get block from the local block manager as serialized bytes.
*
* Must be called while holding a read lock on the block.
* Releases the read lock upon exception; keeps the read lock upon successful return.
*/
private def doGetLocalBytes(blockId: BlockId, info: BlockInfo): BlockData = {
val level = info.level
logDebug(s"Level for block $blockId is $level")
// In order, try to read the serialized bytes from memory, then from disk, then fall back to
// serializing in-memory objects, and, finally, throw an exception if the block does not exist.
if (level.deserialized) {
// Try to avoid expensive serialization by reading a pre-serialized copy from disk:
if (level.useDisk && diskStore.contains(blockId)) {
// Note: we purposely do not try to put the block back into memory here. Since this branch
// handles deserialized blocks, this block may only be cached in memory as objects, not
// serialized bytes. Because the caller only requested bytes, it doesn't make sense to
// cache the block's deserialized objects since that caching may not have a payoff.
diskStore.getBytes(blockId)
} else if (level.useMemory && memoryStore.contains(blockId)) {
// The block was not found on disk, so serialize an in-memory copy:
new ByteBufferBlockData(serializerManager.dataSerializeWithExplicitClassTag(
blockId, memoryStore.getValues(blockId).get, info.classTag), true)
} else {
handleLocalReadFailure(blockId)
}
} else { // storage level is serialized
if (level.useMemory && memoryStore.contains(blockId)) {
new ByteBufferBlockData(memoryStore.getBytes(blockId).get, false)
} else if (level.useDisk && diskStore.contains(blockId)) {
val diskData = diskStore.getBytes(blockId)
maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
.map(new ByteBufferBlockData(_, false))
.getOrElse(diskData)
} else {
handleLocalReadFailure(blockId)
}
}
}
/**
* Get block from remote block managers.
*
* This does not acquire a lock on this block in this JVM.
*/
private[spark] def getRemoteValues[T: ClassTag](blockId: BlockId): Option[BlockResult] = {
val ct = implicitly[ClassTag[T]]
getRemoteManagedBuffer(blockId).map { data =>
val values =
serializerManager.dataDeserializeStream(blockId, data.createInputStream())(ct)
new BlockResult(values, DataReadMethod.Network, data.size)
}
}
private def preferExecutors(locations: Seq[BlockManagerId]): Seq[BlockManagerId] = {
val (executors, shuffleServers) = locations.partition(_.port != externalShuffleServicePort)
executors ++ shuffleServers
}
/**
* Return a list of locations for the given block, prioritizing the local machine since
* multiple block managers can share the same host, followed by hosts on the same rack.
*
* Within each of the above listed groups (same host, same rack and others) executors are
* preferred over the external shuffle service.
*/
private[spark] def sortLocations(locations: Seq[BlockManagerId]): Seq[BlockManagerId] = {
val locs = Random.shuffle(locations)
val (preferredLocs, otherLocs) = locs.partition(_.host == blockManagerId.host)
val orderedParts = blockManagerId.topologyInfo match {
case None => Seq(preferredLocs, otherLocs)
case Some(_) =>
val (sameRackLocs, differentRackLocs) = otherLocs.partition {
loc => blockManagerId.topologyInfo == loc.topologyInfo
}
Seq(preferredLocs, sameRackLocs, differentRackLocs)
}
orderedParts.map(preferExecutors).reduce(_ ++ _)
}
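// Illustrative example (not part of the original source): with the local block manager on
// hostA in rack1, sortLocations orders the candidate locations roughly as
//   [executors on hostA] ++ [shuffle services on hostA] ++
//   [executors in rack1] ++ [shuffle services in rack1] ++
//   [remaining executors] ++ [remaining shuffle services]
// where a random shuffle is applied to the full list before the partitioning, so ordering
// within each group is randomized.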
/**
* Get block from remote block managers as a ManagedBuffer.
*/
private def getRemoteManagedBuffer(blockId: BlockId): Option[ManagedBuffer] = {
logDebug(s"Getting remote block $blockId")
require(blockId != null, "BlockId is null")
var runningFailureCount = 0
var totalFailureCount = 0
// Because all the remote blocks are registered in the driver, it is not necessary to ask
// all the slave executors for the block status.
val locationsAndStatus = master.getLocationsAndStatus(blockId)
val blockSize = locationsAndStatus.map { b =>
b.status.diskSize.max(b.status.memSize)
}.getOrElse(0L)
val blockLocations = locationsAndStatus.map(_.locations).getOrElse(Seq.empty)
// If the block size is above the threshold, we should pass our FileManager to
// BlockTransferService, which will leverage it to spill the block; if not, the passed-in
// null value means the block will be persisted in memory.
val tempFileManager = if (blockSize > maxRemoteBlockToMem) {
remoteBlockTempFileManager
} else {
null
}
val locations = sortLocations(blockLocations)
val maxFetchFailures = locations.size
var locationIterator = locations.iterator
while (locationIterator.hasNext) {
val loc = locationIterator.next()
logDebug(s"Getting remote block $blockId from $loc")
val data = try {
val buf = blockTransferService.fetchBlockSync(loc.host, loc.port, loc.executorId,
blockId.toString, tempFileManager)
if (blockSize > 0 && buf.size() == 0) {
throw new IllegalStateException("Empty buffer received for non empty block")
}
buf
} catch {
case NonFatal(e) =>
runningFailureCount += 1
totalFailureCount += 1
if (totalFailureCount >= maxFetchFailures) {
// Give up trying any more locations. Either we've tried all of the original locations,
// or we've refreshed the list of locations from the master, and have still
// hit failures after trying locations from the refreshed list.
logWarning(s"Failed to fetch block after $totalFailureCount fetch failures. " +
s"Most recent failure cause:", e)
return None
}
logWarning(s"Failed to fetch remote block $blockId " +
s"from $loc (failed attempt $runningFailureCount)", e)
// If there is a large number of executors then the locations list can contain a
// large number of stale entries, causing a large number of retries that may
// take a significant amount of time. To get rid of these stale entries
// we refresh the block locations after a certain number of fetch failures.
if (runningFailureCount >= maxFailuresBeforeLocationRefresh) {
locationIterator = sortLocations(master.getLocations(blockId)).iterator
logDebug(s"Refreshed locations from the driver " +
s"after ${runningFailureCount} fetch failures.")
runningFailureCount = 0
}
// This location failed, so we retry fetch from a different one by returning null here
null
}
if (data != null) {
// If the ManagedBuffer is a BlockManagerManagedBuffer, the disposal of the
// byte buffers backing it may need to be handled after reading the bytes.
// In this case, since we just fetched the bytes remotely, we do not have
// a BlockManagerManagedBuffer. The assert here is to ensure that this holds
// true (or the disposal is handled).
assert(!data.isInstanceOf[BlockManagerManagedBuffer])
return Some(data)
}
logDebug(s"The value of block $blockId is null")
}
logDebug(s"Block $blockId not found")
None
}
/**
* Get block from remote block managers as serialized bytes.
*/
def getRemoteBytes(blockId: BlockId): Option[ChunkedByteBuffer] = {
getRemoteManagedBuffer(blockId).map { data =>
// SPARK-24307 undocumented "escape-hatch" in case there are any issues in converting to
// ChunkedByteBuffer, to go back to old code-path. Can be removed post Spark 2.4 if
// new path is stable.
if (remoteReadNioBufferConversion) {
new ChunkedByteBuffer(data.nioByteBuffer())
} else {
ChunkedByteBuffer.fromManagedBuffer(data)
}
}
}
/**
* Get a block from the block manager (either local or remote).
*
* This acquires a read lock on the block if the block was stored locally and does not acquire
* any locks if the block was fetched from a remote block manager. The read lock will
* automatically be freed once the result's `data` iterator is fully consumed.
*/
def get[T: ClassTag](blockId: BlockId): Option[BlockResult] = {
val local = getLocalValues(blockId)
if (local.isDefined) {
logInfo(s"Found block $blockId locally")
return local
}
val remote = getRemoteValues[T](blockId)
if (remote.isDefined) {
logInfo(s"Found block $blockId remotely")
return remote
}
None
}
/**
* Downgrades an exclusive write lock to a shared read lock.
*/
def downgradeLock(blockId: BlockId): Unit = {
blockInfoManager.downgradeLock(blockId)
}
/**
* Release a lock on the given block with explicit TaskContext.
* The param `taskContext` should be passed in case we can't get the correct TaskContext,
* for example, the input iterator of a cached RDD iterates to the end in a child
* thread.
*/
def releaseLock(blockId: BlockId, taskContext: Option[TaskContext] = None): Unit = {
val taskAttemptId = taskContext.map(_.taskAttemptId())
// SPARK-27666. When a task completes, Spark automatically releases all the blocks locked
// by this task. We should not release any locks for a task that is already completed.
if (taskContext.isDefined && taskContext.get.isCompleted) {
logWarning(s"Task ${taskAttemptId.get} already completed, not releasing lock for $blockId")
} else {
blockInfoManager.unlock(blockId, taskAttemptId)
}
}
/**
* Registers a task with the BlockManager in order to initialize per-task bookkeeping structures.
*/
def registerTask(taskAttemptId: Long): Unit = {
blockInfoManager.registerTask(taskAttemptId)
}
/**
* Release all locks for the given task.
*
* @return the blocks whose locks were released.
*/
def releaseAllLocksForTask(taskAttemptId: Long): Seq[BlockId] = {
blockInfoManager.releaseAllLocksForTask(taskAttemptId)
}
/**
* Retrieve the given block if it exists, otherwise call the provided `makeIterator` method
* to compute the block, persist it, and return its values.
*
* @return either a BlockResult if the block was successfully cached, or an iterator if the block
* could not be cached.
*/
def getOrElseUpdate[T](
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[T],
makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
// Attempt to read the block from local or remote storage. If it's present, then we don't need
// to go through the local-get-or-put path.
get[T](blockId)(classTag) match {
case Some(block) =>
return Left(block)
case _ =>
// Need to compute the block.
}
// Initially we hold no locks on this block.
doPutIterator(blockId, makeIterator, level, classTag, keepReadLock = true) match {
case None =>
// doPut() didn't hand work back to us, so the block already existed or was successfully
// stored. Therefore, we now hold a read lock on the block.
val blockResult = getLocalValues(blockId).getOrElse {
// Since we held a read lock between the doPut() and get() calls, the block should not
// have been evicted, so get() not returning the block indicates some internal error.
releaseLock(blockId)
throw new SparkException(s"get() failed for block $blockId even though we held a lock")
}
// We already hold a read lock on the block from the doPut() call and getLocalValues()
// acquires the lock again, so we need to call releaseLock() here so that the net number
// of lock acquisitions is 1 (since the caller will only call release() once).
releaseLock(blockId)
Left(blockResult)
case Some(iter) =>
// The put failed, likely because the data was too large to fit in memory and could not be
// dropped to disk. Therefore, we need to pass the input iterator back to the caller so
// that they can decide what to do with the values (e.g. process them without caching).
Right(iter)
}
}
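// Illustrative sketch (not part of the original source): a caller-side use of getOrElseUpdate,
// in the style of RDD caching. `rddBlockId`, `elementClassTag` and `computePartition` are
// hypothetical names.
//
//   blockManager.getOrElseUpdate(rddBlockId, StorageLevel.MEMORY_AND_DISK, elementClassTag,
//       () => computePartition()) match {
//     case Left(blockResult) => blockResult.data // block was cached; read lock held until consumed
//     case Right(iter) => iter                   // could not be cached; caller consumes values directly
//   }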
/**
* @return true if the block was stored or false if an error occurred.
*/
def putIterator[T: ClassTag](
blockId: BlockId,
values: Iterator[T],
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
require(values != null, "Values is null")
doPutIterator(blockId, () => values, level, implicitly[ClassTag[T]], tellMaster) match {
case None =>
true
case Some(iter) =>
// Caller doesn't care about the iterator values, so we can close the iterator here
// to free resources earlier
iter.close()
false
}
}
/**
* A short circuited method to get a block writer that can write data directly to disk.
* The Block will be appended to the File specified by filename. Callers should handle error
* cases.
*/
def getDiskWriter(
blockId: BlockId,
file: File,
serializerInstance: SerializerInstance,
bufferSize: Int,
writeMetrics: ShuffleWriteMetricsReporter): DiskBlockObjectWriter = {
val syncWrites = conf.get(config.SHUFFLE_SYNC)
new DiskBlockObjectWriter(file, serializerManager, serializerInstance, bufferSize,
syncWrites, writeMetrics, blockId)
}
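// Illustrative sketch (not part of the original source): shuffle write code typically obtains a
// DiskBlockObjectWriter, streams key/value pairs to it, and then commits. The names below
// (`shuffleBlockId`, `outputFile`, `serInstance`, `metrics`, `records`) are hypothetical.
//
//   val writer = blockManager.getDiskWriter(shuffleBlockId, outputFile, serInstance,
//     32 * 1024, metrics) // 32 KiB buffer
//   records.foreach { case (k, v) => writer.write(k, v) }
//   val segment = writer.commitAndGet() // FileSegment describing the bytes just written
//   writer.close()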
/**
* Put a new block of serialized bytes to the block manager.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*
* @return true if the block was stored or false if an error occurred.
*/
def putBytes[T: ClassTag](
blockId: BlockId,
bytes: ChunkedByteBuffer,
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
require(bytes != null, "Bytes is null")
val blockStoreUpdater =
ByteBufferBlockStoreUpdater(blockId, level, implicitly[ClassTag[T]], bytes, tellMaster)
blockStoreUpdater.save()
}
/**
* Helper method used to abstract common code from [[BlockStoreUpdater.save()]]
* and [[doPutIterator()]].
*
* @param putBody a function which attempts the actual put() and returns None on success
* or Some on failure.
*/
private def doPut[T](
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[_],
tellMaster: Boolean,
keepReadLock: Boolean)(putBody: BlockInfo => Option[T]): Option[T] = {
require(blockId != null, "BlockId is null")
require(level != null && level.isValid, "StorageLevel is null or invalid")
val putBlockInfo = {
val newInfo = new BlockInfo(level, classTag, tellMaster)
if (blockInfoManager.lockNewBlockForWriting(blockId, newInfo)) {
newInfo
} else {
logWarning(s"Block $blockId already exists on this machine; not re-adding it")
if (!keepReadLock) {
// lockNewBlockForWriting returned a read lock on the existing block, so we must free it:
releaseLock(blockId)
}
return None
}
}
val startTimeNs = System.nanoTime()
var exceptionWasThrown: Boolean = true
val result: Option[T] = try {
val res = putBody(putBlockInfo)
exceptionWasThrown = false
if (res.isEmpty) {
// the block was successfully stored
if (keepReadLock) {
blockInfoManager.downgradeLock(blockId)
} else {
blockInfoManager.unlock(blockId)
}
} else {
removeBlockInternal(blockId, tellMaster = false)
logWarning(s"Putting block $blockId failed")
}
res
} catch {
// Since removeBlockInternal may throw exception,
// we should print exception first to show root cause.
case NonFatal(e) =>
logWarning(s"Putting block $blockId failed due to exception $e.")
throw e
} finally {
// This cleanup is performed in a finally block rather than a `catch` to avoid having to
// catch and properly re-throw InterruptedException.
if (exceptionWasThrown) {
// If an exception was thrown then it's possible that the code in `putBody` has already
// notified the master about the availability of this block, so we need to send an update
// to remove this block location.
removeBlockInternal(blockId, tellMaster = tellMaster)
// The `putBody` code may have also added a new block status to TaskMetrics, so we need
// to cancel that out by overwriting it with an empty block status. We only do this if
// the finally block was entered via an exception because doing this unconditionally would
// cause us to send empty block statuses for every block that failed to be cached due to
// a memory shortage (which is an expected failure, unlike an uncaught exception).
addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty)
}
}
val usedTimeMs = Utils.getUsedTimeNs(startTimeNs)
if (level.replication > 1) {
logDebug(s"Putting block ${blockId} with replication took $usedTimeMs")
} else {
logDebug(s"Putting block ${blockId} without replication took ${usedTimeMs}")
}
result
}
/**
* Put the given block according to the given level in one of the block stores, replicating
* the values if necessary.
*
* If the block already exists, this method will not overwrite it.
*
* @param keepReadLock if true, this method will hold the read lock when it returns (even if the
* block already exists). If false, this method will hold no locks when it
* returns.
* @return None if the block was already present or if the put succeeded, or Some(iterator)
* if the put failed.
*/
private def doPutIterator[T](
blockId: BlockId,
iterator: () => Iterator[T],
level: StorageLevel,
classTag: ClassTag[T],
tellMaster: Boolean = true,
keepReadLock: Boolean = false): Option[PartiallyUnrolledIterator[T]] = {
doPut(blockId, level, classTag, tellMaster = tellMaster, keepReadLock = keepReadLock) { info =>
val startTimeNs = System.nanoTime()
var iteratorFromFailedMemoryStorePut: Option[PartiallyUnrolledIterator[T]] = None
// Size of the block in bytes
var size = 0L
if (level.useMemory) {
// Put it in memory first, even if it also has useDisk set to true;
// We will drop it to disk later if the memory store can't hold it.
if (level.deserialized) {
memoryStore.putIteratorAsValues(blockId, iterator(), classTag) match {
case Right(s) =>
size = s
case Left(iter) =>
// Not enough space to unroll this block; drop to disk if applicable
if (level.useDisk) {
logWarning(s"Persisting block $blockId to disk instead.")
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
serializerManager.dataSerializeStream(blockId, out, iter)(classTag)
}
size = diskStore.getSize(blockId)
} else {
iteratorFromFailedMemoryStorePut = Some(iter)
}
}
} else { // !level.deserialized
memoryStore.putIteratorAsBytes(blockId, iterator(), classTag, level.memoryMode) match {
case Right(s) =>
size = s
case Left(partiallySerializedValues) =>
// Not enough space to unroll this block; drop to disk if applicable
if (level.useDisk) {
logWarning(s"Persisting block $blockId to disk instead.")
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
partiallySerializedValues.finishWritingToStream(out)
}
size = diskStore.getSize(blockId)
} else {
iteratorFromFailedMemoryStorePut = Some(partiallySerializedValues.valuesIterator)
}
}
}
} else if (level.useDisk) {
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
serializerManager.dataSerializeStream(blockId, out, iterator())(classTag)
}
size = diskStore.getSize(blockId)
}
val putBlockStatus = getCurrentBlockStatus(blockId, info)
val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid
if (blockWasSuccessfullyStored) {
// Now that the block is in either the memory or disk store, tell the master about it.
info.size = size
if (tellMaster && info.tellMaster) {
reportBlockStatus(blockId, putBlockStatus)
}
addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus)
logDebug(s"Put block $blockId locally took ${Utils.getUsedTimeNs(startTimeNs)}")
if (level.replication > 1) {
val remoteStartTimeNs = System.nanoTime()
val bytesToReplicate = doGetLocalBytes(blockId, info)
// [SPARK-16550] Erase the typed classTag when using default serialization, since
// NettyBlockRpcServer crashes when deserializing repl-defined classes.
// TODO(ekl) remove this once the classloader issue on the remote end is fixed.
val remoteClassTag = if (!serializerManager.canUseKryo(classTag)) {
scala.reflect.classTag[Any]
} else {
classTag
}
try {
replicate(blockId, bytesToReplicate, level, remoteClassTag)
} finally {
bytesToReplicate.dispose()
}
logDebug(s"Put block $blockId remotely took ${Utils.getUsedTimeNs(remoteStartTimeNs)}")
}
}
assert(blockWasSuccessfullyStored == iteratorFromFailedMemoryStorePut.isEmpty)
iteratorFromFailedMemoryStorePut
}
}
/**
* Attempts to cache spilled bytes read from disk into the MemoryStore in order to speed up
* subsequent reads. This method requires the caller to hold a read lock on the block.
*
* @return a copy of the bytes from the memory store if the put succeeded, otherwise None.
* If this returns bytes from the memory store then the original disk store bytes will
* automatically be disposed and the caller should not continue to use them. Otherwise,
* if this returns None then the original disk store bytes will be unaffected.
*/
private def maybeCacheDiskBytesInMemory(
blockInfo: BlockInfo,
blockId: BlockId,
level: StorageLevel,
diskData: BlockData): Option[ChunkedByteBuffer] = {
require(!level.deserialized)
if (level.useMemory) {
// Synchronize on blockInfo to guard against a race condition where two readers both try to
// put values read from disk into the MemoryStore.
blockInfo.synchronized {
if (memoryStore.contains(blockId)) {
diskData.dispose()
Some(memoryStore.getBytes(blockId).get)
} else {
val allocator = level.memoryMode match {
case MemoryMode.ON_HEAP => ByteBuffer.allocate _
case MemoryMode.OFF_HEAP => Platform.allocateDirectBuffer _
}
val putSucceeded = memoryStore.putBytes(blockId, diskData.size, level.memoryMode, () => {
// https://issues.apache.org/jira/browse/SPARK-6076
// If the file size is bigger than the free memory, OOM will happen. So if we
// cannot put it into MemoryStore, copyForMemory should not be created. That's why
// this action is put into a `() => ChunkedByteBuffer` and created lazily.
diskData.toChunkedByteBuffer(allocator)
})
if (putSucceeded) {
diskData.dispose()
Some(memoryStore.getBytes(blockId).get)
} else {
None
}
}
}
} else {
None
}
}
/**
* Attempts to cache spilled values read from disk into the MemoryStore in order to speed up
* subsequent reads. This method requires the caller to hold a read lock on the block.
*
* @return a copy of the iterator. The original iterator passed to this method should no longer
* be used after this method returns.
*/
private def maybeCacheDiskValuesInMemory[T](
blockInfo: BlockInfo,
blockId: BlockId,
level: StorageLevel,
diskIterator: Iterator[T]): Iterator[T] = {
require(level.deserialized)
val classTag = blockInfo.classTag.asInstanceOf[ClassTag[T]]
if (level.useMemory) {
// Synchronize on blockInfo to guard against a race condition where two readers both try to
// put values read from disk into the MemoryStore.
blockInfo.synchronized {
if (memoryStore.contains(blockId)) {
// Note: if we had a means to discard the disk iterator, we would do that here.
memoryStore.getValues(blockId).get
} else {
memoryStore.putIteratorAsValues(blockId, diskIterator, classTag) match {
case Left(iter) =>
// The memory store put() failed, so it returned the iterator back to us:
iter
case Right(_) =>
// The put() succeeded, so we can read the values back:
memoryStore.getValues(blockId).get
}
}
}.asInstanceOf[Iterator[T]]
} else {
diskIterator
}
}
/**
* Get peer block managers in the system.
*/
private def getPeers(forceFetch: Boolean): Seq[BlockManagerId] = {
peerFetchLock.synchronized {
val cachedPeersTtl = conf.get(config.STORAGE_CACHED_PEERS_TTL) // milliseconds
val diff = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastPeerFetchTimeNs)
val timeout = diff > cachedPeersTtl
if (cachedPeers == null || forceFetch || timeout) {
cachedPeers = master.getPeers(blockManagerId).sortBy(_.hashCode)
lastPeerFetchTimeNs = System.nanoTime()
logDebug("Fetched peers from master: " + cachedPeers.mkString("[", ",", "]"))
}
cachedPeers
}
}
/**
* Called for pro-active replenishment of blocks lost due to executor failures.
*
* @param blockId blockId being replicated
* @param existingReplicas existing block managers that have a replica
* @param maxReplicas maximum replicas needed
*/
def replicateBlock(
blockId: BlockId,
existingReplicas: Set[BlockManagerId],
maxReplicas: Int): Unit = {
logInfo(s"Using $blockManagerId to pro-actively replicate $blockId")
blockInfoManager.lockForReading(blockId).foreach { info =>
val data = doGetLocalBytes(blockId, info)
val storageLevel = StorageLevel(
useDisk = info.level.useDisk,
useMemory = info.level.useMemory,
useOffHeap = info.level.useOffHeap,
deserialized = info.level.deserialized,
replication = maxReplicas)
// We know we are called as a result of an executor removal, so we refresh the peer cache;
// this way, we won't try to replicate to a missing executor with a stale reference.
getPeers(forceFetch = true)
try {
replicate(blockId, data, storageLevel, info.classTag, existingReplicas)
} finally {
logDebug(s"Releasing lock for $blockId")
releaseLockAndDispose(blockId, data)
}
}
}
/**
* Replicate block to another node. Note that this is a blocking call that returns after
* the block has been replicated.
*/
private def replicate(
blockId: BlockId,
data: BlockData,
level: StorageLevel,
classTag: ClassTag[_],
existingReplicas: Set[BlockManagerId] = Set.empty): Unit = {
val maxReplicationFailures = conf.get(config.STORAGE_MAX_REPLICATION_FAILURE)
val tLevel = StorageLevel(
useDisk = level.useDisk,
useMemory = level.useMemory,
useOffHeap = level.useOffHeap,
deserialized = level.deserialized,
replication = 1)
val numPeersToReplicateTo = level.replication - 1
val startTime = System.nanoTime
val peersReplicatedTo = mutable.HashSet.empty ++ existingReplicas
val peersFailedToReplicateTo = mutable.HashSet.empty[BlockManagerId]
var numFailures = 0
val initialPeers = getPeers(false).filterNot(existingReplicas.contains)
var peersForReplication = blockReplicationPolicy.prioritize(
blockManagerId,
initialPeers,
peersReplicatedTo,
blockId,
numPeersToReplicateTo)
while (numFailures <= maxReplicationFailures &&
!peersForReplication.isEmpty &&
peersReplicatedTo.size < numPeersToReplicateTo) {
val peer = peersForReplication.head
try {
val onePeerStartTime = System.nanoTime
logTrace(s"Trying to replicate $blockId of ${data.size} bytes to $peer")
// This thread keeps a lock on the block, so we do not want the netty thread to unlock
// block when it finishes sending the message.
val buffer = new BlockManagerManagedBuffer(blockInfoManager, blockId, data, false,
unlockOnDeallocate = false)
blockTransferService.uploadBlockSync(
peer.host,
peer.port,
peer.executorId,
blockId,
buffer,
tLevel,
classTag)
logTrace(s"Replicated $blockId of ${data.size} bytes to $peer" +
s" in ${(System.nanoTime - onePeerStartTime).toDouble / 1e6} ms")
peersForReplication = peersForReplication.tail
peersReplicatedTo += peer
} catch {
case NonFatal(e) =>
logWarning(s"Failed to replicate $blockId to $peer, failure #$numFailures", e)
peersFailedToReplicateTo += peer
// We have a failed replication, so we get the list of peers again.
// We don't want peers we have already replicated to, or the ones that
// have failed previously.
val filteredPeers = getPeers(true).filter { p =>
!peersFailedToReplicateTo.contains(p) && !peersReplicatedTo.contains(p)
}
numFailures += 1
peersForReplication = blockReplicationPolicy.prioritize(
blockManagerId,
filteredPeers,
peersReplicatedTo,
blockId,
numPeersToReplicateTo - peersReplicatedTo.size)
}
}
logDebug(s"Replicating $blockId of ${data.size} bytes to " +
s"${peersReplicatedTo.size} peer(s) took ${(System.nanoTime - startTime) / 1e6} ms")
if (peersReplicatedTo.size < numPeersToReplicateTo) {
logWarning(s"Block $blockId replicated to only " +
s"${peersReplicatedTo.size} peer(s) instead of $numPeersToReplicateTo peers")
}
logDebug(s"block $blockId replicated to ${peersReplicatedTo.mkString(", ")}")
}
/**
* Read a block consisting of a single object.
*/
def getSingle[T: ClassTag](blockId: BlockId): Option[T] = {
get[T](blockId).map(_.data.next().asInstanceOf[T])
}
/**
* Write a block consisting of a single object.
*
* @return true if the block was stored or false if the block was already stored or an
* error occurred.
*/
def putSingle[T: ClassTag](
blockId: BlockId,
value: T,
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
putIterator(blockId, Iterator(value), level, tellMaster)
}
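// Illustrative sketch (not part of the original source): a putSingle/getSingle round-trip for a
// small single-object block. `blockId` is a hypothetical identifier.
//
//   val stored = blockManager.putSingle(blockId, "some value", StorageLevel.MEMORY_ONLY)
//   if (stored) {
//     val maybeValue: Option[String] = blockManager.getSingle[String](blockId)
//   }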
/**
* Drop a block from memory, possibly putting it on disk if applicable. Called when the memory
* store reaches its limit and needs to free up space.
*
* Note that `data` is evaluated lazily: if the block is not put on disk, it won't be created.
*
* The caller of this method must hold a write lock on the block before calling this method.
* This method does not release the write lock.
*
* @return the block's new effective StorageLevel.
*/
private[storage] override def dropFromMemory[T: ClassTag](
blockId: BlockId,
data: () => Either[Array[T], ChunkedByteBuffer]): StorageLevel = {
logInfo(s"Dropping block $blockId from memory")
val info = blockInfoManager.assertBlockIsLockedForWriting(blockId)
var blockIsUpdated = false
val level = info.level
// Drop to disk, if storage level requires
if (level.useDisk && !diskStore.contains(blockId)) {
logInfo(s"Writing block $blockId to disk")
data() match {
case Left(elements) =>
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
serializerManager.dataSerializeStream(
blockId,
out,
elements.toIterator)(info.classTag.asInstanceOf[ClassTag[T]])
}
case Right(bytes) =>
diskStore.putBytes(blockId, bytes)
}
blockIsUpdated = true
}
// Actually drop from memory store
val droppedMemorySize =
if (memoryStore.contains(blockId)) memoryStore.getSize(blockId) else 0L
val blockIsRemoved = memoryStore.remove(blockId)
if (blockIsRemoved) {
blockIsUpdated = true
} else {
logWarning(s"Block $blockId could not be dropped from memory as it does not exist")
}
val status = getCurrentBlockStatus(blockId, info)
if (info.tellMaster) {
reportBlockStatus(blockId, status, droppedMemorySize)
}
if (blockIsUpdated) {
addUpdatedBlockStatusToTaskMetrics(blockId, status)
}
status.storageLevel
}
/**
* Remove all blocks belonging to the given RDD.
*
* @return The number of blocks removed.
*/
def removeRdd(rddId: Int): Int = {
// TODO: Avoid a linear scan by creating another mapping of RDD.id to blocks.
logInfo(s"Removing RDD $rddId")
val blocksToRemove = blockInfoManager.entries.flatMap(_._1.asRDDId).filter(_.rddId == rddId)
blocksToRemove.foreach { blockId => removeBlock(blockId, tellMaster = false) }
blocksToRemove.size
}
/**
* Remove all blocks belonging to the given broadcast.
*/
def removeBroadcast(broadcastId: Long, tellMaster: Boolean): Int = {
logDebug(s"Removing broadcast $broadcastId")
val blocksToRemove = blockInfoManager.entries.map(_._1).collect {
case bid @ BroadcastBlockId(`broadcastId`, _) => bid
}
blocksToRemove.foreach { blockId => removeBlock(blockId, tellMaster) }
blocksToRemove.size
}
/**
* Remove a block from both memory and disk.
*/
def removeBlock(blockId: BlockId, tellMaster: Boolean = true): Unit = {
logDebug(s"Removing block $blockId")
blockInfoManager.lockForWriting(blockId) match {
case None =>
// The block has already been removed; do nothing.
logWarning(s"Asked to remove block $blockId, which does not exist")
case Some(info) =>
removeBlockInternal(blockId, tellMaster = tellMaster && info.tellMaster)
addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty)
}
}
/**
* Internal version of [[removeBlock()]] which assumes that the caller already holds a write
* lock on the block.
*/
private def removeBlockInternal(blockId: BlockId, tellMaster: Boolean): Unit = {
// Removals are idempotent in disk store and memory store. At worst, we get a warning.
val removedFromMemory = memoryStore.remove(blockId)
val removedFromDisk = diskStore.remove(blockId)
if (!removedFromMemory && !removedFromDisk) {
logWarning(s"Block $blockId could not be removed as it was not found on disk or in memory")
}
blockInfoManager.removeBlock(blockId)
if (tellMaster) {
reportBlockStatus(blockId, BlockStatus.empty)
}
}
private def addUpdatedBlockStatusToTaskMetrics(blockId: BlockId, status: BlockStatus): Unit = {
if (conf.get(config.TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES)) {
Option(TaskContext.get()).foreach { c =>
c.taskMetrics().incUpdatedBlockStatuses(blockId -> status)
}
}
}
def releaseLockAndDispose(
blockId: BlockId,
data: BlockData,
taskContext: Option[TaskContext] = None): Unit = {
releaseLock(blockId, taskContext)
data.dispose()
}
def stop(): Unit = {
blockTransferService.close()
if (shuffleClient ne blockTransferService) {
// Closing should be idempotent, but maybe not for the NioBlockTransferService.
shuffleClient.close()
}
remoteBlockTempFileManager.stop()
diskBlockManager.stop()
rpcEnv.stop(slaveEndpoint)
blockInfoManager.clear()
memoryStore.clear()
futureExecutionContext.shutdownNow()
logInfo("BlockManager stopped")
}
}
private[spark] object BlockManager {
private val ID_GENERATOR = new IdGenerator
def blockIdsToLocations(
blockIds: Array[BlockId],
env: SparkEnv,
blockManagerMaster: BlockManagerMaster = null): Map[BlockId, Seq[String]] = {
// blockManagerMaster != null is used in tests
assert(env != null || blockManagerMaster != null)
val blockLocations: Seq[Seq[BlockManagerId]] = if (blockManagerMaster == null) {
env.blockManager.getLocationBlockIds(blockIds)
} else {
blockManagerMaster.getLocations(blockIds)
}
val blockManagers = new HashMap[BlockId, Seq[String]]
for (i <- 0 until blockIds.length) {
blockManagers(blockIds(i)) = blockLocations(i).map { loc =>
ExecutorCacheTaskLocation(loc.host, loc.executorId).toString
}
}
blockManagers.toMap
}
private class ShuffleMetricsSource(
override val sourceName: String,
metricSet: MetricSet) extends Source {
override val metricRegistry = new MetricRegistry
metricRegistry.registerAll(metricSet)
}
class RemoteBlockDownloadFileManager(blockManager: BlockManager)
extends DownloadFileManager with Logging {
// lazy because SparkEnv is set after this
lazy val encryptionKey = SparkEnv.get.securityManager.getIOEncryptionKey()
private class ReferenceWithCleanup(
file: DownloadFile,
referenceQueue: JReferenceQueue[DownloadFile]
) extends WeakReference[DownloadFile](file, referenceQueue) {
val filePath = file.path()
def cleanUp(): Unit = {
logDebug(s"Clean up file $filePath")
if (!file.delete()) {
logDebug(s"Fail to delete file $filePath")
}
}
}
private val referenceQueue = new JReferenceQueue[DownloadFile]
private val referenceBuffer = Collections.newSetFromMap[ReferenceWithCleanup](
new ConcurrentHashMap)
private val POLL_TIMEOUT = 1000 // milliseconds
@volatile private var stopped = false
private val cleaningThread = new Thread() { override def run(): Unit = keepCleaning() }
cleaningThread.setDaemon(true)
cleaningThread.setName("RemoteBlock-temp-file-clean-thread")
cleaningThread.start()
override def createTempFile(transportConf: TransportConf): DownloadFile = {
val file = blockManager.diskBlockManager.createTempLocalBlock()._2
encryptionKey match {
case Some(key) =>
// encryption is enabled, so when we read the decrypted data off the network, we need to
// encrypt it when writing to disk. Note that the data may have been encrypted when it
// was cached on disk on the remote side, but it was already decrypted by now (see
// EncryptedBlockData).
new EncryptedDownloadFile(file, key)
case None =>
new SimpleDownloadFile(file, transportConf)
}
}
override def registerTempFileToClean(file: DownloadFile): Boolean = {
referenceBuffer.add(new ReferenceWithCleanup(file, referenceQueue))
}
def stop(): Unit = {
stopped = true
cleaningThread.interrupt()
cleaningThread.join()
}
private def keepCleaning(): Unit = {
while (!stopped) {
try {
Option(referenceQueue.remove(POLL_TIMEOUT))
.map(_.asInstanceOf[ReferenceWithCleanup])
.foreach { ref =>
referenceBuffer.remove(ref)
ref.cleanUp()
}
} catch {
case _: InterruptedException =>
// no-op
case NonFatal(e) =>
logError("Error in cleaning thread", e)
}
}
}
}
/**
* A DownloadFile that encrypts data when it is written, and decrypts when it's read.
*/
private class EncryptedDownloadFile(
file: File,
key: Array[Byte]) extends DownloadFile {
private val env = SparkEnv.get
override def delete(): Boolean = file.delete()
override def openForWriting(): DownloadFileWritableChannel = {
new EncryptedDownloadWritableChannel()
}
override def path(): String = file.getAbsolutePath
private class EncryptedDownloadWritableChannel extends DownloadFileWritableChannel {
private val countingOutput: CountingWritableChannel = new CountingWritableChannel(
Channels.newChannel(env.serializerManager.wrapForEncryption(new FileOutputStream(file))))
override def closeAndRead(): ManagedBuffer = {
countingOutput.close()
val size = countingOutput.getCount
new EncryptedManagedBuffer(new EncryptedBlockData(file, size, env.conf, key))
}
override def write(src: ByteBuffer): Int = countingOutput.write(src)
override def isOpen: Boolean = countingOutput.isOpen()
override def close(): Unit = countingOutput.close()
}
}
}
| aosagie/spark | core/src/main/scala/org/apache/spark/storage/BlockManager.scala | Scala | apache-2.0 | 72,368 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package support
import com.github.tomakehurst.wiremock.client.MappingBuilder
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.matching.UrlPathPattern
import common.enums.VatRegStatus
import itutil.IntegrationSpecBase
import models.api.returns.Returns
import models.api.trafficmanagement.{Draft, RegistrationChannel, RegistrationInformation, VatReg}
import models.api.{Attachments, SicCode, VatScheme}
import models.{ApiKey, S4LKey}
import play.api.http.Status._
import play.api.libs.json._
import play.api.mvc.AnyContentAsFormUrlEncoded
import play.api.test.FakeRequest
import uk.gov.hmrc.auth.core.AffinityGroup.Organisation
import utils.JsonUtilities
import java.time.LocalDate
trait StubUtils {
final class RequestHolder(var request: FakeRequest[AnyContentAsFormUrlEncoded])
class PreconditionBuilder(implicit requestHolder: RequestHolder) {
implicit val builder: PreconditionBuilder = this
def address(id: String, line1: String, line2: String, country: String, postcode: String) =
AddressStub(id, line1, line2, country, postcode)
def postRequest(data: Map[String, String])(implicit requestHolder: RequestHolder): PreconditionBuilder = {
val requestWithBody = FakeRequest("POST", "/").withFormUrlEncodedBody(data.toArray: _*)
requestHolder.request = requestWithBody
this
}
def user = UserStub()
def alfeJourney = JourneyStub()
def vatRegistrationFootprint = VatRegistrationFootprintStub()
def vatScheme = VatSchemeStub()
def vatRegistration = VatRegistrationStub()
def icl = ICL()
def bankAccountReputation = BankAccountReputationServiceStub()
def s4lContainer[C: S4LKey]: ViewModelStub[C] = new ViewModelStub[C]()
def audit = AuditStub()
def s4l = S4L()
def trafficManagement = TrafficManagementStub()
def registrationApi = RegistrationApiStub()
}
def given()(implicit requestHolder: RequestHolder): PreconditionBuilder = {
new PreconditionBuilder()
.audit.writesAudit()
.audit.writesAuditMerged()
}
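// Illustrative sketch (not part of the original source): an integration test would normally
// chain these stubs before issuing a request, for example
//
//   given()
//     .user.isAuthorised
//     .s4lContainer[Returns].isEmpty
//     .vatScheme.contains(someVatScheme)
//
// The exact stub method names (e.g. `isAuthorised`, `contains`) depend on the stub case classes
// defined below and elsewhere in this trait, so treat this as an assumption about their shape.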
object S4LStub extends IntegrationSpecBase {
import uk.gov.hmrc.crypto._
import uk.gov.hmrc.crypto.json.JsonEncryptor
implicit lazy val jsonCrypto = new ApplicationCrypto(app.configuration.underlying).JsonCrypto
implicit lazy val encryptionFormat = new JsonEncryptor[JsValue]()
def stubS4LGetNoAux(key: String, data: String): MappingBuilder = {
val s4lData = Json.parse(data).as[JsValue]
val encData = encryptionFormat.writes(Protected(s4lData)).as[JsString]
val json =
s"""
|{
| "atomicId": { "$$oid": "598830cf5e00005e00b3401e" },
| "data": {
| "$key": $encData
| },
| "id": "1",
| "modifiedDetails": {
| "createdAt": { "$$date": 1502097615710 },
| "lastUpdated": { "$$date": 1502189409725 }
| }
|}
""".stripMargin
get(urlPathMatching("/save4later/vat-registration-frontend/1"))
.willReturn(ok(
json
))
}
def stubS4LPut(key: String, data: String): MappingBuilder = {
val s4lData = Json.parse(data).as[JsValue]
val encData = encryptionFormat.writes(Protected(s4lData)).as[JsString]
put(urlPathMatching(s"/save4later/vat-registration-frontend/1/data/$key"))
.willReturn(ok(
s"""
|{ "atomicId": { "$$oid": "598ac0b64e0000d800170620" },
| "data": { "$key": $encData },
| "id": "1",
| "modifiedDetails": {
| "createdAt": { "$$date": 1502265526026 },
| "lastUpdated": { "$$date": 1502265526026 }}}
""".stripMargin
))
}
def stubS4LGet[C, T](t: T)(implicit key: S4LKey[C], fmt: Writes[T]): MappingBuilder = {
val s4lData = Json.toJson(t)
val encData = encryptionFormat.writes(Protected(s4lData)).as[JsString]
get(urlPathMatching("/save4later/vat-registration-frontend/1"))
.willReturn(ok(
s"""
|{
| "atomicId": { "$$oid": "598830cf5e00005e00b3401e" },
| "data": {
| "${key.key}": $encData
| },
| "id": "1",
| "modifiedDetails": {
| "createdAt": { "$$date": 1502097615710 },
| "lastUpdated": { "$$date": 1502189409725 }
| }
|}
""".stripMargin
))
}
def stubS4LGetNothing(): MappingBuilder =
get(urlPathMatching("/save4later/vat-registration-frontend/1"))
.willReturn(ok(
s"""
|{
| "atomicId": { "$$oid": "598830cf5e00005e00b3401e" },
| "data": {},
| "id": "1",
| "modifiedDetails": {
| "createdAt": { "$$date": 1502097615710 },
| "lastUpdated": { "$$date": 1502189409725 }
| }
|}
""".stripMargin
))
def stubS4LClear(): MappingBuilder =
delete(urlPathMatching("/save4later/vat-registration-frontend/1")).willReturn(ok(""))
}
case class S4L(scenario: String = "S4L Scenario")(implicit builder: PreconditionBuilder) {
def contains(key: String, data: String, currentState: Option[String] = None, nextState: Option[String] = None): PreconditionBuilder = {
val mappingBuilderScenarioGET = S4LStub.stubS4LGetNoAux(key, data).inScenario(scenario)
val mappingBuilderGET = currentState.fold(mappingBuilderScenarioGET)(mappingBuilderScenarioGET.whenScenarioStateIs)
stubFor(nextState.fold(mappingBuilderGET)(mappingBuilderGET.willSetStateTo))
builder
}
def isUpdatedWith(key: String, data: String, currentState: Option[String] = None, nextState: Option[String] = None): PreconditionBuilder = {
val mappingBuilderScenarioPUT = S4LStub.stubS4LPut(key, data).inScenario(scenario)
val mappingBuilderPUT = currentState.fold(mappingBuilderScenarioPUT)(mappingBuilderScenarioPUT.whenScenarioStateIs)
stubFor(nextState.fold(mappingBuilderPUT)(mappingBuilderPUT.willSetStateTo))
builder
}
def isEmpty(currentState: Option[String] = None, nextState: Option[String] = None): PreconditionBuilder = {
val mappingBuilderScenarioGET = S4LStub.stubS4LGetNothing().inScenario(scenario)
val mappingBuilderGET = currentState.fold(mappingBuilderScenarioGET)(mappingBuilderScenarioGET.whenScenarioStateIs)
stubFor(nextState.fold(mappingBuilderGET)(mappingBuilderGET.willSetStateTo))
builder
}
def cleared(currentState: Option[String] = None, nextState: Option[String] = None): PreconditionBuilder = {
val mappingBuilderScenarioDELETE = S4LStub.stubS4LClear().inScenario(scenario)
val mappingBuilderDELETE = currentState.fold(mappingBuilderScenarioDELETE)(mappingBuilderScenarioDELETE.whenScenarioStateIs)
stubFor(nextState.fold(mappingBuilderDELETE)(mappingBuilderDELETE.willSetStateTo))
builder
}
}
@deprecated("please change the types on this once all refactoring has been completed, both should be same type instead of C & T")
class ViewModelStub[C]()(implicit builder: PreconditionBuilder, s4LKey: S4LKey[C]) {
def contains[T](t: T)(implicit fmt: Writes[T]): PreconditionBuilder = {
stubFor(S4LStub.stubS4LGet[C, T](t))
builder
}
def isUpdatedWith(t: C)(implicit key: S4LKey[C], fmt: Writes[C]): PreconditionBuilder = {
stubFor(S4LStub.stubS4LPut(key.key, fmt.writes(t).toString()))
builder
}
def isEmpty: PreconditionBuilder = {
stubFor(S4LStub.stubS4LGetNothing())
builder
}
def cleared: PreconditionBuilder = {
stubFor(S4LStub.stubS4LClear())
builder
}
def clearedByKey(implicit key: S4LKey[C]): PreconditionBuilder = {
stubFor(S4LStub.stubS4LPut(key.key, Json.obj().toString()))
builder
}
}
case class VatRegistrationStub()(implicit builder: PreconditionBuilder) {
def threshold(url: String, threshold: String): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(url))
.willReturn(ok(
s"""
|{
| "taxable-threshold":"$threshold",
| "since":"2018-1-1"
|}
""".stripMargin
))
)
builder
}
def status(url: String, status: String): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(url))
.willReturn(ok(
s"""
|{
| "status":"$status"
|}
""".stripMargin
))
)
builder
}
def submit(url: String, status: Int): PreconditionBuilder = {
stubFor(
put(urlPathEqualTo(url))
.willReturn(aResponse.withStatus(status))
)
builder
}
def acknowledgementReference(regId: String, ackRef: String): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(s"/vatreg/$regId/acknowledgement-reference"))
.willReturn(ok(Json.toJson(ackRef).toString()))
)
builder
}
def storesNrsPayload(regId: String): PreconditionBuilder = {
stubFor(
patch(urlPathEqualTo(s"/vatreg/$regId/nrs-payload"))
.willReturn(ok())
)
builder
}
def honestyDeclaration(regId: String, honestyDeclaration: String): PreconditionBuilder = {
stubFor(
patch(urlPathEqualTo(s"/vatreg/$regId/honesty-declaration"))
.willReturn(ok(honestyDeclaration))
)
builder
}
def insertScheme(body: String): PreconditionBuilder = {
stubFor(
post(urlPathEqualTo("/vatreg/insert-s4l-scheme"))
.willReturn(ok().withBody(body))
)
builder
}
def storesReturns(regId: String, returns: Returns): PreconditionBuilder = {
stubFor(
patch(urlPathEqualTo(s"/vatreg/$regId/returns"))
.willReturn(ok().withBody(Json.toJson(returns).toString()))
)
builder
}
}
case class ICL()(implicit builder: PreconditionBuilder, requestHolder: RequestHolder) {
def setup(): PreconditionBuilder = {
stubFor(
post(urlPathEqualTo("/internal/initialise-journey"))
.willReturn(ok(
s"""
|{
| "fetchResultsUri" : "fetch",
| "journeyStartUri" : "journeyStart"
|}
""".stripMargin
)))
builder
}
def fetchResults(sicCodeList: List[SicCode]): PreconditionBuilder = {
val sicJsArray = Json.toJson(sicCodeList).as[JsArray]
stubFor(
get(urlPathMatching("/fetch-results"))
.willReturn(ok(
s"""
|{
| "sicCodes": $sicJsArray
|}
""".stripMargin
)))
builder
}
}
case class CurrentProfile()(implicit builder: PreconditionBuilder, requestHolder: RequestHolder) {
def setup(status: VatRegStatus.Value = VatRegStatus.draft, currentState: Option[String] = None, nextState: Option[String] = None): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(s"/incorporation-information/000-431-TEST/company-profile"))
.willReturn(ok(
s"""{ "company_name": "testingCompanyName" }"""
)))
stubFor(get(urlPathEqualTo("/vatreg/1/status")).willReturn(ok(
s"""{"status": "${status.toString}"}"""
)))
stubFor(
get(urlPathEqualTo("/vatreg/incorporation-information/000-431-TEST"))
.willReturn(ok(
s"""
|{
| "statusEvent": {
| "crn": "90000001",
| "incorporationDate": "2016-08-05",
| "status": "accepted"
| },
| "subscription": {
| "callbackUrl": "http://localhost:9896/callbackUrl",
| "regime": "vat",
| "subscriber": "scrs",
| "transactionId": "000-431-TEST"
| }
|}
""".stripMargin
)))
builder
}
}
case class VatSchemeStub()(implicit builder: PreconditionBuilder) {
def isBlank: PreconditionBuilder = {
stubFor(
get(urlPathEqualTo("/vatreg/registrations/1"))
.willReturn(ok(
s"""{ "registrationId" : "1" , "status" : "draft"}"""
)))
builder
}
def doesNotExistForKey(blockKey: String): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(s"/vatreg/1/$blockKey"))
.willReturn(notFound()))
builder
}
def isNotUpdatedWith[T](t: T, statusCode: Int = 500)(implicit tFmt: Format[T]) = {
stubFor(
patch(urlPathMatching(s"/vatreg/1/.*"))
.willReturn(aResponse().withStatus(statusCode).withBody(tFmt.writes(t).toString())))
builder
}
def isUpdatedWith[T](t: T)(implicit tFmt: Writes[T]) = {
stubFor(
patch(urlPathMatching(s"/vatreg/1/.*"))
.willReturn(aResponse().withStatus(202).withBody(tFmt.writes(t).toString())))
builder
}
def storesAttachments(data: Attachments)(implicit writes: Writes[Attachments]) = {
stubFor(
put(urlPathMatching(s"/vatreg/1/attachments"))
.willReturn(aResponse().withStatus(OK).withBody(writes.writes(data).toString())))
builder
}
def isUpdatedWithPartner[T](t: T)(implicit tFmt: Format[T]): PreconditionBuilder = {
stubFor(
put(urlPathMatching(s"/vatreg/1/partners/.*"))
.willReturn(aResponse().withStatus(CREATED).withBody(tFmt.writes(t).toString())))
builder
}
def contains(vatReg: VatScheme): PreconditionBuilder = {
stubFor(get(urlPathEqualTo("/vatreg/1/get-scheme")).willReturn(ok(Json.toJson(vatReg).toString)))
builder
}
def contains(json: JsValue): PreconditionBuilder = {
stubFor(get(urlPathEqualTo("/vatreg/1/get-scheme")).willReturn(ok(json.toString())))
builder
}
def has(key: String, data: JsValue): PreconditionBuilder = {
stubFor(get(urlPathEqualTo(s"/vatreg/1/$key")).willReturn(ok(data.toString())))
builder
}
def doesNotHave(blockKey: String): PreconditionBuilder = {
stubFor(get(urlPathEqualTo(s"/vatreg/1/$blockKey")).willReturn(noContent()))
builder
}
def deleted: PreconditionBuilder = {
stubFor(delete(urlPathEqualTo("/vatreg/1/delete-scheme")).willReturn(ok("")))
builder
}
def patched(block: String, json: JsValue) = {
stubFor(
patch(urlPathMatching(s"/vatreg/1/$block"))
.willReturn(aResponse().withStatus(202).withBody(json.toString)))
builder
}
def isSubmittedSuccessfully(regId: String = "1"): PreconditionBuilder = {
stubFor(
put(urlPathMatching(s"/vatreg/$regId/submit-registration"))
.willReturn(aResponse().withStatus(200).withBody("fooBar")))
builder
}
def regStatus(status: VatRegStatus.Value): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo("/vatreg/1/status"))
.willReturn(ok(
Json.toJson(status).toString()
))
)
builder
}
}
case class VatRegistrationFootprintStub()(implicit builder: PreconditionBuilder) extends JsonUtilities {
def exists(status: VatRegStatus.Value = VatRegStatus.draft, withDate: Boolean = false): PreconditionBuilder = {
stubFor(
post(urlPathEqualTo("/vatreg/new"))
.willReturn(ok(
Json.stringify(Json.obj(
"registrationId" -> "1",
"status" -> status.toString
) ++ {if (withDate) Json.obj("createdDate" -> "2021-01-01") else Json.obj()}
))))
builder
}
def fails: PreconditionBuilder = {
stubFor(
post(urlPathEqualTo("/vatreg/new"))
.willReturn(serverError()))
builder
}
}
case class UserStub()(implicit builder: PreconditionBuilder) {
def authoriseData(arn: Option[String]): JsValue =
Json.obj(
"internalId" -> "1",
"affinityGroup" -> Organisation.toString,
"allEnrolments" -> arn.fold(Json.arr())(ref =>
Json.arr(
Json.obj(
"key" -> "HMRC-AS-AGENT",
"identifiers" -> Json.arr(
Json.obj(
"key" -> "AgentReferenceNumber",
"value" -> ref
)
)
)
)
)
)
def isAuthorised(arn: Option[String] = None)(implicit requestHolder: RequestHolder): PreconditionBuilder = {
requestHolder.request = SessionCookieBaker.requestWithSession(requestHolder.request, "anyUserId")
stubFor(
post(urlPathEqualTo("/auth/authorise"))
.willReturn(ok(authoriseData(arn).toString())))
builder
}
def isNotAuthorised = {
stubFor(
post(urlPathEqualTo("/auth/authorise"))
.willReturn(unauthorized()))
builder
}
}
case class JourneyStub()(implicit builder: PreconditionBuilder) {
val journeyInitUrl: UrlPathPattern = urlPathMatching(s".*/api/v2/init")
def initialisedSuccessfully(): PreconditionBuilder = {
stubFor(post(journeyInitUrl).willReturn(aResponse.withStatus(202).withHeader("Location", "continueUrl")))
builder
}
def notInitialisedAsExpected(): PreconditionBuilder = {
stubFor(post(journeyInitUrl).willReturn(aResponse().withStatus(202))) // a 202 _without_ Location header
builder
}
def failedToInitialise(): PreconditionBuilder = {
stubFor(post(journeyInitUrl).willReturn(serverError()))
builder
}
}
case class AddressStub
(id: String, line1: String, line2: String, country: String, postcode: String)
(implicit builder: PreconditionBuilder) {
val confirmedAddressPath = s""".*/api/v2/confirmed[?]id=$id"""
def isFound: PreconditionBuilder = {
stubFor(
get(urlMatching(confirmedAddressPath))
.willReturn(ok(
s"""
|{
| "auditRef": "$id",
| "id": "GB990091234520",
| "address": {
| "country": {
| "code": "$country",
| "name": "United Kingdom"
| },
| "lines": [
| "$line1",
| "$line2"
| ],
| "postcode": "$postcode"
| }
|}
""".stripMargin
)))
builder
}
def isNotFound: PreconditionBuilder = {
stubFor(
get(urlMatching(confirmedAddressPath))
.willReturn(notFound()))
builder
}
}
case class AuditStub()(implicit builder: PreconditionBuilder) {
def writesAudit(status: Int = 204) = {
stubFor(post(urlMatching("/write/audit"))
.willReturn(
aResponse().
withStatus(status).
withBody("""{"x":2}""")
)
)
builder
}
def writesAuditMerged(status: Int = 204) = {
stubFor(post(urlMatching("/write/audit/merged"))
.willReturn(
aResponse().
withStatus(status).
withBody("""{"x":2}""")
)
)
builder
}
def failsToWriteAudit() = {
writesAudit(404)
}
}
case class BankAccountReputationServiceStub()(implicit builder: PreconditionBuilder) {
def passes: PreconditionBuilder = {
stubFor(post(urlMatching("/v2/validateBankDetails"))
.willReturn(
aResponse().withStatus(200).withBody(
s"""
|{
| "accountNumberWithSortCodeIsValid": "yes",
| "nonStandardAccountDetailsRequiredForBacs": "no"
|}
""".stripMargin)
))
builder
}
def fails: PreconditionBuilder = {
stubFor(post(urlMatching("/v2/validateBankDetails"))
.willReturn(
aResponse().withStatus(200).withBody(
s"""
|{
| "accountNumberWithSortCodeIsValid": "no",
| "nonStandardAccountDetailsRequiredForBacs": "no"
|}
""".stripMargin)
))
builder
}
def isDown: PreconditionBuilder = {
stubFor(post(urlMatching("/v2/validateBankDetails"))
.willReturn(
serverError()
))
builder
}
}
case class TrafficManagementStub()(implicit builder: PreconditionBuilder) {
def passes(channel: RegistrationChannel = VatReg): PreconditionBuilder = {
stubFor(get(urlMatching(s"/vatreg/traffic-management/1/reg-info"))
.willReturn(
aResponse()
.withStatus(200)
.withBody(Json.toJson(
RegistrationInformation("1", "1", Draft, Some(LocalDate.now()), channel)
).toString())
))
builder
}
def fails: PreconditionBuilder = {
stubFor(get(urlMatching("/vatreg/traffic-management/1/reg-info"))
.willReturn(
aResponse()
.withStatus(204)
))
builder
}
def isCleared: PreconditionBuilder = {
stubFor(delete(urlMatching("/vatreg/traffic-management/reg-info/clear"))
.willReturn(
aResponse()
.withStatus(204)
))
builder
}
def failsToClear: PreconditionBuilder = {
stubFor(delete(urlMatching("/vatreg/traffic-management/reg-info/clear"))
.willReturn(
aResponse()
.withStatus(400)
))
builder
}
}
case class RegistrationApiStub()(implicit builder: PreconditionBuilder) {
def getRegistration(vatScheme: VatScheme, regId: String = "1")(implicit format: Format[VatScheme] = VatScheme.format): PreconditionBuilder = {
stubFor(get(urlPathEqualTo(s"/vatreg/registrations/$regId"))
.willReturn(ok(Json.stringify(Json.toJson(vatScheme)))
))
builder
}
def getSection[T: ApiKey](contains: Option[T], regId: String = "1", isComplete: Boolean = true)(implicit format: Format[T]): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(s"/vatreg/registrations/$regId/sections/${ApiKey[T]}"))
.willReturn(
contains match {
case Some(section) => ok(
Json.obj(
"isComplete" -> isComplete,
"data" -> Json.toJson[T](section)
).toString()
)
case None => notFound()
}))
builder
}
def getSectionFails[T: ApiKey](regId: String = "1")(implicit format: Format[T]): PreconditionBuilder = {
stubFor(
get(urlPathEqualTo(s"/vatreg/registrations/$regId/sections/${ApiKey[T]}"))
.willReturn(badRequest())
)
builder
}
def replaceSection[T: ApiKey](data: T, regId: String = "1", isComplete: Boolean = true)(implicit format: Format[T]): PreconditionBuilder = {
stubFor(
put(urlPathEqualTo(s"/vatreg/registrations/$regId/sections/${ApiKey[T]}"))
.willReturn(ok(
Json.obj(
"isComplete" -> isComplete,
"data" -> Json.toJson[T](data)
).toString()
)))
builder
}
def replaceSectionFails[T: ApiKey](regId: String = "1")(implicit format: Format[T]): PreconditionBuilder = {
stubFor(
put(urlPathEqualTo(s"/vatreg/registrations/$regId/sections/${ApiKey[T]}"))
.willReturn(badRequest())
)
builder
}
}
} | hmrc/vat-registration-frontend | it/support/StubUtils.scala | Scala | apache-2.0 | 24,336 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box.formats
import org.joda.time.LocalDate
import play.api.libs.json.JodaReads._
import play.api.libs.json.JodaWrites._
import play.api.libs.json._
import uk.gov.hmrc.ct.box.CtOptionalDate
class OptionalDateFormat[T <: CtOptionalDate](builder: (Option[LocalDate] => T)) extends Format[T] {
override def reads(json: JsValue): JsResult[T] = {
JsSuccess(builder(json.asOpt[LocalDate]))
}
override def writes(o: T): JsValue = {
Json.toJson[Option[LocalDate]](o.value)
}
}
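// Illustrative usage (hypothetical box type, not part of this file): a CtOptionalDate box
// gains a JSON Format by passing its constructor to OptionalDateFormat, e.g.
//
//   case class SomeDateBox(value: Option[LocalDate]) extends CtOptionalDate
//   implicit val someDateBoxFormat: Format[SomeDateBox] = new OptionalDateFormat(SomeDateBox.apply)
//
// Reads extract an optional Joda LocalDate from the JSON; writes emit the wrapped Option directly.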
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/box/formats/OptionalDateFormat.scala | Scala | apache-2.0 | 1,116 |
/**
* Copyright 2014 Gianluca Amato <[email protected]>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.targets
import it.unich.jandom.domains.numerical.NumericalProperty
import it.unich.jandom.domains.numerical.LinearForm
import it.unich.jandom.domains.numerical.LinearForm._
import it.unich.jandom.targets.NumericExpression._
/**
* This is the root of the hierarchy of all numeric expressions. Concrete instances
* are in the companion object. It implements operations for composing expressions.
* @author Gianluca Amato <[email protected]>
*/
sealed abstract class NumericExpression {
/**
* This method analyzes a numeric expression by adding a new dimension for the result.
* @param input the abstract property used to evaluate the expression.
* @return the resulting property with a new dimension for the evaluated expression.
*/
def analyze[Property <: NumericalProperty[Property]](input: Property): Property
/**
* This method analyzes a numeric expression and assigns the result to variable `v`.
* @param input the abstract property where the expression is evaluated.
* @param v the variable which is the target of the assignment.
* @return the resulting property with the evaluated expression assigned to variable `v`.
*/
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property
/**
* This method returns the subset of input where the expression is less than or equal to 0.
*/
def lteZero[Property <: NumericalProperty[Property]](input: Property): Property = {
val lf = LinearForm.v[Double](input.dimension)
analyze(input).linearInequality(lf).delVariable()
}
/**
* This method returns the subset of input where the expression is strictly less than 0.
* At the moment it is equivalent to lteZero since we do not support strict constraints.
*/
def ltZero[Property <: NumericalProperty[Property]](input: Property): Property = {
val lf = LinearForm.v[Double](input.dimension)
analyze(input).linearInequality(lf).delVariable()
}
/**
* This method returns the subset of input where the expression is different from 0.
*/
def neqZero[Property <: NumericalProperty[Property]](input: Property): Property = {
val lf = LinearForm.v[Double](input.dimension)
analyze(input).linearDisequality(lf).delVariable()
}
/**
* Returns the dimension of the expression, i.e. the greatest variable index which
* occurs in the expression plus one.
*/
def dimension: Int
/**
* Returns the textual representation of an expression.
* @param vars symbolic names of variables in the expression.
*/
def mkString(vars: Seq[String]): String
/**
* @inheritdoc
* It is equivalent to `mkString` with variable names `v0`...`vn`.
*/
override def toString = mkString(Stream.from(0).map { "v" + _ })
/**
* Returns true if the expression is syntactically zero
*/
def isZero: Boolean
def unary_- : NumericExpression = UnaryMinusExpression(this)
def +(expr: NumericExpression): NumericExpression = AddExpression(this, expr)
def -(expr: NumericExpression): NumericExpression = SubExpression(this, expr)
def *(expr: NumericExpression): NumericExpression = MulExpression(this, expr)
def /(expr: NumericExpression): NumericExpression = DivExpression(this, expr)
}
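// Illustrative example (not part of the original source): expressions are built with the
// operators above, and linear sub-expressions are kept linear whenever possible, e.g.
//
//   import NumericExpression._
//   val e = VariableExpression[Int](0) + VariableExpression[Int](1) * 2   // still a LinearExpression
//   val restricted = e.lteZero(someProperty)   // subset of someProperty where v0 + 2*v1 <= 0
//
// Here `someProperty` stands for any NumericalProperty instance and is assumed for the example.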
/**
* This is the companion object for NumericExpression, and contains all the concrete
* implementations of the abstract class.
* @author Gianluca Amato <[email protected]>
*/
object NumericExpression {
import scala.language.implicitConversions
/**
* Object for non-deterministic expression.
*/
case object NonDeterministicExpression extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
input.addVariable
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
input.nonDeterministicAssignment(v)
override def lteZero[Property <: NumericalProperty[Property]](input: Property): Property =
input
override def ltZero[Property <: NumericalProperty[Property]](input: Property): Property =
input
override def neqZero[Property <: NumericalProperty[Property]](input: Property): Property =
input
def mkString(vars: Seq[String]) = "?"
def isZero = false
def dimension = 0
}
/**
* A class for linear expressions. Linear expressions are important since they may be
* analyzed easily. For this reason, the arithmetic operators try to produce a linear
* expression as a result whenever possible.
*/
case class LinearExpression[T: Numeric](val lf: LinearForm[T]) extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
input.addVariable.linearAssignment(input.dimension, lf.toDouble)
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
input.linearAssignment(v, lf.toDouble)
override def lteZero[Property <: NumericalProperty[Property]](input: Property): Property =
input.linearInequality(lf.toDouble)
override def ltZero[Property <: NumericalProperty[Property]](input: Property): Property =
input.linearInequality(lf.toDouble)
override def neqZero[Property <: NumericalProperty[Property]](input: Property): Property =
input.linearDisequality(lf.toDouble)
override def +(expr: NumericExpression) = expr match {
case expr: LinearExpression[T] => LinearExpression[T](lf + expr.lf)
case _ => AddExpression(this, expr)
}
override def -(expr: NumericExpression) = expr match {
case expr: LinearExpression[T] => LinearExpression[T](lf - expr.lf)
case _ => SubExpression(this, expr)
}
override def *(expr: NumericExpression) = expr match {
case expr: LinearExpression[T] if (lf.isConstant) =>
LinearExpression(expr.lf * lf.known)
case expr: LinearExpression[T] if (expr.lf.isConstant) =>
LinearExpression(lf * expr.lf.known)
case _ => MulExpression(this, expr)
}
override def /(expr: NumericExpression) = expr match {
case expr: LinearExpression[T] if (expr.lf.isConstant) =>
LinearExpression(lf / expr.lf.known)
case _ => DivExpression(this, expr)
}
override def unary_- = LinearExpression[T](-lf)
def isZero = lf.isZero
def dimension = lf.dimension
def mkString(vars: Seq[String]) = lf.mkString(vars)
}
/**
* A class for the negation of a basic expression.
*/
case class UnaryMinusExpression(val e: NumericExpression) extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
e.analyze(input).variableNeg()
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
e.assignTo(v)(input).variableNeg(v)
def dimension = e.dimension
def isZero = false
def mkString(vars: Seq[String]) = s"(- ${e.mkString(vars)})"
}
/**
* A class for the sum of two basic expressions.
*/
case class AddExpression(val e1: NumericExpression, val e2: NumericExpression) extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
(e2 analyze (e1 analyze input)).variableAdd().delVariable()
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
e1.assignTo(v)(e2.analyze(input)).variableAdd(v, input.dimension).delVariable()
def dimension = e1.dimension max e2.dimension
def isZero = false
def mkString(vars: Seq[String]) = s"(${e1.mkString(vars)} + ${e2.mkString(vars)})"
}
/**
* A class for the difference of two basic expressions.
*/
case class SubExpression(val e1: NumericExpression, val e2: NumericExpression) extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
(e2 analyze (e1 analyze input)).variableSub().delVariable()
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
e1.assignTo(v)(e2.analyze(input)).variableSub(v, input.dimension).delVariable()
def dimension = e1.dimension max e2.dimension
def isZero = false
def mkString(vars: Seq[String]) = s"(${e1.mkString(vars)} - ${e2.mkString(vars)})"
}
/**
* A class for the product of two basic expressions.
*/
case class MulExpression(val e1: NumericExpression, val e2: NumericExpression) extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
(e2 analyze (e1 analyze input)).variableMul().delVariable()
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
e1.assignTo(v)(e2.analyze(input)).variableMul(v, input.dimension).delVariable()
def dimension = e1.dimension max e2.dimension
def isZero = false
def mkString(vars: Seq[String]) = s"(${e1.mkString(vars)} * ${e2.mkString(vars)})"
}
/**
* A class for the quotient of two basic expressions.
*/
case class DivExpression(val e1: NumericExpression, val e2: NumericExpression) extends NumericExpression {
def analyze[Property <: NumericalProperty[Property]](input: Property): Property =
(e2 analyze (e1 analyze input)).variableDiv().delVariable()
def assignTo[Property <: NumericalProperty[Property]](v: Int)(input: Property): Property =
e1.assignTo(v)(e2.analyze(input)).variableDiv(v, input.dimension).delVariable()
def dimension = e1.dimension max e2.dimension
def isZero = false
def mkString(vars: Seq[String]) = s"(${e1.mkString(vars)} / ${e2.mkString(vars)})"
}
/**
* Implicit conversion from constants of numeric type T to a NumericExpression.
*/
implicit def ConstantExpression[T: Numeric](c: T) = LinearExpression(c)
/**
* Constructs an expression corresponding to the variable `v`.
*/
def VariableExpression[T: Numeric](v: Int) = LinearExpression(LinearForm.v[T](v))
/**
* Implicit conversion from LinearForm to NumericExpression.
*/
implicit def linearFormToLinearExpression[T: Numeric](lf: LinearForm[T]) = LinearExpression(lf)
}
| francescaScozzari/Jandom | core/src/main/scala/it/unich/jandom/targets/NumericExpression.scala | Scala | lgpl-3.0 | 10,926 |
package com.carmanconsulting.akka.scala
import akka.actor.Actor
class ParameterizedHelloAkka(format: String) extends Actor {
def this() = {
this("Hello, %s!")
}
override def receive: Receive = {
case name: String =>
val message: String = String.format(format, name)
sender ! message
}
}
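// Illustrative usage (not part of the original source): the actor can be created with the
// default greeting or with a custom format string, e.g.
//
//   val system = ActorSystem("demo")
//   val defaultGreeter = system.actorOf(Props[ParameterizedHelloAkka])                        // uses "Hello, %s!"
//   val customGreeter = system.actorOf(Props(classOf[ParameterizedHelloAkka], "Howdy, %s!"))
//
// Sending a String name to either actor replies to the sender with the formatted greeting.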
| jwcarman/akka-introduction | scala/src/main/scala/com/carmanconsulting/akka/scala/ParameterizedHelloAkka.scala | Scala | apache-2.0 | 319 |
import sbt._
import Keys._
import com.typesafe.sbt.SbtSite.site
import com.typesafe.sbt.SbtSite.SiteKeys._
import com.typesafe.sbt.site.JekyllSupport.Jekyll
import com.typesafe.sbt.SbtGhPages.ghpages
import com.typesafe.sbt.SbtGit.git
object SprinterBuild extends Build {
val sprinter = Project("sprinter", file(".")) settings (
organization := "org.scala-lang",
name := "sprinter",
version := "0.2.0",
scalaVersion := "2.10.3",
//scalaBinaryVersion <<= scalaVersion,
//crossVersion := CrossVersion.full,
//exportJars := true,
testOptions += Tests.Argument(TestFrameworks.JUnit, "-q", "-v", "-s", "-a"),
resolvers += Resolver.sonatypeRepo("snapshots"),
// quasi quote support // TODO: move this to tests only
addCompilerPlugin("org.scala-lang.plugins" % "macro-paradise" % "2.0.0-SNAPSHOT" cross CrossVersion.full),
libraryDependencies ++= Seq(
"junit" % "junit-dep" % "4.10" % "test",
"com.novocode" % "junit-interface" % "0.10-M4" % "test"
),
libraryDependencies <++= scalaVersion apply dependencies,
publishTo := Some(Resolver.file("file", new File(Path.userHome.absolutePath+"/.m2/repository")))
) settings (websiteSettings: _*)
lazy val websiteSettings: Seq[Setting[_]] = (
site.settings ++
ghpages.settings ++
site.includeScaladoc() ++
site.jekyllSupport() ++
Seq(
git.remoteRepo := "https://github.com/VladimirNik/sprinter.git",
includeFilter in Jekyll := ("*.html" | "*.png" | "*.js" | "*.css" | "CNAME")
)
)
def dependencies(sv: String) = Seq(
"org.scala-lang" % "scala-compiler" % sv
)
}
| VladimirNik/sprinter | project/build.scala | Scala | bsd-3-clause | 1,636 |
package at.forsyte.apalache.tla.bmcmt.rewriter
import at.forsyte.apalache.tla.bmcmt.analyses.ExprGrade
import at.forsyte.apalache.tla.bmcmt.caches.{AbstractCacheSnapshot, SimpleCacheSnapshot}
import at.forsyte.apalache.tla.bmcmt.types.eager.TrivialTypeSnapshot
import at.forsyte.apalache.tla.bmcmt.{Arena, ArenaCell}
import at.forsyte.apalache.tla.lir.TlaEx
import scala.collection.immutable.SortedSet
class SymbStateRewriterSnapshot(val typeFinderSnapshot: TrivialTypeSnapshot,
val intValueCacheSnapshot: AbstractCacheSnapshot[Arena, Int, ArenaCell],
val intRangeCacheSnapshot: AbstractCacheSnapshot[Arena, (Int, Int), ArenaCell],
val strValueCacheSnapshot: AbstractCacheSnapshot[Arena, String, ArenaCell],
val recordDomainCache: AbstractCacheSnapshot[Arena, (SortedSet[String], SortedSet[String]), ArenaCell],
val exprCacheSnapshot: SimpleCacheSnapshot[TlaEx, (TlaEx, ExprGrade.Value)]) {
}
| konnov/dach | tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/rewriter/SymbStateRewriterSnapshot.scala | Scala | apache-2.0 | 1,056 |
package com.twitter.finatra.httpclient
import com.google.inject.Provides
import com.twitter.finagle.Http
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finatra.httpclient.modules.HttpClientModuleTrait
import com.twitter.finatra.jackson.modules.ScalaObjectMapperModule
import com.twitter.inject.app.TestInjector
import com.twitter.inject.modules.StatsReceiverModule
import com.twitter.inject.{Injector, Test}
import com.twitter.util.jackson.ScalaObjectMapper
class HttpClientStartupIntegrationTest extends Test {
test("startup non ssl with HttpClientModuleTrait") {
val plainHttpClientModule = new HttpClientModuleTrait {
val label: String = "test-non-ssl"
val dest: String = "flag!myservice"
@Provides
def providesHttpClient(
injector: Injector,
statsReceiver: StatsReceiver,
mapper: ScalaObjectMapper
): HttpClient = newHttpClient(injector, statsReceiver, mapper)
}
val injector = TestInjector(
modules = Seq(
StatsReceiverModule,
ScalaObjectMapperModule,
plainHttpClientModule
),
flags = Map("com.twitter.server.resolverMap" -> "myservice=nil!")
).create
injector.instance[HttpClient]
}
test("startup ssl with HttpClientModuleTrait") {
val sslHttpClientModule = new HttpClientModuleTrait {
val label: String = "test-ssl"
val dest: String = "flag!myservice"
override def configureClient(injector: Injector, client: Http.Client): Http.Client =
client.withTls("foo")
@Provides
def providesHttpClient(
injector: Injector,
statsReceiver: StatsReceiver,
mapper: ScalaObjectMapper
): HttpClient = newHttpClient(injector, statsReceiver, mapper)
}
val injector = TestInjector(
modules = Seq(
StatsReceiverModule,
ScalaObjectMapperModule,
sslHttpClientModule
),
flags = Map("com.twitter.server.resolverMap" -> "myservice=nil!")
).create
injector.instance[HttpClient]
}
}
| twitter/finatra | http-client/src/test/scala/com/twitter/finatra/httpclient/HttpClientStartupIntegrationTest.scala | Scala | apache-2.0 | 2,048 |
package de.fosd.typechef.crefactor.backend
import de.fosd.typechef.parser.c._
import java.util.Collections
import org.kiama.rewriting.Rewriter._
import de.fosd.typechef.crefactor.Morpheus
import de.fosd.typechef.conditional.Opt
/**
* Trait containing all the Kiama rewrite rules used to apply the actual transformations,
* e.g., changing identifiers.
*/
trait TUnitRewriteRules extends ASTNavigation with ConditionalNavigation {
/**
* Replace a list of ids in AST with copied instance with new names.
*/
def replaceIds[T <: Product](t: T, ids: List[Id], newName: String): T = {
val idsToReplace = Collections.newSetFromMap[Id](new java.util.IdentityHashMap())
ids foreach idsToReplace.add
val r = manybu(rule {
case id: Id =>
if (idsToReplace.contains(id)) {
// create id with the new name and preserve the position information
val copiedId = id.copy(name = newName)
id.range match {
case Some((from, to)) => copiedId.setPositionRange(from, to)
case _ =>
}
copiedId
} else
id
case x => x
})
r(t).get.asInstanceOf[T]
}
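// Illustrative usage (not part of the original source): a rename refactoring collects the Id
// occurrences to change and rewrites the whole translation unit in one pass, e.g.
//
//   // `tunit` and `idsToRename` are placeholders for a parsed TranslationUnit and the
//   // occurrences selected by the refactoring engine.
//   val renamed = replaceIds(tunit, idsToRename, newName = "renamedId")
//
// Note that reference equality (via the IdentityHashMap-backed set) decides which Id nodes are
// replaced, so unrelated identifiers that merely share the same name are left untouched.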
/**
* Replace a list of ids in AST with copied instances using pointers.
*/
def replaceIdsWithPointers[T <: Product](t: T, ids: List[Id]): T = {
val r = manybu(rule {
case id: Id => if (ids.exists(_ eq id)) PointerDerefExpr(id) else id
case x => x
})
r(t).get.asInstanceOf[T]
}
/**
* Replaces the innerstatements of compoundstatements of a translation unit.
*/
def replaceCompoundStmt[T <: Product](t: T, cStmt: CompoundStatement,
newCStmt: CompoundStatement): T = {
val r = manybu(rule {
case cc: CompoundStatement => if (cc eq cStmt) newCStmt else cc
case x => x
})
r(t).get.asInstanceOf[T]
}
def insertBefore[T <: Product](t: T, mark: Opt[_], insert: Opt[_]): T = {
val r = oncetd(rule {
case l: List[_] =>
l.flatMap(x =>
if (x.asInstanceOf[AnyRef].eq(mark))
insert :: x :: Nil
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
/**
* Inserts one opt statement before and the after a mark in a translation unit.
*/
def insertBeforeAndAfter[T <: Product](t: T, mark: Opt[_], before: Opt[_], after: Opt[_]): T = {
val r = oncetd(rule {
case l: List[_] =>
l.flatMap(x =>
if (x.asInstanceOf[AnyRef].eq(mark))
before :: x :: after :: Nil
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
def insertListBefore[T <: Product](t: T, mark: Opt[_], insert: List[Opt[_]]): T = {
val r = oncetd(rule {
case l: List[_] =>
l.flatMap(x =>
if (x.asInstanceOf[AnyRef].eq(mark))
insert ::: x :: Nil
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
def replace[T <: Product](t: T, mark: Opt[_], replace: Opt[_]): T = {
val r = manybu(rule {
case l: List[_] => l.flatMap(x =>
if (x.asInstanceOf[AnyRef].eq(mark))
replace :: Nil
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
def replaceStmtInCompoundStatement(ccStmt: CompoundStatement, mark: Opt[Statement], replace: Opt[Statement]) = {
val newInnerStmts = ccStmt.innerStatements.map { innerStmt =>
if (innerStmt.eq(mark)) replace
else innerStmt
}
ccStmt.copy(innerStatements = newInnerStmts)
}
def replaceOnceTD[T <: Product](t: T, mark: Opt[_], replace: Opt[_]): T = {
val r = oncetd(rule {
case l: List[_] =>
l.flatMap(x =>
if (x.asInstanceOf[AnyRef].eq(mark))
replace :: Nil
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
def replaceNArySubExpr[T <: Product](t: T, e: NArySubExpr, n: NArySubExpr): T = {
val r = manybu(rule {
case i: NArySubExpr => if (isPartOf(i, e)) n else i
})
r(t).get.asInstanceOf[T]
}
def replaceExprWithCompStmExpr[T <: Product](t: T, e: Expr, n: CompoundStatementExpr): T = {
val r = manytd(rule {
case i: Expr if i eq e => n
})
r(t).get.asInstanceOf[T]
}
def replaceStmtWithStmtsInCompStmt[T <: Product](t: CompoundStatement, e: Opt[Statement], n: List[Opt[Statement]]): T = {
val r = manybu(rule {
case l: List[_] =>
l.flatMap(x =>
if (x.asInstanceOf[AnyRef] eq e)
Some(n)
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
// generic replace function; possible replacement for replaceId and
// replaceCompoundStatement, and maybe more?
def replace[T <: Product, U](t: T, e: U, n: U): T = {
val r = manybu(rule {
case i if i.asInstanceOf[AnyRef] eq e.asInstanceOf[AnyRef] => n
})
r(t).getOrElse(t).asInstanceOf[T]
}
// removes element remove from t, but does not traverse t entirely, since
// oncetd is top-down traversal, which stops at first successful match
def remove[T <: Product](t: T, remove: Opt[_]): T = {
val r = oncetd(rule {
case l: List[_] =>
l.flatMap(x =>
if (x.asInstanceOf[AnyRef].eq(remove))
Nil
else
x :: Nil)
})
r(t).get.asInstanceOf[T]
}
// Leave them alone - they work!
def removeStatementInTUnit(l1: List[Opt[Statement]], l2: List[Opt[Statement]]): List[Opt[Statement]] = {
l1.flatMap(x => l2.exists(y => x.eq(y)) match {
case true => None
case false => Some(x)
})
}
def insertBefore(l: List[Opt[Statement]], mark: Opt[Statement], insert: Opt[Statement]) =
l.foldRight(List[Opt[Statement]]())((s, nl) => {
if (mark.eq(s)) insert :: s :: nl
else s :: nl
})
def replace(morpheus: Morpheus, remove: CompoundStatement,
insert: CompoundStatement): TranslationUnit = {
val parent = parentOpt(remove, morpheus.getASTEnv)
parent.entry match {
case f: FunctionDef => replaceOnceTD(morpheus.getTranslationUnit, parent,
parent.copy(entry = f.copy(stmt = insert)))
case c: CompoundStatement => replace(morpheus.getTranslationUnit, c,
c.copy(innerStatements = insert.innerStatements))
.asInstanceOf[TranslationUnit]
case _ => throw RefactorException("No valid rewrite rule.")
}
}
private def isPartOf(subterm: Product, term: Any): Boolean = {
term match {
case _: Product if subterm.asInstanceOf[AnyRef].eq(term.asInstanceOf[AnyRef]) => true
case l: List[_] => l.map(isPartOf(subterm, _)).exists(_ == true)
case p: Product => p.productIterator.toList.map(isPartOf(subterm, _)).exists(_ == true)
case x => false
}
}
}
| joliebig/Morpheus | src/main/scala/de/fosd/typechef/crefactor/backend/TUnitRewriteRules.scala | Scala | lgpl-3.0 | 7,710 |
package org.igye.commonutils
import org.junit.{Assert, Test}
class GeneralCaseInsensitiveStringFilterTest {
@Test
def testMatcher(): Unit = {
Assert.assertTrue(GeneralCaseInsensitiveStringFilter("bd").matches("abcde"))
Assert.assertTrue(GeneralCaseInsensitiveStringFilter("bd").matches("ABCDE"))
Assert.assertFalse(GeneralCaseInsensitiveStringFilter("db").matches("abcde"))
Assert.assertTrue(GeneralCaseInsensitiveStringFilter("").matches("abcde"))
Assert.assertFalse(GeneralCaseInsensitiveStringFilter("abc").matches("abbde"))
}
}
| Igorocky/commonutils | src/test/scala/org/igye/commonutils/GeneralCaseInsensitiveStringFilterTest.scala | Scala | mit | 586 |
package org.isa.rdd
import scala.reflect.ClassTag
import org.apache.spark.InterruptibleIterator
import org.apache.spark.{SparkContext, Partition, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.fs.{FileSystem, FileStatus, Path}
private[rdd] class FilenameCollectionPartition[String](
var rddId: Long,
var slice: Int,
var values: Seq[String]
) extends Partition with Serializable {
def iterator: Iterator[String] = values.iterator
override def hashCode(): Int = (41 * (41 + rddId) + slice).toInt
override def equals(other:Any): Boolean = other match {
case that: FilenameCollectionPartition[_] =>
this.rddId == that.rddId && this.slice == that.slice
case _ => false
}
override def index: Int = slice
}
private[rdd] class FilenameCollectionRDD[T:ClassTag](
@transient sc: SparkContext,
@transient data: Seq[String],
numSlices: Int
) extends RDD[String](sc, Nil) {
override def getPartitions: Array[Partition] = {
val slices = this.slice(data, numSlices).toArray
slices.indices.map(i => new FilenameCollectionPartition(id, i, slices(i))).toArray
}
override def compute(s:Partition, context:TaskContext): Iterator[String] = {
val iterator = new InterruptibleIterator(
context,
s.asInstanceOf[FilenameCollectionPartition[String]].iterator
)
iterator
}
private def slice(seq: Seq[String], numSlices: Int): Seq[Seq[String]] = {
require(numSlices >= 1, "Positive number of slices required")
def positions(length: Long, numSlices: Int): Iterator[(Int, Int)] = {
(0 until numSlices).iterator.map(i => {
val start = ((i * length) / numSlices).toInt
val end = (((i + 1) * length) / numSlices).toInt
(start, end)
})
}
val array = seq.toArray
positions(array.length, numSlices).map(
{ case (start, end) => array.slice(start, end).toSeq }
).toSeq
}
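// Worked example (not part of the original source): slicing is by integer index ranges, so
// slice(Seq("a", "b", "c", "d", "e"), 2) computes positions (0, 2) and (2, 5) and yields
// Seq(Seq("a", "b"), Seq("c", "d", "e")); any remainder ends up in the later slices.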
}
| sadikovi/sbt-multi-project-example | foo/src/main/scala/org/isa/rdd/FilenameCollectionRDD.scala | Scala | mit | 2,102 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
import scala.annotation.meta._
/** An annotation that designates that a definition is deprecated.
* A deprecation warning is issued upon usage of the annotated definition.
*
* Library authors should state the library's deprecation policy in their documentation to give
* developers guidance on how long a deprecated definition will be preserved.
*
* Library authors should prepend the name of their library to the version number to help
* developers distinguish deprecations coming from different libraries:
*
* {{{
* @deprecated("this method will be removed", "FooLib 12.0")
* def oldMethod(x: Int) = ...
* }}}
*
* The compiler will emit deprecation warnings grouped by library and version:
*
* {{{
* oldMethod(1)
* oldMethod(2)
* aDeprecatedMethodFromLibraryBar(3, 4)
*
* // warning: there was one deprecation warning (since BarLib 3.2)
* // warning: there were two deprecation warnings (since FooLib 12.0)
* // warning: there were three deprecation warnings in total; re-run with -deprecation for details
* }}}
*
* '''`@deprecated` in the Scala language and its standard library'''<br/>
*
* A deprecated element of the Scala language or a definition in the Scala standard library will
* be preserved at least for the current major version.
*
* This means that an element deprecated in some 2.13.x release will be preserved in
* all 2.13.x releases, but may be removed in 2.14. (A deprecated element
* might be kept longer to ease migration, but developers should not rely on this.)
*
* @see The official documentation on [[http://www.scala-lang.org/news/2.11.0/#binary-compatibility binary compatibility]].
* @param message the message to print during compilation if the definition is accessed
* @param since a string identifying the first version in which the definition was deprecated
* @see [[scala.deprecatedInheritance]]
* @see [[scala.deprecatedOverriding]]
* @see [[scala.deprecatedName]]
*/
@getter @setter @beanGetter @beanSetter @field
@deprecatedInheritance("Scheduled for being final in 2.14", "2.13.0")
class deprecated(message: String = "", since: String = "") extends scala.annotation.StaticAnnotation
| martijnhoekstra/scala | src/library/scala/deprecated.scala | Scala | apache-2.0 | 2,513 |
package com.teambytes.inflatable.raft.model
import org.scalatest._
import com.teambytes.inflatable.raft.model.Entry
import com.teambytes.inflatable.raft.example.protocol.WordConcatProtocol
import com.teambytes.inflatable.raft.ClusterConfiguration
class ReplicatedLogTest extends FlatSpec with Matchers
with WordConcatProtocol {
behavior of "ReplicatedLog"
it should "contain commands and terms when they were received by the leader" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
val t1 = Term(1)
val command1 = "a"
val t2 = Term(2)
val command2 = "b"
// when
val frozenLog = replicatedLog
replicatedLog += Entry(command1, t1, 0)
replicatedLog += Entry(command2, t2, 1)
// then
frozenLog.entries should have length 0 // check for immutability
replicatedLog.entries should have length 2
}
"append" should "append in the right order" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
// when
replicatedLog += Entry("a", Term(1), 0)
replicatedLog += Entry("b", Term(1), 1)
// then
val commands = replicatedLog.entries.map(_.command).toList
commands should equal (List("a", "b"))
}
it should "append with slicing some elements away (Leader forces us to drop some entries)" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog += Entry("a", Term(1), 0)
replicatedLog += Entry("D", Term(1), 1)
replicatedLog += Entry("D", Term(1), 2)
val entries =
Entry("b", Term(1), 1) ::
Entry("c", Term(1), 2) ::
Nil
// when
replicatedLog = replicatedLog.append(entries, take = 1)
// then
replicatedLog.entries.map(_.command).toList should equal (List("a", "b", "c"))
}
it should "append properly" in {
// given
var replicatedLog = ReplicatedLog.empty[Cmnd](1)
replicatedLog += Entry(AppendWord("I"), Term(1), 0)
replicatedLog += Entry(AppendWord("like"), Term(1), 1)
replicatedLog += Entry(AppendWord("bananas"), Term(1), 2)
replicatedLog += Entry(GetWords, Term(1), 3)
// when
// then
}
"committedEntries" should "contain entries up until the last committed one" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
replicatedLog = replicatedLog.append(Entry("b", Term(2), 1))
replicatedLog = replicatedLog.append(Entry("a", Term(3), 2))
// when
val comittedIndex = 2
val comittedLog = replicatedLog.commit(comittedIndex)
// then
replicatedLog.lastIndex should equal (comittedLog.lastIndex)
replicatedLog.lastTerm should equal (comittedLog.lastTerm)
replicatedLog.committedIndex should equal (-1) // nothing ever comitted
comittedLog.committedIndex should equal (comittedIndex)
comittedLog.committedEntries should have length (2)
comittedLog.committedEntries.head should equal (Entry("a", Term(1), 0, None))
comittedLog.committedEntries.tail.head should equal (Entry("b", Term(2), 1, None))
}
"isConsistentWith" should "be consistent for valid append within a term" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0)) // t1, 0
replicatedLog = replicatedLog.append(Entry("b", Term(1), 1)) // t1, 1
// when / then
replicatedLog.containsMatchingEntry(Term(1), 0) should equal (false)
replicatedLog.containsMatchingEntry(Term(1), 1) should equal (true)
}
it should "be consistent with itself, from 1 write in the past" in {
// given
val emptyLog = ReplicatedLog.empty[String](1)
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog += Entry("a", Term(1), 0)
// when
info(s"empty log: ${emptyLog}")
info(s"prevTerm: ${replicatedLog.prevTerm}, prevIndex: ${replicatedLog.prevIndex}")
val isConsistent = emptyLog.containsMatchingEntry(replicatedLog.prevTerm, replicatedLog.prevIndex)
// then
isConsistent should equal (true)
}
it should "be consistent for initial append" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("I", Term(1), 0))
info("replicated log = " + replicatedLog)
// when / then
replicatedLog.containsMatchingEntry(Term(0), 0) should equal (true)
}
it should "be consistent with AppendEntries with multiple entries" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
replicatedLog = replicatedLog.append(Entry("b", Term(1), 1))
replicatedLog = replicatedLog.append(Entry("b", Term(2), 2))
replicatedLog = replicatedLog.append(Entry("b", Term(3), 3))
// when / then
replicatedLog.containsMatchingEntry(Term(1), 0) should equal (false)
replicatedLog.containsMatchingEntry(Term(1), 1) should equal (false)
replicatedLog.containsMatchingEntry(Term(1), 2) should equal (false)
replicatedLog.containsMatchingEntry(Term(2), 2) should equal (false)
replicatedLog.containsMatchingEntry(Term(2), 3) should equal (false)
replicatedLog.containsMatchingEntry(Term(3), 2) should equal (false)
replicatedLog.containsMatchingEntry(Term(3), 3) should equal (true)
}
"prevTerm / prevIndex" should "be Term(0) / 0 after first write" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
// when
val prevTerm = replicatedLog.prevTerm
val prevIndex = replicatedLog.prevIndex
// then
prevTerm should equal (Term(0))
prevIndex should equal (0)
}
"entriesFrom" should "not include already sent entry, from given term" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
replicatedLog = replicatedLog.append(Entry("b", Term(1), 1))
replicatedLog = replicatedLog.append(Entry("c", Term(2), 2))
replicatedLog = replicatedLog.append(Entry("d", Term(3), 3)) // other term
// when
val inTerm1 = replicatedLog.entriesBatchFrom(1)
// then
inTerm1 should have length 1
inTerm1(0) should equal (Entry("b", Term(1), 1))
}
it should "not include already sent entries, from given term" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
replicatedLog = replicatedLog.append(Entry("b", Term(1), 1))
replicatedLog = replicatedLog.append(Entry("c0", Term(2), 2))
replicatedLog = replicatedLog.append(Entry("c1", Term(2), 3))
replicatedLog = replicatedLog.append(Entry("c2", Term(2), 4))
replicatedLog = replicatedLog.append(Entry("d", Term(3), 5)) // other term
// when
val entriesFrom2ndTerm = replicatedLog.entriesBatchFrom(2)
// then
entriesFrom2ndTerm should have length 3
entriesFrom2ndTerm(0) should equal (Entry("c0", Term(2), 2))
entriesFrom2ndTerm(1) should equal (Entry("c1", Term(2), 3))
entriesFrom2ndTerm(2) should equal (Entry("c2", Term(2), 4))
}
"verifyOrDrop" should "not change if entries match" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
replicatedLog = replicatedLog.append(Entry("b", Term(1), 1))
replicatedLog = replicatedLog.append(Entry("c", Term(2), 2))
replicatedLog = replicatedLog.append(Entry("d", Term(3), 3))
// when
val check0 = replicatedLog.putWithDroppingInconsistent(Entry("a", Term(1), 0))
val check1 = replicatedLog.putWithDroppingInconsistent(Entry("b", Term(1), 1))
val check2 = replicatedLog.putWithDroppingInconsistent(Entry("c", Term(2), 2))
val check3 = replicatedLog.putWithDroppingInconsistent(Entry("d", Term(3), 3))
// then
check0 should equal (replicatedLog)
check1 should equal (replicatedLog)
check2 should equal (replicatedLog)
check3 should equal (replicatedLog)
}
it should "drop elements after an index that does not match" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
replicatedLog = replicatedLog.append(Entry("a", Term(1), 0))
replicatedLog = replicatedLog.append(Entry("b", Term(1), 1))
replicatedLog = replicatedLog.append(Entry("c", Term(2), 2))
replicatedLog = replicatedLog.append(Entry("d", Term(3), 3))
// when
val check0 = replicatedLog.putWithDroppingInconsistent(Entry("a", Term(1), 0))
val check1 = replicatedLog.putWithDroppingInconsistent(Entry("b", Term(1), 1))
val check2 = replicatedLog.putWithDroppingInconsistent(Entry("C!!!", Term(2), 1)) // different command
// then
check0 should equal (replicatedLog)
check1 should equal (replicatedLog)
check2 should not equal replicatedLog
check2.entries.head.command should equal ("a")
check2.entries.tail.head.command should equal ("C!!!")
check2.entries should have length 2
}
"between" should "include 0th entry when asked between(-1, 0)" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
val firstEntry = Entry("a", Term(1), 0)
replicatedLog += firstEntry
// when
val initialEntry = replicatedLog.between(-1, 0)
// then
initialEntry.headOption should be ('defined)
initialEntry.head should equal (firstEntry)
}
"compactedWith" should "compact the log" in {
// given
var replicatedLog = ReplicatedLog.empty[String](1)
(1 to 21) foreach { i =>
replicatedLog += Entry(s"e-$i", Term(1 + i / 10), i)
}
info("replicatedLog.lastTerm = " + replicatedLog.lastTerm)
info("replicatedLog.lastIndex = " + replicatedLog.lastIndex)
// when
// we compact and store a snapshot somewhere
val meta = RaftSnapshotMetadata(Term(2), 18, ClusterConfiguration(isLocal = false, Nil))
val snapshot = RaftSnapshot(meta, "example")
info(s"Snapshotting until: $meta")
val compactedLog = replicatedLog compactedWith snapshot
// then
info("compactedLog = " + compactedLog)
compactedLog.entries should have length 4
compactedLog.lastIndex should equal (replicatedLog.lastIndex)
compactedLog.lastTerm should equal (replicatedLog.lastTerm)
}
}
| grahamar/inflatable | src/test/scala/com/teambytes/inflatable/raft/model/ReplicatedLogTest.scala | Scala | apache-2.0 | 10,421 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.WindowAccess
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Window Access Repository
* @param session
* @param executionContext
*/
class WindowAccessRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
extends api.repository.WindowAccessRepository[WindowAccess , Int]
with WindowAccessMapping {
def getById(id: Int): Future[WindowAccess] = {
getByRole(id , 0)
}
def getByRole(id: Int , role : Int): Future[WindowAccess] = {
Future(run(queryWindowAccess.filter(windowAccess => windowAccess.windowId == lift(id)
&& windowAccess.roleId == lift(role))).headOption.get)
}
def getByUUID(uuid: UUID): Future[WindowAccess] = {
Future(run(queryWindowAccess.filter(_.uuid == lift(uuid.toString))).headOption.get)
}
def getByWindowAccessId(id : Int) : Future[List[WindowAccess]] = {
Future(run(queryWindowAccess))
}
def getAll() : Future[List[WindowAccess]] = {
Future(run(queryWindowAccess))
}
def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[WindowAccess]] = {
val offset = page * pageSize
val limit = (page + 1) * pageSize
for {
count <- countWindowAccess()
elements <- if (offset > count) Future.successful(Nil)
else selectWindowAccess(offset, limit)
} yield {
PaginatedSequence(elements, page, pageSize, count)
}
}
private def countWindowAccess() = {
Future(run(queryWindowAccess.size).toInt)
}
private def selectWindowAccess(offset: Int, limit: Int): Future[Seq[WindowAccess]] = {
Future(run(queryWindowAccess).drop(offset).take(limit).toSeq)
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/WindowAccessRepository.scala | Scala | gpl-3.0 | 2,929 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.util.cache
import io.gatling.BaseSpec
import io.gatling.core.session.Session
import org.scalatest.OptionValues
class SessionCacheHandlerSpec extends BaseSpec with OptionValues {
val sessionCacheHandler = new SessionCacheHandler[String, String]("stringCache", 1)
val defaultSession = Session("scenarioName", 0)
"getCache" should "return None if the cache does not exist" in {
sessionCacheHandler.getCache(defaultSession) shouldBe empty
}
it should "return the cache if it exists" in {
val newCache = Cache.newImmutableCache[String, String](2)
val sessionWithCache = defaultSession.set("stringCache", newCache)
sessionCacheHandler.getCache(sessionWithCache) should not be empty
sessionCacheHandler.getCache(sessionWithCache).value should be theSameInstanceAs newCache
}
"getOrCreateCache" should "return the cache if it exists" in {
val newCache = Cache.newImmutableCache[String, String](2)
val sessionWithCache = defaultSession.set("stringCache", newCache)
sessionCacheHandler.getOrCreateCache(sessionWithCache) should be theSameInstanceAs newCache
}
it should "create a new cache if it didn't exists" in {
defaultSession.contains("stringCache") shouldBe false
sessionCacheHandler.getOrCreateCache(defaultSession) shouldBe a[Cache[_, _]] // TODO : Can this test be improved ?
}
"addEntry" should "add a new entry to the cache" in {
val sessionWithNewEntry = sessionCacheHandler.addEntry(defaultSession, "foo", "bar")
val entry = sessionCacheHandler.getOrCreateCache(sessionWithNewEntry).get("foo")
entry should not be empty
entry.value shouldBe "bar"
}
"getEntry" should "return None if the cache does not exists" in {
sessionCacheHandler.getEntry(defaultSession, "foo") shouldBe empty
}
it should "return None if the entry does not exists" in {
val sessionWithCache = sessionCacheHandler.addEntry(defaultSession, "foo", "bar")
sessionCacheHandler.getEntry(sessionWithCache, "quz") shouldBe empty
}
it should "return the value if the cache and entry exists" in {
val sessionWithCache = sessionCacheHandler.addEntry(defaultSession, "foo", "bar")
val entry = sessionCacheHandler.getEntry(sessionWithCache, "foo")
entry should not be empty
entry.value shouldBe "bar"
}
"removeEntry" should "left the session untouched if the cache doesn't exist" in {
sessionCacheHandler.removeEntry(defaultSession, "foo") should be theSameInstanceAs defaultSession
}
it should "remove the key from the cache if it exists" in {
val sessionWithEntry = sessionCacheHandler.addEntry(defaultSession, "foo", "bar")
val sessionWithoutEntry = sessionCacheHandler.removeEntry(sessionWithEntry, "foo")
sessionCacheHandler.getEntry(sessionWithoutEntry, "foo") shouldBe empty
}
}
| ryez/gatling | gatling-core/src/test/scala/io/gatling/core/util/cache/SessionCacheHandlerSpec.scala | Scala | apache-2.0 | 3,450 |
package com.twitter.streaming
import com.twitter.finatra.http.test.{EmbeddedHttpServer, HttpTest}
import com.twitter.finatra.httpclient.RequestBuilder
import com.twitter.inject.server.FeatureTest
import com.twitter.util.Await
class StreamingServerFeatureTest extends FeatureTest with HttpTest {
override val server = new EmbeddedHttpServer(
new StreamingServer,
streamResponse = true)
"post streaming json" in {
val request = RequestBuilder.post("/tweets").chunked
val tweets = for (i <- 1 to 100) yield {
Tweet(text = s"msg $i", location = Some("US"))
}
// Write to request in separate thread
pool {
writeJsonArray(request, tweets, delayMs = 25)
}
val response = server.httpRequest(request)
response.printAsyncStrings()
}
}
| nkhuyu/finatra | examples/streaming-example/src/test/scala/com/twitter/streaming/StreamingServerFeatureTest.scala | Scala | apache-2.0 | 790 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.invoker
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.StaleParameter
import org.apache.openwhisk.core.entity.{Identity, View}
import org.apache.openwhisk.core.entity.types.AuthStore
import scala.concurrent.{ExecutionContext, Future}
import spray.json.DefaultJsonProtocol._
import scala.concurrent.duration.FiniteDuration
/**
 * The namespace blacklist holds all namespaces that are throttled to 0 or blocked, as read from the database.
*
* The caller is responsible for periodically updating the blacklist with `refreshBlacklist`.
*
* @param authStore Subjects database with the limit-documents.
*/
class NamespaceBlacklist(authStore: AuthStore) {
private var blacklist: Set[String] = Set.empty
/**
* Check if the identity, who invoked the activation is in the blacklist.
*
* @param identity which invoked the action.
* @return whether or not the current identity is considered blacklisted
*/
def isBlacklisted(identity: Identity): Boolean = blacklist.contains(identity.namespace.name.asString)
/**
* Check if the namespace, for which the activation is invoked, is in the blacklist.
*
* @param name namespace for which the action is invoked.
* @return whether or not the current namespace is considered blacklisted
*/
def isBlacklisted(name: String): Boolean = blacklist.contains(name)
/**
* Check if blacklist is empty.
*
* @return whether or not the blacklist is empty
*/
def isEmpty(): Boolean = blacklist.isEmpty
/** Refreshes the current blacklist from the database. */
/** Limit query parameter set to 0 for limitless record query. */
def refreshBlacklist()(implicit ec: ExecutionContext, tid: TransactionId): Future[Set[String]] = {
authStore
.query(
table = NamespaceBlacklist.view.name,
startKey = List.empty,
endKey = List.empty,
skip = 0,
limit = 0,
includeDocs = false,
descending = true,
reduce = false,
stale = StaleParameter.UpdateAfter)
.map(_.map(_.fields("key").convertTo[String]).toSet)
.map { newBlacklist =>
blacklist = newBlacklist
newBlacklist
}
}
}
object NamespaceBlacklist {
val view = View("namespaceThrottlings", "blockedNamespaces")
}
/** Configuration relevant to the namespace blacklist */
case class NamespaceBlacklistConfig(pollInterval: FiniteDuration)
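// Illustrative sketch (not part of the original file): wiring a blacklist and consulting it
// before running an activation. The `authStore` and `identity` values, the object name and
// the rejection behaviour are assumptions made up for the example; the caller is still
// responsible for invoking refreshBlacklist() periodically (e.g. every pollInterval).
object NamespaceBlacklistUsageSketch {
  def allowed(authStore: AuthStore, identity: Identity)(implicit ec: ExecutionContext,
                                                        tid: TransactionId): Future[Boolean] = {
    val blacklist = new NamespaceBlacklist(authStore)
    // refresh once here; a real caller would schedule this call at a fixed interval
    blacklist.refreshBlacklist().map(_ => !blacklist.isBlacklisted(identity))
  }
}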
| RSulzmann/openwhisk | core/invoker/src/main/scala/org/apache/openwhisk/core/invoker/NamespaceBlacklist.scala | Scala | apache-2.0 | 3,257 |
package jgo.tools.compiler
package interm
import codeseq._
import instr._
import symbol._
import types._
/**
* The intermediate representation of a certain package.
*/
case class PkgInterm(
target: PackageSymbol,
definedTypes: List[WrappedType],
functions: Map[Function, FunctionInterm],
globals: List[GlobalVar],
initCode: Code)
| thomasmodeneis/jgo | src/src/main/scala/jgo/tools/compiler/interm/PkgInterm.scala | Scala | gpl-3.0 | 362 |
package ttfi
import scalaz._
import scalaz.syntax.either._
import scalaz.syntax.apply._
import Intro2._
trait Serialize {
sealed trait Tree
case class Leaf(s: String) extends Tree
case class Node(s: String, children: List[Tree]) extends Tree
implicit object treeExprSYM extends ExpSYM[Tree] {
def lit = n => Node("Lit", List(Leaf(n.toString)))
def neg = e => Node("Neg", List(e))
def add = (l, r) => Node("Add", List(l, r))
}
def toTree: Tree => Tree = identity
val tf1_tree = toTree(tf1[Tree])
type ErrMsg = String
// emulate the niceness of Haskell
def liftM1[A, B]: (ErrMsg \\/ A) => (A => B) => (ErrMsg \\/ B )= Functor[({type l[a] = ErrMsg \\/ a})#l].map _
def liftM2[A, B] = Apply[({type l[a] = ErrMsg \\/ a})#l].apply2[A, A, B] _
trait SafeRead[T] {def safeRead(s: String): ErrMsg \\/ T }
implicit class SafeReadOps(s: String) {
def read[T](implicit read: SafeRead[T]) = read.safeRead(s)
}
implicit object intSafeRead extends SafeRead[Int] {
def safeRead(s: String) = try s.toInt.right
catch { case e: NumberFormatException => ("Read error: " + e.getMessage).left }
}
  /*
  ARGH
  Annoying: when implicit params come before normal params,
  type inference stops working.
  */
def fromTree[B](t: Tree)(implicit B: ExpSYM[B]): ErrMsg \\/ B = {
import B._
t match {
case Node("Lit", List(Leaf(n))) => n.read[Int] map lit
case Node("Neg", List(e)) =>
val value: ErrMsg \\/ B = fromTree[B](e)
value map neg
case Node("Add", List(l, r)) =>
val v1: ErrMsg \\/ B = fromTree[B](l)
val v2: ErrMsg \\/ B = fromTree[B](r)
// for {
// r1 <- v1
// r2 <- v2
// } yield add(r1, r2)
// Applicative[({type l[a] = ErrMsg \\/ a})#l].apply2(v1, v2)(add)
(v1 |@| v2)(add)
case other => ("invalid tree " + other).left[B]
}
}
val tfl1Deserialised_eval = fromTree[Int](tf1_tree)
.fold(err => "Error: " + err, expr => eval(expr).toString)
trait Wrapped {
def unwrap[Repr]: ExpSYM[Repr] => Repr
def unwrapi[Repr: ExpSYM]: Repr = unwrap(implicitly[ExpSYM[Repr]])
}
object Wrapped {
def lit(i: Int): Wrapped = new Wrapped {
def unwrap[Repr] = e => e.lit(i)
}
def neg(exp: Wrapped): Wrapped = new Wrapped {
def unwrap[Repr] = sym => {
val v = exp.unwrap[Repr](sym)
sym.neg(v)
}
}
def add(l: Wrapped, r: Wrapped) = new Wrapped {
def unwrap[Repr] = sym => {
val v1 = l.unwrap[Repr](sym)
val v2 = r.unwrap[Repr](sym)
sym.add(v1, v2)
}
}
}
def fromTreeW: Tree => ErrMsg \\/ Wrapped = {
case Node("Lit", List(Leaf(n))) => n.read[Int] map Wrapped.lit
case Node("Neg", List(e)) => fromTreeW(e) map Wrapped.neg
case Node("Add", List(l, r)) => (fromTreeW(l) |@| fromTreeW(r))(Wrapped.add)
case other => s"invalid tree $other".left
}
val tfl1DeserializedEvewWrapped = {
fromTreeW(tf1_tree) fold (
err => "Error: " + err,
expr => (
eval(expr.unwrapi[Int]),
view(expr.unwrapi[String])
)
)
}
  // with Wrapped, we gain the ability to interpret the same deserialised term multiple times, in different contexts
// but we still haven't solved the expression problem
implicit def pairExpSym[R1: ExpSYM, R2: ExpSYM]: ExpSYM[(R1, R2)] = new ExpSYM[(R1, R2)] {
def lit = n => (ExpSYM[R1].lit(n), ExpSYM[R2].lit(n))
def neg = { case (x, y) => (ExpSYM[R1].neg(x), ExpSYM[R2].neg(y)) }
def add = { case ((e11, e12), (e21, e22)) => (ExpSYM[R1].add(e11, e21), ExpSYM[R2].add(e12, e22)) }
}
type :&:[A, B] = (A, B)
def duplicate[R1: ExpSYM, R2: ExpSYM]: ((R1, R2)) => (R1, R2) = identity
def dup[R: ExpSYM, S: ExpSYM] = ???
//TODO replace unit with IO
def checkConsume[A, B, R](f: B => Unit): ErrMsg \\/ B => Unit = {
case -\\/(e) => println("Error: " + e)
case \\/-(x) => f(x)
}
def dupConsume[R1: ExpSYM, R2: ExpSYM, S](ev: R1 => S, x: (R1, R2)): R2 = {
val (x1, x2) = duplicate[R1, R2].apply(x)
println(ev(x1))
x2
}
def intString: (Int, String) = (tf1[Int], tf1[String])
def intStringTree = (tf1[Int], tf1[String], tf1[Tree])
// deserialized value consumed by 3 different interpreters
val consumeThrice: ErrMsg \\/ (Int, (String, Tree)) => Unit = checkConsume(thrice)
def tf1deserializedInt3() = consumeThrice(fromTree[Int :&: String :&: Tree](tf1_tree))
def thrice(x: Int :&: String :&: Tree) = {
println()
val y = dupConsume(eval, x)
val z = dupConsume(view, y)
println(toTree(z))
println()
}
// NB implicit arg *after* Tree argument
def fromTreeExt[R: ExpSYM](self: Tree => ErrMsg \\/ R)(t: Tree): ErrMsg \\/ R = t match {
case Node("Lit", List(Leaf(n))) => n.read[Int] map ExpSYM[R].lit
case Node("Neg", List(e)) => self(e) map ExpSYM[R].neg
case Node("Add", List(e1, e2)) => liftM2(self(e1), self(e2))(ExpSYM[R].add)
case e => s"Invalid tree: $e".left
}
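  // `fix` ties the knot for the open-recursive fromTreeExt above: the deserializer takes
  // itself as the explicit `self` argument, and `fix` feeds the resulting function back in.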
def fix[A, B](f: (A => B) => (A => B)): A => B = f(fix(f))(_)
def fromTree2[R: ExpSYM] = fix(fromTreeExt[R])
def tf1E_int3() = consumeThrice(fromTree2[Int :&: String :&: Tree].apply(tf1_tree))
def tfxE_int3() = consumeThrice(fromTree2[Int :&: String :&: Tree].apply(Node("Lit", List(Leaf("1"), Leaf("2")))))
}
object Serialize extends Serialize
object SerializeMain extends Main with Serialize {
println(tf1_tree)
println(tfl1Deserialised_eval)
println(tfl1DeserializedEvewWrapped)
tf1deserializedInt3()
tf1E_int3()
tfxE_int3()
}
| mjhopkins/ttfi | src/scala/ttfi/Serialize.scala | Scala | apache-2.0 | 5,673 |
package russoul.lib.common.math.algebra
import russoul.lib.common.TypeClasses.Addable
import russoul.lib.common._
import shapeless.{Nat, _0}
import shapeless.ops.nat.{Diff, ToInt}
import shapeless.ops.nat._
import shapeless.ops.nat.Diff._
import Implicits._
import scala.collection.TraversableLike
import scala.reflect.ClassTag
sealed trait FVec[+A,N <: Nat]
object FNil extends FVec[Nothing, _0] //TODO this implementation takes more space than Array impl + more `new` calls
case class Cons[+A,Q <: Nat,N <: Nat](x : A, xs : FVec[A, Q])(implicit diff : Sum.Aux[Q,Nat._1,N]) extends FVec[A,N]
//TODO problem : each node of FVec now contains extra implicit value as field !
//solution : move implicit from class constructor to object.apply
object FVec{
val test : FVec[Int, Nat._3] = Cons (3, Cons (2, Cons(1, FNil)))
}
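// Illustrative sketch (an assumption, not part of the original code) for the TODO above:
// keep the `Sum.Aux` evidence out of every node by making the case-class constructor
// abstract and requiring the implicit only in a companion `apply`. `Cons2` is a made-up name.
sealed abstract case class Cons2[+A, Q <: Nat, N <: Nat](x: A, xs: FVec[A, Q]) extends FVec[A, N]
object Cons2 {
  def apply[A, Q <: Nat, N <: Nat](x: A, xs: FVec[A, Q])
                                  (implicit diff: Sum.Aux[Q, Nat._1, N]): FVec[A, N] =
    new Cons2[A, Q, N](x, xs) {}
}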
/**
* Created by russoul on 11.07.2017.
*/
@immutable class Vec[@tbsp A : ClassTag, Size <: Nat]private ()(implicit size: ToInt[Size]) extends Traversable[A]{
private val array = new Array[A](size())
@inline def apply(i: Int): A = array(i)
def toArray = array.clone()
override def toString() : String = {
var str = ""
for(i <- array) str += i + " "
if(array.size > 1) str = str.dropRight(1)
s"Vec[${implicitly[ClassTag[A]].toString()}, ${implicitly[ToInt[Size]].apply()}]\\n$str"
}
override def foreach[U](f: (A) => U): Unit = {
var k = 0
while (k < size()){
f +> array(k)
k += 1
}
}
  override def hashCode() = {
    // Array.hashCode is identity based, so hash the contents instead
    array.toSeq.hashCode()
  }
  override def equals(obj: scala.Any) = {
    obj match {
      case that: Vec[_, _] => this.array.toSeq == that.array.toSeq
      case _ => false
    }
  }
}
object Vec {
@inline def apply[@tbsp A : ClassTag, Size <: Nat](args: A*)(implicit size: ToInt[Size]): Vec[A, Size] = {
val result = new Vec[A,Size]()
var k = 0
while(k < size()){
result.array(k) = args(k)
k += 1
}
result
}
}
| Russoul/UniScalaLib | src/main/scala/russoul/lib/common/math/algebra/Vec.scala | Scala | mit | 1,959 |
package pureconfig
import scala.collection.JavaConverters._
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.Builder
import scala.language.higherKinds
import com.typesafe.config._
import pureconfig.ConvertHelpers._
import pureconfig.error._
import shapeless._
import shapeless.labelled._
/**
* The default behavior of ConfigReaders that are implicitly derived in PureConfig is to raise a
 * KeyNotFoundException when a required key is missing. Mixing this trait into a ConfigReader
 * allows customizing this behavior. When a key is missing but the ConfigReader of the given
 * type extends this trait, the `from` method of the ConfigReader is called with null.
*/
trait AllowMissingKey { self: ConfigReader[_] => }
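// Illustrative sketch (not part of the original file): a reader that opts in to missing keys
// by mixing in AllowMissingKey and handling the null case itself. The object name and the
// fallback value are made up for the example, and real readers should also validate the input.
object PortReaderSketch extends ConfigReader[Int] with AllowMissingKey {
  private val fallbackPort = 8080
  override def from(config: ConfigValue): Either[ConfigReaderFailures, Int] =
    if (config == null || config.unwrapped() == null) Right(fallbackPort)
    else Right(config.unwrapped().toString.toInt) // illustration only, no error handling
}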
/**
* Trait containing `ConfigReader` instances for collection, product and coproduct types.
*/
trait DerivedReaders {
private[pureconfig] trait WrappedConfigReader[Wrapped, SubRepr] extends ConfigReader[SubRepr]
private[pureconfig] trait WrappedDefaultValue[Wrapped, SubRepr <: HList, DefaultRepr <: HList] {
def fromWithDefault(config: ConfigValue, default: DefaultRepr): Either[ConfigReaderFailures, SubRepr] = config match {
case co: ConfigObject => fromConfigObject(co, default)
case other => fail(WrongType(other.valueType, Set(ConfigValueType.OBJECT), ConfigValueLocation(other), None))
}
def fromConfigObject(co: ConfigObject, default: DefaultRepr): Either[ConfigReaderFailures, SubRepr]
}
implicit final def hNilConfigReader[Wrapped](
implicit
hint: ProductHint[Wrapped]): WrappedDefaultValue[Wrapped, HNil, HNil] = new WrappedDefaultValue[Wrapped, HNil, HNil] {
override def fromConfigObject(config: ConfigObject, default: HNil): Either[ConfigReaderFailures, HNil] = {
if (!hint.allowUnknownKeys && !config.isEmpty) {
val keys = config.keySet().asScala.toList map {
k => UnknownKey(k, ConfigValueLocation(config.get(k)))
}
Left(new ConfigReaderFailures(keys.head, keys.tail))
} else {
Right(HNil)
}
}
}
implicit final def hConsConfigReader[Wrapped, K <: Symbol, V, T <: HList, U <: HList](
implicit
key: Witness.Aux[K],
vFieldConvert: Lazy[ConfigReader[V]],
tConfigReader: Lazy[WrappedDefaultValue[Wrapped, T, U]],
hint: ProductHint[Wrapped]): WrappedDefaultValue[Wrapped, FieldType[K, V] :: T, Option[V] :: U] = new WrappedDefaultValue[Wrapped, FieldType[K, V] :: T, Option[V] :: U] {
override def fromConfigObject(co: ConfigObject, default: Option[V] :: U): Either[ConfigReaderFailures, FieldType[K, V] :: T] = {
val keyStr = hint.configKey(key.value.toString().tail)
val headResult = improveFailures[V](
(co.get(keyStr), vFieldConvert.value) match {
case (null, converter: AllowMissingKey) =>
converter.from(co.get(keyStr))
case (null, _) =>
val defaultValue = if (hint.useDefaultArgs) default.head else None
defaultValue.fold(fail[V](CannotConvertNull))(Right[Nothing, V](_))
case (value, converter) =>
converter.from(value)
},
keyStr,
ConfigValueLocation(co))
// for performance reasons only, we shouldn't clone the config object unless necessary
val tailCo = if (hint.allowUnknownKeys) co else co.withoutKey(keyStr)
val tailResult = tConfigReader.value.fromWithDefault(tailCo, default.tail)
combineResults(headResult, tailResult)((head, tail) => field[K](head) :: tail)
}
}
implicit final def cNilConfigReader[Wrapped]: WrappedConfigReader[Wrapped, CNil] = new WrappedConfigReader[Wrapped, CNil] {
override def from(config: ConfigValue): Either[ConfigReaderFailures, CNil] =
fail(NoValidCoproductChoiceFound(config, ConfigValueLocation(config), None))
}
implicit final def coproductConfigReader[Wrapped, Name <: Symbol, V, T <: Coproduct](
implicit
coproductHint: CoproductHint[Wrapped],
vName: Witness.Aux[Name],
vFieldConvert: Lazy[ConfigReader[V]],
tConfigReader: Lazy[WrappedConfigReader[Wrapped, T]]): WrappedConfigReader[Wrapped, FieldType[Name, V] :+: T] =
new WrappedConfigReader[Wrapped, FieldType[Name, V] :+: T] {
override def from(config: ConfigValue): Either[ConfigReaderFailures, FieldType[Name, V] :+: T] =
coproductHint.from(config, vName.value.name) match {
case Right(Some(hintConfig)) =>
vFieldConvert.value.from(hintConfig) match {
case Left(_) if coproductHint.tryNextOnFail(vName.value.name) =>
tConfigReader.value.from(config).right.map(s => Inr(s))
case vTry => vTry.right.map(v => Inl(field[Name](v)))
}
case Right(None) => tConfigReader.value.from(config).right.map(s => Inr(s))
case l: Left[_, _] => l.asInstanceOf[Either[ConfigReaderFailures, FieldType[Name, V] :+: T]]
}
}
implicit def deriveOption[T](implicit conv: Lazy[ConfigReader[T]]) = new OptionConfigReader[T]
class OptionConfigReader[T](implicit conv: Lazy[ConfigReader[T]]) extends ConfigReader[Option[T]] with AllowMissingKey {
override def from(config: ConfigValue): Either[ConfigReaderFailures, Option[T]] = {
if (config == null || config.unwrapped() == null)
Right(None)
else
conv.value.from(config).right.map(Some(_))
}
}
implicit def deriveTraversable[T, F[T] <: TraversableOnce[T]](
implicit
configConvert: Lazy[ConfigReader[T]],
cbf: CanBuildFrom[F[T], T, F[T]]) = new ConfigReader[F[T]] {
override def from(config: ConfigValue): Either[ConfigReaderFailures, F[T]] = {
config match {
case co: ConfigList =>
val z: Either[ConfigReaderFailures, Builder[T, F[T]]] = Right(cbf())
          // we collect all the failures in the list
co.asScala.foldLeft(z) {
case (acc, value) =>
combineResults(acc, configConvert.value.from(value))(_ += _)
}.right.map(_.result())
case o: ConfigObject =>
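          // an object whose keys are integers (e.g. { 0 = a, 1 = b }) is read as a
          // collection ordered by those keys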
val z: Either[ConfigReaderFailures, List[(Int, T)]] = Right(List.empty[(Int, T)])
def keyValueReader(key: String, value: ConfigValue): Either[ConfigReaderFailures, (Int, T)] = {
val keyResult = catchReadError(_.toInt)(implicitly)(key)(ConfigValueLocation(value)).left.flatMap(t => fail(CannotConvert(key, "Int",
s"To convert an object to a collection, its keys must be read as Int but key $key has value" +
s"$value which cannot converted. Error: ${t.because}", ConfigValueLocation(value), Some(key))))
val valueResult = configConvert.value.from(value)
combineResults(keyResult, valueResult)(_ -> _)
}
o.asScala.foldLeft(z) {
case (acc, (str, v)) =>
combineResults(acc, keyValueReader(str, v))(_ :+ _)
}.right.map {
l =>
val r = cbf()
r ++= l.sortBy(_._1).map(_._2)
r.result()
}
case other =>
fail(WrongType(other.valueType, Set(ConfigValueType.LIST, ConfigValueType.OBJECT), ConfigValueLocation(other), None))
}
}
}
implicit def deriveMap[T](implicit configConvert: Lazy[ConfigReader[T]]) = new ConfigReader[Map[String, T]] {
override def from(config: ConfigValue): Either[ConfigReaderFailures, Map[String, T]] = {
config match {
case co: ConfigObject =>
val z: Either[ConfigReaderFailures, Map[String, T]] = Right(Map.empty[String, T])
co.asScala.foldLeft(z) {
case (acc, (key, value)) =>
combineResults(
acc,
improveFailures(configConvert.value.from(value), key, ConfigValueLocation(value))) {
(map, valueConverted) => map + (key -> valueConverted)
}
}
case other =>
fail(WrongType(other.valueType, Set(ConfigValueType.OBJECT), ConfigValueLocation(other), None))
}
}
}
implicit final def deriveProductInstance[F, Repr <: HList, DefaultRepr <: HList](
implicit
gen: LabelledGeneric.Aux[F, Repr],
default: Default.AsOptions.Aux[F, DefaultRepr],
cc: Lazy[WrappedDefaultValue[F, Repr, DefaultRepr]]): ConfigReader[F] = new ConfigReader[F] {
override def from(config: ConfigValue): Either[ConfigReaderFailures, F] = {
cc.value.fromWithDefault(config, default()).right.map(gen.from)
}
}
implicit final def deriveCoproductInstance[F, Repr <: Coproduct](
implicit
gen: LabelledGeneric.Aux[F, Repr],
cc: Lazy[WrappedConfigReader[F, Repr]]): ConfigReader[F] = new ConfigReader[F] {
override def from(config: ConfigValue): Either[ConfigReaderFailures, F] = {
cc.value.from(config).right.map(gen.from)
}
}
}
object DerivedReaders extends DerivedReaders
| derekmorr/pureconfig | core/src/main/scala/pureconfig/DerivedReaders.scala | Scala | mpl-2.0 | 8,881 |
package com.ajjpj.adiagram_.ui.accordion
import com.ajjpj.adiagram_.ui.{TextStyleListCell, ADiagramController}
import com.ajjpj.adiagram_.ui.fw.{Command, Digest}
import com.ajjpj.adiagram_.model.style.TextStyleSpec
/**
* @author arno
*/
class TextStylePane(ctrl: ADiagramController)(implicit digest: Digest) extends AbstractStylePane[TextStyleSpec, TextStyleListCell, TextStyleChangeCommand](ctrl) {
def all = ctrl.styleRepository.textStyles
def snapshot = TextStyleChangeCommand(ctrl, selected.get, selected.get.name, selected.get.name, selected.get.fontSizePixels, selected.get.fontSizePixels)
textfield("Name:", selected.map(_.name).getOrElse(""), (cmd: TextStyleChangeCommand, newName: String) => cmd.copy(newName = newName))
textfield("Size[pixel]:", selected.map(_.fontSizePixels.toString).getOrElse(""), (cmd: TextStyleChangeCommand, newSize: String) => cmd.copy(newSize = newSize.toDouble)) //TODO invalid, limit range, ...
}
case class TextStyleChangeCommand(ctrl: ADiagramController, spec: TextStyleSpec, oldName: String, newName: String, oldSize: Double, newSize: Double) extends Command{
def name = "Change Text Style"
def isNop = false
def undo() {
spec.name = oldName
spec.fontSizePixels = oldSize
ctrl.styleRepository.changeCounter += 1
}
def redo() {
spec.name = newName
spec.fontSizePixels = newSize
ctrl.styleRepository.changeCounter += 1
}
} | arnohaase/a-diagram | src/main/scala-old/com/ajjpj/adiagram_/ui/accordion/TextStylePane.scala | Scala | apache-2.0 | 1,414 |
import Macros.*
object Test {
def main(args: Array[String]): Unit = {
assert2 {
def bar(): Boolean = {
println("bar")
false
}
bar()
}
}
}
| dotty-staging/dotty | tests/run-macros/quote-change-owner/Test_2.scala | Scala | apache-2.0 | 184 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import scala.annotation.StaticAnnotation
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
private[mxnet] class AddNDArrayFunctions(isContrib: Boolean) extends StaticAnnotation {
private[mxnet] def macroTransform(annottees: Any*) = macro NDArrayMacro.addDefs
}
private[mxnet] class AddNDArrayAPIs(isContrib: Boolean) extends StaticAnnotation {
private[mxnet] def macroTransform(annottees: Any*) = macro TypedNDArrayAPIMacro.typeSafeAPIDefs
}
private[mxnet] class AddNDArrayRandomAPIs(isContrib: Boolean) extends StaticAnnotation {
private[mxnet] def macroTransform(annottees: Any*) =
macro TypedNDArrayRandomAPIMacro.typeSafeAPIDefs
}
/**
* For non-typed NDArray API
*/
private[mxnet] object NDArrayMacro extends GeneratorBase {
def addDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
import c.universe._
val isContrib: Boolean = c.prefix.tree match {
case q"new AddNDArrayFunctions($b)" => c.eval[Boolean](c.Expr(b))
}
impl(c)(isContrib, annottees: _*)
}
private def impl(c: blackbox.Context)
(isContrib: Boolean, annottees: c.Expr[Any]*): c.Expr[Any] = {
import c.universe._
val functions = functionsToGenerate(isSymbol = false, isContrib)
val functionDefs = functions.flatMap { NDArrayfunction =>
val funcName = NDArrayfunction.name
val termName = TermName(funcName)
Seq(
// e.g def transpose(kwargs: Map[String, Any] = null)(args: Any*)
q"""
def $termName(kwargs: Map[String, Any] = null)(args: Any*) = {
genericNDArrayFunctionInvoke($funcName, args, kwargs)
}
""".asInstanceOf[DefDef],
// e.g def transpose(args: Any*)
q"""
def $termName(args: Any*) = {
genericNDArrayFunctionInvoke($funcName, args, null)
}
""".asInstanceOf[DefDef]
)
}
structGeneration(c)(functionDefs, annottees: _*)
}
}
/**
* NDArray.api code generation
*/
private[mxnet] object TypedNDArrayAPIMacro extends GeneratorBase {
def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
import c.universe._
val isContrib: Boolean = c.prefix.tree match {
case q"new AddNDArrayAPIs($b)" => c.eval[Boolean](c.Expr(b))
}
val functionDefs = typeSafeFunctionsToGenerate(isSymbol = false, isContrib)
.map(f => buildTypedFunction(c)(f))
structGeneration(c)(functionDefs, annottees: _*)
}
protected def buildTypedFunction(c: blackbox.Context)
(function: Func): c.universe.DefDef = {
import c.universe._
val returnType = "org.apache.mxnet.NDArrayFuncReturn"
// Construct API arguments declaration
val argDecl = super.typedFunctionCommonArgDef(function) :+ "out : Option[NDArray] = None"
// Map API input args to backend args
val backendArgsMapping =
function.listOfArgs.map { arg =>
// ndarrays go to args, other types go to kwargs
if (arg.argType.equals(s"Array[org.apache.mxnet.NDArray]")) {
s"args ++= ${arg.safeArgName}.toSeq"
} else {
val base = if (arg.argType.equals("org.apache.mxnet.NDArray")) {
s"args += ${arg.safeArgName}"
} else {
s"""map("${arg.argName}") = ${arg.safeArgName}"""
}
if (arg.isOptional) s"if (!${arg.safeArgName}.isEmpty) $base.get"
else base
}
}
val impl =
s"""
|def ${function.name}
| (${argDecl.mkString(",")}): $returnType = {
|
| val map = scala.collection.mutable.Map[String, Any]()
| val args = scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray]
|
| if (!out.isEmpty) map("out") = out.get
|
| ${backendArgsMapping.mkString("\\n")}
|
| org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(
| "${function.name}", args.toSeq, map.toMap)
|}
""".stripMargin
c.parse(impl).asInstanceOf[DefDef]
}
}
/**
* NDArray.random code generation
*/
private[mxnet] object TypedNDArrayRandomAPIMacro extends GeneratorBase
with RandomHelpers {
def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
// Note: no contrib managed in this module
val functionDefs = typeSafeRandomFunctionsToGenerate(isSymbol = false)
.map(f => buildTypedFunction(c)(f))
structGeneration(c)(functionDefs, annottees: _*)
}
protected def buildTypedFunction(c: blackbox.Context)
(function: Func): c.universe.DefDef = {
import c.universe._
val returnType = "org.apache.mxnet.NDArrayFuncReturn"
// Construct API arguments declaration
val argDecl = super.typedFunctionCommonArgDef(function) :+ "out : Option[NDArray] = None"
// Map API input args to backend args
val backendArgsMapping =
function.listOfArgs.map { arg =>
// ndarrays go to args, other types go to kwargs
if (arg.argType.equals("Array[org.apache.mxnet.NDArray]")) {
s"args ++= ${arg.safeArgName}.toSeq"
} else {
if (arg.argType.equals("T")) {
if (arg.isOptional) {
s"""if(${arg.safeArgName}.isDefined) {
| if(isScalar) {
| map("${arg.argName}") = ${arg.safeArgName}.get
| } else {
| args += ${arg.safeArgName}.get.asInstanceOf[org.apache.mxnet.NDArray]
| }
|}
""".stripMargin
} else {
s"""if(isScalar) {
| map("${arg.argName}") = ${arg.safeArgName}
|} else {
| args += ${arg.safeArgName}.asInstanceOf[org.apache.mxnet.NDArray]
|}
""".stripMargin
}
} else {
if (arg.isOptional) {
s"""if (${arg.safeArgName}.isDefined) map("${arg.argName}")=${arg.safeArgName}.get"""
} else {
s"""map("${arg.argName}") = ${arg.safeArgName}"""
}
}
}
}
val impl =
s"""
|def ${function.name}${randomGenericTypeSpec(false, true)}
| (${argDecl.mkString(",")}): $returnType = {
|
| val map = scala.collection.mutable.Map[String, Any]()
| val args = scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray]
| val isScalar = NDArrayOrScalar[T].isScalar
|
| if(out.isDefined) map("out") = out.get
|
| ${backendArgsMapping.mkString("\\n")}
|
| val target = if(isScalar) {
| "random_${function.name}"
| } else {
| "sample_${function.name}"
| }
|
| ${unhackNormalFunc(function)}
|
| org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(
| target, args.toSeq, map.toMap)
|}
""".stripMargin
c.parse(impl).asInstanceOf[DefDef]
}
}
| dmlc/mxnet | scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala | Scala | apache-2.0 | 7,965 |
/**
* See <a href="https://www.codeeval.com/open_challenges/164/">Mars Networks</a>
*/
object MarsNetworks extends Challenge {
val lines = scala.io.Source.fromFile(args(0)).getLines().filter(_.length > 0)
lines.collect {
case Input(coordinates) => eval(coordinates, Map())
} foreach (result => println(result.ceil.toInt))
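  // `eval` computes the total edge length of a minimum spanning tree over the coordinates,
  // Prim-style: pick the node with the smallest known distance to the tree built so far,
  // add that distance, then relax the remaining nodes' best distances against the new node.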
def eval(coordinates: Set[(Int, Int)], m: Map[(Int, Int), Double]): Double =
coordinates.isEmpty match {
case true => 0
case false => coordinates.map(x1 => (m.getOrElse(x1, 0d), x1)).min match {
case (dist, x1) =>
val rest = coordinates - x1
val distances = rest.map(x => (x, m.getOrElse(x, Double.MaxValue).min(distance(x, x1))))
dist + eval(rest, distances.toMap)
}
}
def distance(x1: (Int, Int), x2: (Int, Int)) =
scala.math.sqrt(scala.math.pow(x1._1 - x2._1, 2) + scala.math.pow(x1._2 - x2._2, 2))
object Input {
// 9013,3937 7791,872 2417,3183
def unapply(line: String) =
Some(line.split(" ").map(_.split(",")).map(coord => (coord(0).toInt, coord(1).toInt)).toSet)
}
}
| zelca/codeeval | src/MarsNetworks.scala | Scala | mit | 1,108 |
/*
* Copyright (C) 2016 Vincibean <Andre Bessi>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vincibean.scala.impatient.chapter7
/**
* Write a puzzler that baffles your Scala friends, using a package com that isn’t
* at the top level.
*
* Created by Vincibean on 22/01/16.
*/
package object exercise2 {
val theSameValue: Int = 17
}
// My packages start with org, not com. Sorry Cay! :-)
package org {
package vincibean {
package scala {
package impatient {
package chapter7 {
package object exercise2 {
val theSameValue: Int = 43
}
}
}
}
}
}
| Vincibean/ScalaForTheImpatient-Solutions | src/main/scala/org/vincibean/scala/impatient/chapter7/exercise2/package.scala | Scala | gpl-3.0 | 1,277 |
package com.orendainx.trucking.nifi.processors
import java.io.OutputStream
import java.nio.charset.StandardCharsets
import com.orendainx.trucking.simulator.simulators.ManualTickAndFetchSimulator
import org.apache.nifi.annotation.behavior._
import org.apache.nifi.annotation.documentation.{CapabilityDescription, Tags}
import org.apache.nifi.annotation.lifecycle.{OnRemoved, OnShutdown}
import org.apache.nifi.components.PropertyDescriptor
import org.apache.nifi.logging.ComponentLog
import org.apache.nifi.processor._
import org.apache.nifi.processor.io.OutputStreamCallback
import scala.collection.JavaConverters._
/**
* @author Edgar Orendain <[email protected]>
*/
@Tags(Array("trucking", "data", "event", "generator", "simulator", "iot"))
@CapabilityDescription("Generates simulated truck sensor data. Find the master project and its code, documentation and corresponding tutorials at: https://github.com/orendain/trucking-iot")
@InputRequirement(InputRequirement.Requirement.INPUT_FORBIDDEN)
@TriggerSerially
@WritesAttributes(Array(
  new WritesAttribute(attribute = "dataType", description = "The class name of the TruckingData this flowfile carries (e.g. \\"TruckData\\" or \\"TrafficData\\").")
))
class GetTruckingData extends AbstractProcessor {
private var log: ComponentLog = _
private val RelSuccess = new Relationship.Builder().name("success").description("All generated data is routed to this relationship.").build
private lazy val simulator = new ManualTickAndFetchSimulator()
/** Perform necessary initialization */
override def init(context: ProcessorInitializationContext): Unit = {
log = context.getLogger // Save reference to NiFi's logger
simulator.tick() // On initialization, tick the simulator forward
}
/** Called when processor is scheduled to run and when work exists for it */
override def onTrigger(context: ProcessContext, session: ProcessSession): Unit = {
// Fetch results that have been generated by the simulator since last onTrigger
val truckingData = simulator.fetch()
log.debug(s"Received data: $truckingData")
// Process each bit of data generated, creating flow file and tagging with attributes as appropriate
truckingData.foreach { data =>
log.debug(s"Processing data: $data")
var flowFile = session.create()
flowFile = session.putAttribute(flowFile, "dataType", data.getClass.getSimpleName)
flowFile = session.write(flowFile, new OutputStreamCallback {
override def process(outputStream: OutputStream) =
outputStream.write(data.toCSV.getBytes(StandardCharsets.UTF_8))
})
// TODO: document what this does
// TODO: session.getProvenanceReporter.receive(flowFile, "ThisDoesWhat?")
session.getProvenanceReporter.route(flowFile, RelSuccess)
session.transfer(flowFile, RelSuccess)
session.commit()
}
// Tick the simulator forward so results are ready to be fetched by onTrigger's next invocation
simulator.tick()
}
// Define properties and relationships
override def getSupportedPropertyDescriptors: java.util.List[PropertyDescriptor] = List.empty[PropertyDescriptor].asJava
override def getRelationships: java.util.Set[Relationship] = Set(RelSuccess).asJava
@OnRemoved
@OnShutdown
/** Clean up and exit gracefully */
def cleanup(): Unit = simulator.stop()
}
| orendain/trucking-iot | nifi-bundle/trucking-nifi-processors/src/main/scala/com/orendainx/trucking/nifi/processors/GetTruckingData.scala | Scala | apache-2.0 | 3,370 |
package com.twitter.util.events
import com.twitter.util.events.Event.Type
/**
* Where runtime events such as logging, stats and tracing can be
* sent to allow for analysis.
*
* '''Note:''' while the API is public it should be considered as experimental
* and subject to changes.
*
* ===Design notes===
* - Implementations must be thread-safe.
* - Implementations should have very low runtime overhead such that as
* many events as possible can be sent here. In particular, object
* allocations should be kept to a minimum.
* - `event` is expected to be called many orders of magnitude
* more frequently than `events`.
*/
trait Sink {
/**
* Event input is captured as individual fields in service of
* avoiding an allocation to wrap the event.
*/
def event(
etype: Event.Type,
longVal: Long = Event.NoLong,
objectVal: Object = Event.NoObject,
doubleVal: Double = Event.NoDouble,
traceIdVal: Long = Event.NoTraceId,
spanIdVal: Long = Event.NoSpanId
): Unit
/**
* Returns all currently available events.
*
* '''Note:''' the events are not returned in any particular order.
*/
def events: Iterator[Event]
}
/**
* Note: There is a Java-friendly API for this object: [[com.twitter.util.events.Sinks]].
*/
object Sink {
/**
* A sink that ignores all input.
*/
val Null: Sink = new Sink {
override def event(
etype: Type,
longVal: Long,
objectVal: Object,
doubleVal: Double,
traceIdVal: Long,
spanIdVal: Long
): Unit = ()
override def events: Iterator[Event] = Iterator.empty
}
/**
* An unsized sink. Convenient for testing.
*/
def of(buffer: scala.collection.mutable.Buffer[Event]): Sink =
new Sink {
def events = buffer.iterator
def event(e: Event.Type, l: Long, o: Object, d: Double, t: Long, s: Long) =
buffer += Event(e, com.twitter.util.Time.now, l, o, d, t, s)
}
// exposed for testing
private[events] def newDefault: Sink = {
if (!sinkEnabled.apply()) {
Null
} else if (approxNumEvents() <= 0) {
Null
} else {
SizedSink(approxNumEvents())
}
}
/**
* The global default `Sink`.
*/
val default: Sink = newDefault
// Java compatibility.
private[events] val getDefault: Sink = default
/**
* Returns whether or not any event capture is enabled.
*/
def enabled: Boolean = default ne Null
}
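// Illustrative sketch (not part of the original file): the unsized sink above is convenient
// in tests: record into a buffer, then read the captured events back. `myEventType` is an
// assumption standing in for a real Event.Type value.
//
//   val captured = scala.collection.mutable.ArrayBuffer.empty[Event]
//   val sink = Sink.of(captured)
//   sink.event(myEventType, longVal = 42L)
//   sink.events.foreach(println)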
| travisbrown/util | util-events/src/main/scala/com/twitter/util/events/Sink.scala | Scala | apache-2.0 | 2,441 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.cassandra
import com.twitter.cassie.{Order, Column, ColumnFamily}
import com.twitter.util.Future
import java.nio.ByteBuffer
import org.specs.mock.{JMocker, ClassMocker}
import org.specs.Specification
import scala.collection.JavaConverters._
class BucketedColumnFamilySpec extends Specification with JMocker with ClassMocker {
"BucketedColumnFamily" should {
val numBuckets = 10
val key = "some_key"
val col1 = Column(1L, 2L)
val col2 = Column(3L, 4L)
val col3 = Column(5L, 6L)
val colMap = Map(col1.name -> col1, col2.name -> col2, col3.name -> col3).asJava
val start = Some(6L)
val end = None
val count = 3
"insert and get row" in {
val cf = mock[ColumnFamily[ByteBuffer, Long, Long]]
val bcf = new StringBucketedColumnFamily(cf, numBuckets)
val keys = (0 until numBuckets) map {bcf.makeBucketedKey(key, _)}
expect {
3.of(cf).insert(any[ByteBuffer], any[Column[Long, Long]])
1.of(cf).multigetRows(keys.toSet.asJava, None, None, Order.Normal, Int.MaxValue) willReturn
Future {
Map (
keys(0) -> Map(col1.name -> col1).asJava,
keys(1) -> Map(col2.name -> col2).asJava,
keys(2) -> Map(col3.name -> col3).asJava
).asJava
}
}
bcf.insert(key, col1)
bcf.insert(key, col2)
bcf.insert(key, col3)
val rowMap = bcf.getRow(key)
rowMap() mustEqual colMap
}
"insert and get row slice" in {
val cf = mock[ColumnFamily[ByteBuffer, Long, Long]]
val bcf = new StringBucketedColumnFamily(cf, numBuckets)
val keys = (0 until numBuckets) map {bcf.makeBucketedKey(key, _)}
expect {
3.of(cf).insert(any[ByteBuffer], any[Column[Long, Long]])
Seq(Order.Normal, Order.Reversed) foreach { o =>
1.of(cf).multigetRows(keys.toSet.asJava, start, end, o, count) willReturn
Future {
Map (
keys(0) -> Map(col1.name -> col1).asJava,
keys(1) -> Map(col2.name -> col2).asJava,
keys(2) -> Map(col3.name -> col3).asJava
).asJava
}
}
}
bcf.insert(key, col1)
bcf.insert(key, col2)
bcf.insert(key, col3)
val rowNormal = bcf.getRowSlice(key, start, end, count, Order.Normal)()
val rowReversed = bcf.getRowSlice(key, start, end, count, Order.Reversed)()
rowNormal mustEqual List(col1, col2, col3)
rowReversed mustEqual List(col3, col2, col1)
}
"only get limited number of entries" in {
val cf = mock[ColumnFamily[ByteBuffer, Long, Long]]
val bcf = new StringBucketedColumnFamily(cf, numBuckets)
val count = 2
val keys = (0 until numBuckets) map {bcf.makeBucketedKey(key, _)}
expect {
3.of(cf).insert(any[ByteBuffer], any[Column[Long, Long]])
Seq(Order.Normal, Order.Reversed) foreach { o =>
1.of(cf).multigetRows(keys.toSet.asJava, start, end, o, count) willReturn
Future {
Map (
keys(0) -> Map(col1.name -> col1).asJava,
keys(1) -> Map(col2.name -> col2).asJava,
keys(2) -> Map(col3.name -> col3).asJava
).asJava
}
}
}
bcf.insert(key, col1)
bcf.insert(key, col2)
bcf.insert(key, col3)
val rowNormal = bcf.getRowSlice(key, start, end, count, Order.Normal)()
val rowReversed = bcf.getRowSlice(key, start, end, count, Order.Reversed)()
rowNormal mustEqual List(col1, col2)
rowReversed mustEqual List(col3, col2)
}
"roll over buckets correctly" in {
val cf = mock[ColumnFamily[ByteBuffer, Long, Long]]
val bcf = new StringBucketedColumnFamily(cf, numBuckets)
expect {
(0 until 11) foreach { i: Int =>
1.of(cf).insert(bcf.makeBucketedKey(key, i % numBuckets), col1)
}
}
(0 until 11) foreach { i: Int =>
bcf.insert(key, col1)
}
}
}
}
| jerryli9876/zipkin | zipkin-server/src/test/scala/com/twitter/zipkin/storage/cassandra/BucketedColumnFamilySpec.scala | Scala | apache-2.0 | 4,671 |
package org.finra.datagenerator.scaffolding.transformer.service
import java.lang.reflect.Method
import org.finra.datagenerator.scaffolding.context.ContextProvider
import org.finra.datagenerator.scaffolding.transformer.service.{InputTransformationContainer, TransformationContext}
import scala.collection.JavaConverters._
/**
* Created by dkopel on 12/14/16.
*/
trait TransformationContextProvider extends ContextProvider {
private val contextMethods: collection.mutable.Map[String, Method] = collection.mutable.Map.empty
contextMethods ++= DefaultContextMethods.methods
contextMethods.foreach(t => registerFunction(t._1, t._2))
def updateContext(tContext: TransformationContext): Unit = {
tContext.getCurrentIteration.asScala.foreach(c => {
if(c.isInstanceOf[InputTransformationContainer[_]] && c.value == null) {
logger.error(s"The input container for the class ${c.clazz} has a null value!")
}
registerVariable(c.alias, c.value)
})
registerVariables(
Map(
"context"->tContext,
"globals"->getGlobals,
"iteration"->tContext.getIteration
)
)
}
} | yukaReal/DataGenerator | rubber-scaffolding/rubber-transformer/src/main/scala/org/finra/datagenerator/scaffolding/transformer/service/TransformationContextProvider.scala | Scala | apache-2.0 | 1,230 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
// Copyright 2005-2017 LAMP/EPFL and Lightbend, Inc
package scala.tools.nsc
package transform
import symtab._
import Flags._
import scala.collection.mutable
trait AccessorSynthesis extends Transform with ast.TreeDSL {
import global._
import definitions._
import CODE._
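  // A "thicket" packs several synthesized trees into a single Block terminated by EmptyTree,
  // so code that must return exactly one Tree can carry many; explodeThicket unpacks it again.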
val EmptyThicket = EmptyTree
def Thicket(trees: List[Tree]) = if (trees.isEmpty) EmptyTree else Block(trees, EmptyTree)
def mustExplodeThicket(tree: Tree): Boolean =
tree match {
case EmptyTree => true
case Block(_, EmptyTree) => true
case _ => false
}
def explodeThicket(tree: Tree): List[Tree] = tree match {
case EmptyTree => Nil
case Block(thicket, EmptyTree) => thicket
case stat => stat :: Nil
}
trait AccessorTreeSynthesis {
protected def typedPos(pos: Position)(tree: Tree): Tree
// used while we still need to synthesize some accessors in mixins: paramaccessors and presupers
class UncheckedAccessorSynth(protected val clazz: Symbol){
protected val _newDefs = mutable.ListBuffer[Tree]()
def newDefs = _newDefs.toList
/** Add tree at given position as new definition */
protected def addDef(tree: ValOrDefDef): Unit = _newDefs += typedPos(position(tree.symbol))(tree)
/** The position of given symbol, or, if this is undefined,
* the position of the current class.
*/
private def position(sym: Symbol) = if (sym.pos == NoPosition) clazz.pos else sym.pos
/** Add new method definition.
*
* @param sym The method symbol.
* @param rhs The method body.
*/
def addDefDef(sym: Symbol, rhs: Tree = EmptyTree) = addDef(DefDef(sym, rhs))
def addValDef(sym: Symbol, rhs: Tree = EmptyTree) = addDef(ValDef(sym, rhs))
/** Complete `stats` with init checks and bitmaps,
* removing any abstract method definitions in `stats` that are
* matched by some symbol defined by a tree previously passed to `addDef`.
*/
def implementWithNewDefs(stats: List[Tree]): List[Tree] = {
val newDefs = _newDefs.toList
val newSyms = newDefs map (_.symbol)
def isNotDuplicate(tree: Tree) = tree match {
case DefDef(_, _, _, _, _, _) =>
val sym = tree.symbol
!(sym.isDeferred &&
(newSyms exists (nsym => nsym.name == sym.name && (nsym.tpe matches sym.tpe))))
case _ => true
}
if (newDefs.isEmpty) stats
else newDefs ::: (stats filter isNotDuplicate)
}
def accessorBody(sym: Symbol) =
if (sym.isSetter) setterBody(sym, sym.getterIn(clazz)) else getterBody(sym)
protected def getterBody(getter: Symbol): Tree = {
assert(getter.isGetter, s"$getter must be a getter")
assert(getter.hasFlag(PARAMACCESSOR), s"$getter must be an accessor")
fieldAccess(getter)
}
protected def setterBody(setter: Symbol, getter: Symbol): Tree = {
assert(getter.hasFlag(PARAMACCESSOR), s"missing implementation for non-paramaccessor $setter in $clazz")
// scala-dev#408: fields for locals captured in a trait are non-final. The lambdalift phase adds the
// ConstructorNeedsFence attachment to the primary constructor of the class to ensure safe publication.
setter.accessed.setFlag(MUTABLE)
Assign(fieldAccess(setter), Ident(setter.firstParam))
}
private def fieldAccess(accessor: Symbol) =
Select(This(clazz), accessor.accessed)
}
}
case class BitmapInfo(symbol: Symbol, mask: Literal) {
def select(on: This): Tree = Select(on, symbol)
def applyToMask(on: This, op: Name): Tree = Apply(member(select(on), op), List(mask))
def member(bitmapRef: Tree, name: Name): Tree = Select(bitmapRef, getMember(storageClass, name))
def convert(bitmapRef: Tree): Tree = Apply(member(bitmapRef, newTermName("to" + storageClass.name)), Nil)
def isLong: Boolean = storageClass == LongClass
def isBoolean: Boolean = storageClass == BooleanClass
lazy val storageClass: ClassSymbol = symbol.info.typeSymbol.asClass
}
// TODO: better way to communicate from info transform to tree transform?
private[this] val _bitmapInfo = perRunCaches.newMap[Symbol, BitmapInfo]()
private[this] val _slowPathFor = perRunCaches.newMap[Symbol, Symbol]()
def checkedAccessorSymbolSynth(clz: Symbol): CheckedAccessorSymbolSynth =
new CheckedAccessorSymbolSynth(clz)
// base trait, with enough functionality for generating bitmap symbols for lazy vals and -Xcheckinit fields
class CheckedAccessorSymbolSynth(val clazz: Symbol) {
/**
* Note: fields of classes inheriting DelayedInit are not checked.
* This is because they are neither initialized in the constructor
* nor do they have a setter (not if they are vals anyway). The usual
* logic for setting bitmaps does therefore not work for such fields.
* That's why they are excluded.
*
*/
private[this] val doCheckInit = settings.checkInit.value && !(clazz isSubClass DelayedInitClass)
private[AccessorSynthesis] def bitmapFor(field: Symbol): BitmapInfo = _bitmapInfo(field)
protected def bitmapOf(field: Symbol): Option[BitmapInfo] = _bitmapInfo.get(field)
/** Fill the map from fields to bitmap infos.
* This is called for all fields in each transformed class (by the fields info transformer),
* after the fields inherited from traits have been added.
*
* bitmaps for checkinit fields are not inherited
*/
def computeBitmapInfos(fields: List[Symbol]): List[Symbol] = {
def bitmapCategory(field: Symbol): Name = {
import nme._
if (field.isLazy)
if (field hasAnnotation TransientAttr) BITMAP_TRANSIENT else BITMAP_NORMAL
else if (doCheckInit && !(field hasFlag DEFAULTINIT | PRESUPER | PARAMACCESSOR))
if (field hasAnnotation TransientAttr) BITMAP_CHECKINIT_TRANSIENT else BITMAP_CHECKINIT
else NO_NAME
}
def allocateBitmaps(fieldsWithBitmaps: List[Symbol], category: Name) = {
val nbFields = fieldsWithBitmaps.length // we know it's > 0
val (bitmapClass, bitmapCapacity) =
if (nbFields == 1) (BooleanClass, 1)
else if (nbFields <= 8) (ByteClass, 8)
else if (nbFields <= 32) (IntClass, 32)
else (LongClass, 64)
// 0-based index of highest bit, divided by bits per bitmap
// note that this is only ever > 0 when bitmapClass == LongClass
val maxBitmapNumber = (nbFields - 1) / bitmapCapacity
// transient fields get their own category
val isTransientCategory = nme.isTransientBitmap(category)
val bitmapSyms =
(0 to maxBitmapNumber).toArray map { bitmapNumber =>
val bitmapSym = (
clazz.newVariable(nme.newBitmapName(category, bitmapNumber).toTermName, clazz.pos.focus)
setInfo bitmapClass.tpe
setFlag PrivateLocal | NEEDS_TREES
)
bitmapSym addAnnotation VolatileAttr
if (isTransientCategory) bitmapSym addAnnotation TransientAttr
bitmapSym
}
fieldsWithBitmaps.zipWithIndex foreach { case (f, idx) =>
val bitmapIdx = idx / bitmapCapacity
val offsetInBitmap = idx % bitmapCapacity
val mask =
if (bitmapClass == LongClass) Constant(1L << offsetInBitmap)
else Constant(1 << offsetInBitmap)
_bitmapInfo(f) = BitmapInfo(bitmapSyms(bitmapIdx), Literal(mask))
}
bitmapSyms
}
fields.groupBy(bitmapCategory).flatMap {
case (category, fields) if category != nme.NO_NAME && fields.nonEmpty => allocateBitmaps(fields, category): Iterable[Symbol]
case _ => Nil
}.toList
}
def slowPathFor(lzyVal: Symbol): Symbol = _slowPathFor(lzyVal)
def newSlowPathSymbol(lzyVal: Symbol): Symbol = {
val pos = if (lzyVal.pos != NoPosition) lzyVal.pos else clazz.pos.focus // TODO: is the else branch ever taken?
val sym = clazz.newMethod(nme.newLazyValSlowComputeName(lzyVal.name.toTermName), pos, PRIVATE) setInfo MethodType(Nil, lzyVal.tpe.resultType)
_slowPathFor(lzyVal) = sym
sym
}
}
// synthesize trees based on info gathered during info transform
// (which are known to have been run because the tree transform runs afterOwnPhase)
// since we can't easily share all info via symbols and flags, we have two maps above
// (they are persisted even between phases because the -Xcheckinit logic runs during constructors)
// TODO: can we use attachments instead of _bitmapInfo and _slowPathFor?
trait CheckedAccessorTreeSynthesis extends AccessorTreeSynthesis {
// note: we deal in getters here, not field symbols
class SynthCheckedAccessorsTreesInClass(clazz: Symbol) extends CheckedAccessorSymbolSynth(clazz) {
def isUnitGetter(sym: Symbol) = sym.tpe.resultType.typeSymbol == UnitClass
def thisRef = gen.mkAttributedThis(clazz)
/** Return an (untyped) tree of the form 'clazz.this.bitmapSym & mask (==|!=) 0', the
* precise comparison operator depending on the value of 'equalToZero'.
*/
def mkTest(bm: BitmapInfo, equalToZero: Boolean = true): Tree =
if (bm.isBoolean)
if (equalToZero) Apply(NOT(bm.select(thisRef)), Nil) else bm.select(thisRef)
else
Apply(bm.member(bm.applyToMask(thisRef, nme.AND), if (equalToZero) nme.EQ else nme.NE), List(ZERO))
/** Return an (untyped) tree of the form 'Clazz.this.bmp = Clazz.this.bmp | mask'. */
def mkSetFlag(bitmap: BitmapInfo): Tree =
Assign(bitmap.select(thisRef),
if (bitmap.isBoolean) TRUE
else {
val ored = bitmap.applyToMask(thisRef, nme.OR)
// NOTE: Unless the bitmap is a Long, we must convert explicitly to avoid widening
// For example, bitwise OR (`|`) on two bytes yields and Int
if (bitmap.isLong) ored else bitmap.convert(ored)
})
}
class SynthLazyAccessorsIn(clazz: Symbol) extends SynthCheckedAccessorsTreesInClass(clazz) {
/**
* The compute method (slow path) looks like:
*
* {{{
* def l\$compute() = {
* synchronized(this) {
* if ((bitmap\$n & MASK) == 0) {
* init // l\$ = <rhs>
* bitmap\$n = bimap\$n | MASK
* }
* }
* ...
* this.f1 = null
* ...
* this.fn = null
* l\$
* }
* }}}
*
* `bitmap\$n` is a byte, int or long value acting as a bitmap of initialized values.
* The kind of the bitmap determines how many bit indicators for lazy vals are stored in it.
* For Int bitmap it is 32 and then 'n' in the above code is: (offset / 32),
* the MASK is (1 << (offset % 32)).
*
* If the class contains only a single lazy val then the bitmap is
* represented as a Boolean and the condition checking is a simple bool test.
*
* Private fields used only in this initializer are subsequently set to null.
*
* For performance reasons the double-checked locking is split into two parts,
* the first (fast) path checks the bitmap without synchronizing, and if that
* fails it initializes the lazy val within the synchronization block (slow path).
*
* This way the inliner should optimize the fast path because the method body is small enough.
*/
def expandLazyClassMember(lazyVar: global.Symbol, lazyAccessor: global.Symbol, transformedRhs: global.Tree): Tree = {
val slowPathSym = slowPathFor(lazyAccessor)
val rhsAtSlowDef = transformedRhs.changeOwner(lazyAccessor, slowPathSym)
val isUnit = isUnitGetter(lazyAccessor)
val selectVar = if (isUnit) UNIT else Select(thisRef, lazyVar)
val storeRes = if (isUnit) rhsAtSlowDef else Assign(selectVar, fields.castHack(rhsAtSlowDef, lazyVar.info))
val bitmap = bitmapFor(lazyVar)
def needsInit = mkTest(bitmap)
val doInit = Block(List(storeRes), mkSetFlag(bitmap))
      // the slow part of double-checked locking (TODO: is this the most efficient pattern? https://github.com/scala/scala-dev/issues/204)
val slowPathRhs = Block(gen.mkSynchronized(thisRef)(If(needsInit, doInit, EmptyTree)) :: Nil, selectVar)
// The lazy accessor delegates to the compute method if needed, otherwise just accesses the var (it was initialized previously)
// `if ((bitmap&n & MASK) == 0) this.l$compute() else l$`
val accessorRhs = fields.castHack(If(needsInit, Apply(Select(thisRef, slowPathSym), Nil), selectVar), lazyVar.info)
afterOwnPhase { // so that we can assign to vals
Thicket(List((DefDef(slowPathSym, slowPathRhs)), DefDef(lazyAccessor, accessorRhs)) map typedPos(lazyAccessor.pos.focus))
}
}
}
class SynthInitCheckedAccessorsIn(clazz: Symbol) extends SynthCheckedAccessorsTreesInClass(clazz) {
// Add statements to the body of a constructor to set the 'init' bit for each field initialized in the constructor
private object addInitBitsTransformer extends AstTransformer {
override def transformStats(stats: List[Tree], exprOwner: Symbol) = {
val checkedStats = stats flatMap {
// Mark field as initialized after an assignment
case stat@Assign(lhs@Select(This(_), _), _) =>
stat :: bitmapOf(lhs.symbol).toList.map(bitmap => typedPos(stat.pos.focus)(mkSetFlag(bitmap)))
// remove initialization for default values
// TODO is this case ever hit? constructors does not generate Assigns with EmptyTree for the rhs AFAICT
// !!! Ident(self) is never referenced, is it supposed to be confirming
// that self is anything in particular?
case Apply(lhs@Select(Ident(self), _), EmptyTree.asList) if lhs.symbol.isSetter => Nil
case stat => List(stat)
}
super.transformStats(checkedStats, exprOwner)
}
}
private[this] val isTrait = clazz.isTrait
// We only act on concrete methods, and traits only need to have their constructor rewritten
def needsWrapping(dd: DefDef) =
dd.rhs != EmptyTree && (!isTrait || dd.symbol.isConstructor)
/** Make getters check the initialized bit, and the class constructor & setters are changed to set the initialized bits. */
def wrapRhsWithInitChecks(sym: Symbol)(rhs: Tree): Tree =
if (sym.isConstructor) addInitBitsTransformer transform rhs
else if ((sym hasFlag ACCESSOR) && !(sym hasFlag (LAZY | PARAMACCESSOR))) {
val field = clazz.info.decl(sym.localName)
if (field == NoSymbol) rhs
else bitmapOf(field) match {
case Some(bitmap) =>
if (sym.isGetter) mkCheckedAccessorRhs(if (isUnitGetter(sym)) UNIT else rhs, rhs.pos, bitmap) // TODO: why not always use rhs?
else Block(List(rhs, typedPos(rhs.pos.focus)(mkSetFlag(bitmap))), UNIT)
case _ => rhs
}
}
else rhs
private def mkCheckedAccessorRhs(retVal: Tree, pos: Position, bitmap: BitmapInfo): Tree = {
val msg = s"Uninitialized field: ${clazz.sourceFile}: ${pos.line}"
val result =
IF(mkTest(bitmap, equalToZero = false)).
THEN(retVal).
ELSE(Throw(NewFromConstructor(UninitializedFieldConstructor, LIT(msg))))
typedPos(pos)(BLOCK(result, retVal))
}
}
}
}
| scala/scala | src/compiler/scala/tools/nsc/transform/AccessorSynthesis.scala | Scala | apache-2.0 | 16,059 |
package com.exandas.tms.identity
// storeUser/authenticate below rely on `generateSalt` and the `String#bcrypt`
// extension; these are assumed to come from the scala-bcrypt library.
import com.github.t3hnar.bcrypt._
/**
* Created by kostas on 09/07/2015.
*/
trait IdentityProvider {
this : Memory =>
def storeUser(username:String, password: String): Unit ={
val salt = generateSalt
val salted = password.bcrypt(salt)
store(UserLoginInfo(username,salted,salt))
}
def authenticate(username:String, password:String): Boolean = {
getAll.find(userInfo => {
userInfo.username.equals(username)
}) match {
case Some(u) => {
val salt = u.salt
val realPass = u.password
password.bcrypt(salt).equals(realPass)
}
case None => false
}
}
}
case class UserLoginInfo(username:String, password:String, salt:String)
trait Memory {
def store(s:UserLoginInfo)
def getAll : Seq[UserLoginInfo]
}
trait InMemory extends Memory {
import scala.collection.mutable._
var storeSeq : Seq[UserLoginInfo] = Seq()
override def store(s: UserLoginInfo): Unit = {
storeSeq = storeSeq.+:(s)
}
override def getAll = storeSeq
}
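// Usage sketch (hypothetical, not part of the original file): mix the in-memory
// store into the self-typed provider and round-trip a credential check.
//   object Users extends IdentityProvider with InMemory
//   Users.storeUser("alice", "s3cret")
//   Users.authenticate("alice", "s3cret")  // true
//   Users.authenticate("alice", "wrong")   // false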
| exandas/tms-identity-micro | src/main/scala/com/exandas/tms/identity/IdentityProvider.scala | Scala | mit | 1,040 |
// scalac: -Yrangepos
object Test extends App {
42 match {
case Extractor(a) => println(a)
case x => throw new MatchError(x)
}
}
| scala/scala | test/files/run/macro-rangepos-subpatterns/Test_2.scala | Scala | apache-2.0 | 141 |
def f(p: String) {
var p: String = ""
println(/* offset: 25 */ p.getClass)
println(classOf[/* resolved: false */ p])
} | ilinum/intellij-scala | testdata/resolve2/element/mix/FunctionParameterAndVariable.scala | Scala | apache-2.0 | 125 |
package edu.gemini.ags.client.impl
import edu.gemini.model.p1.immutable.Observation
import edu.gemini.ags.client.api._
import edu.gemini.util.ssl.GemSslSocketFactory
import java.net.{URLConnection, HttpURLConnection, URL}
import HttpURLConnection.HTTP_OK
import io.Source
import java.io.{InputStream, IOException}
import javax.net.ssl.{SSLSession, HostnameVerifier, HttpsURLConnection}
import java.util.logging.{Level, Logger}
/**
* AGS client that connects to a remote AGS service to obtain estimates.
*/
object AgsHttpClient {
private val Log = Logger.getLogger(getClass.getName)
private val hostnameVerifier: HostnameVerifier = new HostnameVerifier {
def verify(s: String, sslSession: SSLSession) = true
}
}
import AgsHttpClient.{Log, hostnameVerifier}
case class AgsHttpClient(host: String, port: Int) extends AgsClient {
  val timeout = 3 * 60000 // 3 minutes, in milliseconds
val qurl = QueryUrl(host, port)
def url(obs: Observation, time: Long): Option[URL] =
qurl.format(obs, time).right.toOption
def estimateNow(obs: Observation, time: Long): AgsResult =
qurl.format(obs, time) match {
case Left(msg) => AgsResult.Incomplete(msg)
case Right(url) => estimateNow(url)
}
private def estimateNow(url: URL): AgsResult = {
try {
AgsHttpClient.Log.info(s"AGS Query to $url")
val conn = url.openConnection().asInstanceOf[HttpsURLConnection]
conn.setHostnameVerifier(hostnameVerifier)
conn.setSSLSocketFactory(GemSslSocketFactory.get)
conn.setReadTimeout(timeout)
Charset.set(conn)
Response(conn).result
} catch {
case io: IOException =>
Log.log(Level.INFO, "I/O Exception while fetching AGS estimate, presumed offline", io)
AgsResult.Offline
case t: Throwable =>
Log.log(Level.WARNING, "Exception while fetching AGS estimate", t)
AgsResult.Error(t)
}
}
}
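// Usage sketch (hypothetical host/port and observation; the real endpoint requires
// HTTPS connectivity and a fully specified Phase 1 observation):
//   val client = AgsHttpClient("ags.example.edu", 443)
//   val result: AgsResult = client.estimateNow(obs, System.currentTimeMillis)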
private object Charset {
val default = "UTF-8"
def set(conn: URLConnection) {
conn.setRequestProperty("Accept-Charset", default)
}
def get(conn: URLConnection): String =
Option(conn.getContentType).getOrElse("").replace(" ", "").split(';') find {
_.startsWith("charset=")
} map {
_.substring("charset=".length)
} getOrElse default
}
private case class Response(code: Int, msg: String) {
val isSuccess = (code >= 200) && (code < 300)
val isClientError = (code >= 400) && (code < 500)
val result =
if (isSuccess)
AgsResult(msg)
else if (isClientError)
AgsResult.Incompatible(code, msg)
else
AgsResult.ServiceError(code, msg)
}
private object Response {
def apply(conn: HttpURLConnection): Response = {
def read(is: HttpURLConnection => InputStream): String = {
val s = Source.fromInputStream(is(conn), Charset.get(conn))
try { s.mkString } finally { s.close() }
}
conn.getResponseCode match {
case HTTP_OK => Response(HTTP_OK, read(_.getInputStream))
case code => Response(code, read(_.getErrorStream))
}
}
}
| arturog8m/ocs | bundle/edu.gemini.ags.client.impl/src/main/scala/edu/gemini/ags/client/impl/AgsHttpClient.scala | Scala | bsd-3-clause | 3,034 |
package com.databricks.spark.sql.perf.mllib.feature
import scala.util.Random
import org.apache.spark.ml
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.ml.PipelineStage
import org.apache.spark.sql._
import com.databricks.spark.sql.perf.mllib.OptionImplicits._
import com.databricks.spark.sql.perf.mllib.data.DataGenerator
import com.databricks.spark.sql.perf.mllib.{BenchmarkAlgorithm, MLBenchContext, TestFromTraining}
/** Object for testing Bucketizer performance */
object Bucketizer extends BenchmarkAlgorithm with TestFromTraining with UnaryTransformer {
override def trainingDataSet(ctx: MLBenchContext): DataFrame = {
import ctx.params._
import ctx.sqlContext.implicits._
val rng = ctx.newGenerator()
// For a bucketizer, training data consists of a single column of random doubles
DataGenerator.generateContinuousFeatures(ctx.sqlContext,
numExamples, ctx.seed(), numPartitions, numFeatures = 1).rdd.map { case Row(vec: Vector) =>
vec(0) // extract the single generated double value for each row
}.toDF(inputCol)
}
override def getPipelineStage(ctx: MLBenchContext): PipelineStage = {
import ctx.params._
val rng = ctx.newGenerator()
// Generate an array of (finite) splitting points in [-1, 1) for the Bucketizer
val splitPoints = 0.until(bucketizerNumBuckets - 1).map { _ =>
2 * rng.nextDouble() - 1
}.sorted.toArray
// Final array of splits contains +/- infinity
val splits = Array(Double.NegativeInfinity) ++ splitPoints ++ Array(Double.PositiveInfinity)
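    // e.g. bucketizerNumBuckets = 3 yields splits of the form [-Inf, s1, s2, +Inf]:
    // numBuckets buckets always need numBuckets + 1 boundaries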
new ml.feature.Bucketizer()
.setSplits(splits)
.setInputCol(inputCol)
}
}
| databricks/spark-sql-perf | src/main/scala/com/databricks/spark/sql/perf/mllib/feature/Bucketizer.scala | Scala | apache-2.0 | 1,660 |
object assignments {
var a = Array(1, 2, 3)
var i = 0
a(i) = a(i) * 2
a(i + 1) += 1
class C {
var myX = 0
def x = myX
def x_=(x: Int) = myX = x
x = x + 1
x *= 2
x_= = 2 // error should give missing arguments
}
var c = new C
import c._ // error should give: prefix is not stable
x = x + 1
x *= 2
}
| som-snytt/dotty | tests/neg/assignments.scala | Scala | apache-2.0 | 347 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.commons.dao
import com.aerospike.client.Value.StringValue
import com.aerospike.client._
import com.aerospike.client.async.AsyncClient
import com.aerospike.client.cdt._
import com.aerospike.client.listener.{DeleteListener, RecordListener, WriteListener}
import com.aerospike.client.policy.{QueryPolicy, WritePolicy}
import com.flipkart.connekt.commons.metrics.Instrumented
import com.flipkart.connekt.commons.services.ConnektConfig
import scala.collection.JavaConverters._
import scala.concurrent.{Future, Promise}
trait AeroSpikeDao extends Instrumented {
lazy val timeout: Int = ConnektConfig.getInt("connections.aerospike.timeout").getOrElse(1000)
lazy val maxRetries: Int = ConnektConfig.getInt("connections.aerospike.maxRetries").getOrElse(3)
protected def addRow(key: Key, bin: Bin, ttl: Option[Long] = None)(implicit client: AsyncClient): Future[Boolean] = {
val policy = new WritePolicy()
policy.timeout = timeout
policy.maxRetries = maxRetries
ttl.foreach(millis => policy.expiration = (millis / 1000).toInt)
val promise = Promise[Boolean]()
client.put(policy, new AeroSpikeWriteHandler(promise), key, bin)
promise.future
}
protected def addMapRow(key: Key, binName: String, values: Map[String, String], ttl: Option[Long] = None)(implicit client: AsyncClient): Future[Record] = {
val policy = new WritePolicy()
policy.timeout = timeout
policy.maxRetries = maxRetries
ttl.foreach(millis => policy.expiration = (millis / 1000).toInt)
val data: Map[Value, Value] = values.map { case (k, v) => (new StringValue(k), new StringValue(v)) }
val promise = Promise[Record]()
    client.operate(policy, new AeroSpikeRecordHandler(promise), key, MapOperation.putItems(MapPolicy.Default, binName, data.asJava))
promise.future
}
protected def deleteMapRowItems(key: Key, binName: String, keys: List[String])(implicit client: AsyncClient): Future[Record] = {
val policy = new WritePolicy()
policy.timeout = timeout
policy.maxRetries = maxRetries
    policy.expiration = -2 // Aerospike convention: -2 leaves the record's existing TTL unchanged
val _keys: List[Value] = keys.map(new StringValue(_))
val promise = Promise[Record]()
client.operate(policy, new AeroSpikeRecordHandler(promise), key, MapOperation.removeByKeyList(binName, _keys.asJava, MapReturnType.COUNT))
promise.future
}
protected def trimMapRowItems(key: Key, binName: String, numToRemove:Int)(implicit client: AsyncClient): Future[Record] = {
val policy = new WritePolicy()
policy.timeout = timeout
policy.maxRetries = maxRetries
policy.expiration = -2
val promise = Promise[Record]()
client.operate(policy, new AeroSpikeRecordHandler(promise), key, MapOperation.removeByIndexRange(binName, 0, numToRemove, MapReturnType.COUNT))
promise.future
}
protected def getRow(key: Key)(implicit client: AsyncClient): Future[Record] = {
val policy = new QueryPolicy()
policy.timeout = timeout
policy.maxRetries = maxRetries
val promise = Promise[Record]()
client.get(policy, new AeroSpikeRecordHandler(promise), key)
promise.future
}
protected def deleteRow(key: Key)(implicit client: AsyncClient): Future[Boolean] = {
val policy = new WritePolicy()
policy.timeout = timeout
policy.maxRetries = maxRetries
val promise = Promise[Boolean]()
client.delete(policy, new AeroSpikeDeleteHandler(promise), key)
promise.future
}
}
private class AeroSpikeWriteHandler(promise: Promise[Boolean]) extends WriteListener {
override def onFailure(exception: AerospikeException): Unit = promise.failure(exception)
override def onSuccess(key: Key): Unit = promise.success(true)
}
private class AeroSpikeRecordHandler(promise: Promise[Record]) extends RecordListener {
override def onFailure(exception: AerospikeException): Unit = promise.failure(exception)
override def onSuccess(key: Key, record: Record): Unit = promise.success(record)
}
private class AeroSpikeDeleteHandler(promise: Promise[Boolean]) extends DeleteListener {
override def onFailure(exception: AerospikeException): Unit = promise.failure(exception)
override def onSuccess(key: Key, existed: Boolean): Unit = promise.success(true)
}
| Flipkart/connekt | commons/src/main/scala/com/flipkart/connekt/commons/dao/AeroSpikeDao.scala | Scala | mit | 4,822 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.si.model
import java.io.File
import com.bwsw.common.file.utils.MongoFileStorage
import com.bwsw.sj.common.dal.model.module.{FileMetadataDomain, SpecificationDomain}
import com.bwsw.sj.common.dal.repository.ConnectionRepository
import com.bwsw.sj.common.si.JsonValidator
import com.bwsw.sj.common.utils.{MessageResourceUtils, SpecificationUtils}
import scaldi.Injectable.inject
import scaldi.Injector
import scala.collection.mutable.ArrayBuffer
import scala.util.{Failure, Success, Try}
class FileMetadata(val filename: String,
val file: Option[File] = None,
val name: Option[String] = None,
val version: Option[String] = None,
val length: Option[Long] = None,
val description: Option[String] = None,
val uploadDate: Option[String] = None)
(implicit injector: Injector)
extends JsonValidator {
protected val messageResourceUtils = inject[MessageResourceUtils]
import messageResourceUtils._
protected val connectionRepository: ConnectionRepository = inject[ConnectionRepository]
protected val fileStorage: MongoFileStorage = connectionRepository.getFileStorage
protected val fileMetadataRepository = connectionRepository.getFileMetadataRepository
/**
* Validates file metadata
*
* @return empty array if file metadata is correct, validation errors otherwise
*/
def validate(): ArrayBuffer[String] = {
val errors = new ArrayBuffer[String]
if (!fileStorage.exists(filename)) {
if (checkCustomFileSpecification(file.get)) {
val specification = inject[SpecificationUtils].getSpecification(file.get)
if (doesCustomJarExist(specification)) errors += createMessage("rest.custom.jars.exists", specification.name, specification.version)
} else errors += getMessage("rest.errors.invalid.specification")
} else errors += createMessage("rest.custom.jars.file.exists", filename)
errors
}
/**
   * Checks whether the specification of an uploaded custom jar file is valid
   *
   * @param jarFile the uploaded jar file
*/
private def checkCustomFileSpecification(jarFile: File): Boolean = {
val json = inject[SpecificationUtils].getSpecificationFromJar(jarFile)
if (isEmptyOrNullString(json)) {
return false
}
Try(validateWithSchema(json, "customschema.json")) match {
case Success(isValid) => isValid
case Failure(_) => false
}
}
private def doesCustomJarExist(specification: SpecificationDomain) = {
fileMetadataRepository.getByParameters(
Map("filetype" -> FileMetadataLiterals.customJarType,
"specification.name" -> specification.name,
"specification.version" -> specification.version
)).nonEmpty
}
}
object FileMetadataLiterals {
val customJarType: String = "custom"
val customFileType: String = "custom-file"
val moduleType: String = "module"
}
class FileMetadataCreator {
def from(fileMetadataDomain: FileMetadataDomain)(implicit injector: Injector): FileMetadata = {
new FileMetadata(
fileMetadataDomain.filename,
None,
Some(fileMetadataDomain.specification.name),
Some(fileMetadataDomain.specification.version),
Some(fileMetadataDomain.length),
Some(fileMetadataDomain.specification.description),
Some(fileMetadataDomain.uploadDate.toString)
)
}
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/si/model/FileMetadata.scala | Scala | apache-2.0 | 4,239 |
package net.sansa_stack.rdf.common.partition.core
import net.sansa_stack.rdf.common.partition.core.RdfPartitionerDefault._
import net.sansa_stack.rdf.common.partition.schema.{ SchemaStringString, SchemaStringStringLang }
import org.apache.jena.graph.{ Node, NodeFactory, Triple }
import org.scalatest.FunSuite
/**
* @author Gezim Sejdiu
*/
class RdfPartitionerDefaultTests extends FunSuite {
val triple = Triple.create(
NodeFactory.createURI("http://dbpedia.org/resource/Guy_de_Maupassant"),
NodeFactory.createURI("http://xmlns.com/foaf/0.1/givenName"),
NodeFactory.createLiteral("Guy De"))
test("getting URI or BNode string should match") {
val node = NodeFactory.createURI("http://dbpedia.org/resource/Guy_de_Maupassant")
assert(getUriOrBNodeString(triple.getSubject).matches(node.getURI))
}
test("getting RDF Term type should match") {
assert(getRdfTermType(triple.getSubject) == 1)
}
  test("checking if data type is PlainLiteral should match") {
assert(isPlainLiteralDatatype(triple.getObject.getLiteralDatatypeURI))
}
  test("checking if [[Node]] is TypedLiteral should match") {
assert(!isTypedLiteral(triple.getObject))
}
test("getting partitioning layout from [[Triple]] should match") {
val expectedPartition = new RdfPartitionDefault(1, "http://xmlns.com/foaf/0.1/givenName",
2, "http://www.w3.org/2001/XMLSchema#string", true)
assert(fromTriple(triple).equals(expectedPartition))
}
test("determining Layout should match") {
val expectedLayout = new SchemaStringStringLang("http://dbpedia.org/resource/Guy_de_Maupassant", "Guy De", "")
assert(determineLayout(fromTriple(triple)).fromTriple(triple).equals(expectedLayout))
}
test("determining Layout Datatype should match") {
val expectedLayoutDatatype = new SchemaStringString("http://dbpedia.org/resource/Guy_de_Maupassant", "Guy De")
assert(determineLayoutDatatype(triple.getObject.getLiteralDatatypeURI).fromTriple(triple).equals(expectedLayoutDatatype))
}
}
| SANSA-Stack/Spark-RDF | sansa-rdf-common/src/test/scala/net/sansa_stack/rdf/common/partition/core/RdfPartitionerDefaultTests.scala | Scala | gpl-3.0 | 2,026 |
package qq
package macros
import cats.Eval
import cats.implicits._
import fastparse.core.Parsed
import qq.Platform.Rec._
import qq.cc.{LocalOptimizer, Parser}
import qq.data._
import qq.data.ast._
import qq.util.Recursion.RecursiveFunction
import scala.language.experimental.macros
import scala.reflect.macros.whitebox
// this is used to pre-prepare QQ programs at compile time
object QQStager {
def qqimpl(c: whitebox.Context)(pieces: c.Tree*): c.Tree = {
import c.universe._
def lift[T: Liftable](value: T): Tree = implicitly[Liftable[T]].apply(value)
implicit def disjunctionLiftable[E: Liftable, A: Liftable]: Liftable[E Either A] =
Liftable[E Either A](_.fold(
e => q"scala.util.Left(${lift[E](e)})",
a => q"scala.util.Right(${lift[A](a)})"
))
implicit def definitionLiftable[F: Liftable]: Liftable[Definition[F]] =
Liftable { case Definition(name, params, body) => q"qq.data.Definition(${lift(name)}, ${lift(params)}, ${lift(body)})" }
implicit def mathOpLiftable: Liftable[MathOperator] =
Liftable {
case Add => q"qq.data.ast.Add"
case Subtract => q"qq.data.ast.Subtract"
case Multiply => q"qq.data.ast.Multiply"
case Divide => q"qq.data.ast.Divide"
case Modulo => q"qq.data.ast.Modulo"
case Equal => q"qq.data.ast.Equal"
case LTE => q"qq.data.ast.LTE"
case GTE => q"qq.data.ast.GTE"
case LessThan => q"qq.data.ast.LessThan"
case GreaterThan => q"qq.data.ast.GreaterThan"
}
implicit def pathLiftable: Liftable[PathComponent] =
Liftable {
case CollectResults => q"qq.data.ast.CollectResults"
case SelectKey(k) => q"qq.data.ast.SelectKey(${lift(k)})"
case SelectIndex(i) => q"qq.data.ast.SelectIndex(${lift(i)})"
case SelectRange(s, e) => q"qq.data.ast.SelectRange(${lift(s)}, ${lift(e)})"
}
def pathOpLift[A](f: A => Eval[c.universe.Tree]): PathOperationF[A] => Eval[c.universe.Tree] = {
case PathGet => Eval.now(q"qq.data.ast.PathGet")
case PathModify(m) => f(m).map { r => q"qq.data.ast.PathModify($r)" }
case PathSet(s) => f(s).map { r => q"qq.data.ast.PathSet($r)" }
}
val liftFilter: RecursiveFunction[FilterAST, c.universe.Tree] = new RecursiveFunction[FilterAST, c.universe.Tree] {
override def run(value: FilterAST, loop: FilterAST => Eval[c.universe.Tree]): Eval[c.universe.Tree] = {
val sub: Eval[c.universe.Tree] = value.unFix match {
case PathOperation(pc, op) => pathOpLift(loop)(op) map { o => q"qq.data.ast.PathOperation[qq.data.FilterAST](${lift(pc)}, $o)" }
case AsBinding(name, as, in) => (loop(as) |@| loop(in)).map { (a, i) => q"qq.data.ast.AsBinding[qq.data.FilterAST](${lift(name)}, $a, $i)" }
case Dereference(name) => Eval.now(q"qq.data.ast.Dereference[qq.data.FilterAST](${lift(name)})")
case ComposeFilters(first, second) => (loop(first) |@| loop(second)).map { (f, s) => q"qq.data.ast.ComposeFilters[qq.data.FilterAST]($f, $s)" }
case SilenceExceptions(child) => loop(child) map { f => q"qq.data.ast.SilenceExceptions[qq.data.FilterAST]($f)" }
case EnlistFilter(child) => loop(child) map { f => q"qq.data.ast.EnlistFilter[qq.data.FilterAST]($f)" }
case EnsequenceFilters(first, second) => (loop(first) |@| loop(second)).map { (f, s) => q"qq.data.ast.EnsequenceFilters[qq.data.FilterAST]($f, $s)" }
case EnjectFilters(obj) => obj.traverse[Eval, (c.universe.Tree, c.universe.Tree)] { case (k, v) =>
for {
ke <- k.traverse(loop).map(e => lift(e.leftMap(lift(_))))
ve <- loop(v)
} yield (ke, ve)
}
.map { o => q"qq.data.ast.EnjectFilters[qq.data.FilterAST](${lift(o)})" }
case CallFilter(name: String, params) => params.traverse(loop).map { p => q"qq.data.ast.CallFilter[qq.data.FilterAST](${lift(name)}, $p)" }
case FilterNot() => Eval.now(q"qq.data.ast.FilterNot[qq.data.FilterAST]()")
case ConstNumber(v) => Eval.now(q"qq.data.ast.ConstNumber[qq.data.FilterAST](${lift(v)})")
case ConstBoolean(v) => Eval.now(q"qq.data.ast.ConstBoolean[qq.data.FilterAST](${lift(v)})")
case ConstString(v) => Eval.now(q"qq.data.ast.ConstString[qq.data.FilterAST](${lift(v)})")
case FilterMath(first, second, op) => (loop(first) |@| loop(second)).map { (f, s) => q"qq.data.ast.FilterMath[qq.data.FilterAST]($f, $s, ${lift(op)})" }
}
sub.map(f => q"qq.util.Fix[qq.data.ast.FilterComponent]($f)")
}
}
implicit def concreteFilterLiftable: Liftable[FilterAST] =
Liftable(liftFilter(_))
implicit def programLiftable: Liftable[Program[FilterAST]] = Liftable[Program[FilterAST]](
value => q"qq.data.Program(${lift(value.defns)}, ${lift(value.main)})"
)
val program = c.prefix.tree match {
// access data of string interpolation
case Apply(_, List(Apply(_, rawParts))) =>
if (rawParts.length != 1) {
c.abort(c.enclosingPosition, "$ detected. qq is not an interpolator, it's for a single string")
}
rawParts.head match {
case Literal(Constant(str: String)) => str
case _ =>
c.abort(c.enclosingPosition, "invalid") // TODO: make the error message more readable
}
case _ =>
c.abort(c.enclosingPosition, "invalid") // TODO: make the error message more readable
}
val parsedProgram: Program[FilterAST] = Parser.program.parse(program) match {
      case f @ Parsed.Failure(_, _, _) =>
c.abort(c.enclosingPosition, "QQ parsing error: " + f.extra.traced.trace)
case Parsed.Success(prog, _) => prog
}
val optimizedProgram = LocalOptimizer.optimizeProgram(parsedProgram)
lift(optimizedProgram)
}
final implicit class qqops(val sc: StringContext) {
def qq(pieces: Any*): Program[FilterAST] = macro QQStager.qqimpl
}
}
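// Usage sketch (hypothetical filter text; the program is parsed and optimized at
// compile time by the macro above):
//   import qq.macros.QQStager._
//   val staged: Program[FilterAST] = qq".foo"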
| edmundnoble/slate | qqmacros/shared/src/main/scala/qq/macros/QQStager.scala | Scala | mit | 5,981 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.utils.events
import android.app.{Activity, Fragment, Service}
import android.view.View
trait EventContext {
private object lock
private[this] var started = false
private[this] var destroyed = false
private[this] var observers = Set.empty[Subscription]
protected implicit def eventContext: EventContext = this
override protected def finalize(): Unit = {
lock.synchronized { if (! destroyed) onContextDestroy() }
super.finalize()
}
def onContextStart(): Unit = {
lock.synchronized {
if (! started) {
started = true
        observers foreach (_.subscribe()) // XXX during this, subscribe may call Observable#onWire which in turn may call register, which will change observers
}
}
}
def onContextStop(): Unit = {
lock.synchronized {
if (started) {
started = false
observers foreach (_.unsubscribe())
}
}
}
def onContextDestroy(): Unit = {
lock.synchronized {
destroyed = true
val observersToDestroy = observers
observers = Set.empty
observersToDestroy foreach (_.destroy())
}
}
def register(observer: Subscription): Unit = {
lock.synchronized {
assert(!destroyed, "context already destroyed")
if (! observers.contains(observer)) {
observers += observer
if (started) observer.subscribe()
}
}
}
def unregister(observer: Subscription): Unit =
lock.synchronized(observers -= observer)
def isContextStarted: Boolean = lock.synchronized(started && ! destroyed)
}
object EventContext {
object Implicits {
implicit val global: EventContext = EventContext.Global
}
object Global extends EventContext {
override def register(observer: Subscription): Unit = () // do nothing, global context will never need the observers (can not be stopped)
override def unregister(observer: Subscription): Unit = ()
override def onContextStart(): Unit = ()
override def onContextStop(): Unit = ()
override def onContextDestroy(): Unit = ()
override def isContextStarted: Boolean = true
}
}
trait ActivityEventContext extends Activity with EventContext {
override def onResume(): Unit = {
onContextStart()
super.onResume()
}
override def onPause(): Unit = {
super.onPause()
onContextStop()
}
override def onDestroy(): Unit = {
super.onDestroy()
onContextDestroy()
}
}
trait FragmentEventContext extends Fragment with EventContext {
override def onResume(): Unit = {
onContextStart()
super.onResume()
}
override def onPause(): Unit = {
super.onPause()
onContextStop()
}
override def onDestroy(): Unit = {
super.onDestroy()
onContextDestroy()
}
}
trait ViewEventContext extends View with EventContext {
private var attached = false
override def onAttachedToWindow(): Unit = {
super.onAttachedToWindow()
attached = true
if (getVisibility != View.GONE) onContextStart()
}
override def setVisibility(visibility: Int): Unit = {
super.setVisibility(visibility)
if (visibility != View.GONE && attached) onContextStart()
else onContextStop()
}
override def onDetachedFromWindow(): Unit = {
super.onDetachedFromWindow()
attached = false
onContextStop()
}
}
trait ServiceEventContext extends Service with EventContext {
override def onCreate(): Unit = {
super.onCreate()
onContextStart()
}
override def onDestroy(): Unit = {
onContextStop()
onContextDestroy()
super.onDestroy()
}
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/utils/events/EventContext.scala | Scala | gpl-3.0 | 4,258 |
package nexus.diff.ops
import nexus.diff._
/**
* @author Tongfei Chen
*/
object Sinh extends PolyOp1
object Cosh extends PolyOp1
object Tanh extends PolyOp1
object ArSinh extends PolyOp1
object ArCosh extends PolyOp1
object ArTanh extends PolyOp1
| ctongfei/nexus | diff/src/main/scala/nexus/diff/ops/hyperbolic.scala | Scala | mit | 256 |
package com.novocode.erased.test
import org.junit.Test
import com.novocode.erased._
import com.novocode.erased.KList._
class KListTest {
@Test
def testKList {
val l1 = 42 |: "foo" |: Some(1.0) |: "bar" |: KNil[KList.Identity]
val l1a = l1.head
val l1b = l1.tail.head
val l1c = l1.tail.tail.head
val l1d = l1.tail.tail.tail.head
//implicitly[l1.type <:< (Int |: String |: Some[Double] |: String |: KNil[KList.Identity])]
val o1 = Some(42) |: None |: Some(1.0) |: Some("bar") |: KNil[Option]
implicitly[o1.type <:< (KList { type Cons[X] = Option[X] })]
val o1a = o1.head
val o1b = o1.tail.head
val o1c = o1.tail.tail.head
val o1d = o1.tail.tail.tail.head
val o1id = o1.asIdentity
implicitly[o1id.type <:< KList.KHList]
implicitly[o1id.type <:< (KList { type Cons[X] = X })]
val o1ida = o1id.head
val o1idb = o1id.tail.head
val o1idc = o1id.tail.tail.head
val o1idd = o1id.tail.tail.tail.head
val o1seq = o1.map[Seq](NaturalTransformation.optionToSeq)
implicitly[o1seq.type <:< (KList { type Cons[X] = Seq[X] })]
val o1seqa = o1seq.head
val o1seqb = o1seq.tail.head
val o1seqc = o1seq.tail.tail.head
val o1seqd = o1seq.tail.tail.tail.head
val o1seqat = o1seqa: Seq[Int]
val o1seqbt = o1seqb: Seq[_]
val o1seqct = o1seqc: Seq[Double]
val o1seqdt = o1seqd: Seq[String]
println(l1)
val l2 = l1.drop(Nat._3)
println(l2)
val e0: Int = l1(Nat._0)
val e2a: Option[Double] = l1.apply(Nat._2)
val e2b: Option[Double] = l1.drop(Nat._2).head
val x1 = null : l1.type#Tail#Tail#Tail#Head
val x2 = null : Nat._3#Fold[HList, ({ type L[X <: HList] = X#Tail })#L, l1.type#Self]#Head
val x3: Option[Double] = null : l1.type#Drop[Nat._2]#Head
implicitly[l1.Length =:= Nat._4]
implicitly[l2.Length =:= Nat._1]
println((l1.length, l2.length))
import HList._
val l3a = Seq("foo") |: Seq(42) |: KNil[Seq]
val l3b = Seq(true) |: Seq.empty[Float] |: Seq(Some(1.0)) |: KNil[Seq]
val l3 = l3a |:: l3b
val l31: Seq[String] = l3(Nat._0)
val l32: Seq[Int] = l3(Nat._1)
val l33: Seq[Boolean] = l3(Nat._2)
val l34: Seq[Float] = l3(Nat._3)
val l35: Seq[Some[Double]] = l3(Nat._4)
//implicitly[(l3a.type |:: l3b.type) <:< l3.type]
//println(l3 : String |: Int |: Boolean |: Float |: Some[Double] |: KNil[Seq])
}
}
| szeiger/ErasedTypes | src/test/scala/com/novocode/erased/test/KListTest.scala | Scala | bsd-2-clause | 2,407 |
/**
* Copyright (c) 2013-2016 Extended Mind Technologies Oy
*
* This file is part of Extended Mind.
*
* Extended Mind is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.extendedmind.db
import org.neo4j.graphdb.traversal.Evaluator
import org.neo4j.graphdb.Node
import org.neo4j.graphdb.Path
import org.neo4j.graphdb.traversal.Evaluation
import java.util.UUID
import org.extendedmind.security.IdUtils
case class PropertyEvaluator(label: Label, property: String, propertyStringValue: Option[String] = None,
foundEvaluation: Evaluation = Evaluation.INCLUDE_AND_PRUNE,
notFoundEvaluation: Evaluation = Evaluation.EXCLUDE_AND_CONTINUE) extends Evaluator{
override def evaluate(path: Path): Evaluation = {
val currentNode: Node = path.endNode();
if (currentNode.hasLabel(label) && currentNode.hasProperty(property) &&
(propertyStringValue.isEmpty || propertyStringValue.get == currentNode.getProperty(property).asInstanceOf[String])){
return foundEvaluation
}
return notFoundEvaluation
}
} | ttiurani/extendedmind | backend/src/main/scala/org/extendedmind/db/PropertyEvaluator.scala | Scala | agpl-3.0 | 1,698 |
package fpscala.chapter8
import fpscala.BaseSpec
class StateSpec extends BaseSpec{
val simpleRng = RNG.Simple(10l)
it should "map a state " in {
//convert a string to a state[string]
val s1:State[RNG,String] = State.unit("hello")
val s2: State[RNG,Int] = s1.map { x => x.length() }
val (result,nextRNG):(Int,RNG) = s2.run(simpleRng)
result should be (5)
}
it should "combine two states" in {
val s1:State[RNG,String] = State.unit("hello")
val s2: State[RNG,String] = State.unit("world")
val s3:State[RNG,String] = s1.map2(s2)((a,b) => a + b)
val (result,nextRNG):(String,RNG) = s3.run(simpleRng)
result should be ("helloworld")
}
it should "flatMap to learn" in {
val s1:State[RNG,String] = State.unit("hello")
val flatMapper:(String => State[RNG,String]) = { aString => State.unit(aString.toUpperCase()) }
val s2:State[RNG,String] = s1.flatMap(flatMapper)
val (result,nextRNG):(String,RNG) = s2.run(simpleRng)
result should be ("HELLO")
}
} | sajit/learnyou | scala/minimal-scala/src/test/scala/fpscala/chapter8/StateSpec.scala | Scala | mit | 1,042 |
package skinny.controller
import skinny._
import skinny.controller.assets._
import skinny.util.LoanPattern._
import scala.io.Source
import java.io.File
import org.joda.time.DateTimeZone
import org.joda.time.format.DateTimeFormat
/**
* Assets controller.
*/
class AssetsController extends SkinnyController {
// see https://github.com/scalatra/scalatra/issues/349
addMimeMapping("text/css", "css")
addMimeMapping("application/octet-stream", "map")
def sourceMapsEnabled: Boolean = SkinnyEnv.isDevelopment() || SkinnyEnv.isTest()
/**
* Returns assets root path.
*/
def assetsRootPath = "/assets"
/**
* Returns assets/js root path.
*/
def jsRootPath = s"${assetsRootPath}/js"
/**
* Returns assets/css root path.
*/
def cssRootPath = s"${assetsRootPath}/css"
/**
   * Indicates whether this controller is disabled in the staging env.
*/
def isDisabledInStaging: Boolean = true
/**
   * Indicates whether this controller is disabled in the production env.
*/
def isDisabledInProduction: Boolean = true
/**
   * Indicates whether this controller is enabled in the current env.
*/
def isEnabled: Boolean = {
if (SkinnyEnv.isProduction()) !isDisabledInProduction
else if (SkinnyEnv.isStaging()) !isDisabledInStaging
else true
}
/**
* Base path for assets files.
*/
val basePath = "/WEB-INF/assets"
/**
* Registered JS Compilers
*/
private[this] val jsCompilers = new collection.mutable.ListBuffer[AssetCompiler]
/**
* Registered CSS Compilers
*/
private[this] val cssCompilers = new collection.mutable.ListBuffer[AssetCompiler]
// registered compilers by default
registerJsCompiler(CoffeeScriptAssetCompiler)
registerJsCompiler(ReactJSXAssetCompiler)
registerJsCompiler(ScalaJSAssetCompiler) // just provides Scala source code
registerCssCompiler(LessAssetCompiler)
registerCssCompiler(ScssAssetCompiler)
registerCssCompiler(SassAssetCompiler)
/**
* Registers JS compiler to this controller.
* @param compiler compiler
*/
def registerJsCompiler(compiler: AssetCompiler) = jsCompilers.append(compiler)
/**
* Registers CSS compiler to this controller.
* @param compiler compiler
*/
def registerCssCompiler(compiler: AssetCompiler) = cssCompilers.append(compiler)
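  // Example (hypothetical compiler object): subclasses can plug in further compilers
  // the same way, e.g. registerJsCompiler(TypeScriptAssetCompiler)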
def path(extension: String): Option[String] = multiParams("splat").headOption.flatMap { fullPath =>
    fullPath.split("\\.") match {
case Array(path, e) if e == extension => Some(path)
case _ => None
}
}
def sourceMapsPath(): Option[String] = path("map")
/**
* Returns js or coffee assets.
*/
def js(): Any = if (path("js") == Some("skinny-framework")) {
path("js").flatMap { p =>
contentType = "application/javascript"
jsFromClassPath(p)
}.getOrElse(
throw new IllegalStateException("skinny-framework.js should be found. This is a framework bug.")
)
} else {
if (isEnabled) {
path("js").map { path =>
jsFromClassPath(path)
.orElse(compiledJsFromClassPath(path))
.orElse(jsFromFile(path))
.orElse(compiledJsFromFile(path))
.map { js =>
contentType = "application/javascript"
js
}.getOrElse(pass())
}.orElse(jsSourceMapsFile()).getOrElse(pass())
} else pass()
}
private def jsSourceMapsFile(): Option[Any] = {
if (sourceMapsEnabled) {
sourceMapsPath.flatMap { path =>
contentType = "application/octet-stream"
sourceMapsFromFile(path, jsCompilers)
}.orElse {
jsCompilers.find(c => path(c.extension).isDefined).flatMap { compiler =>
path(compiler.extension).map { path =>
contentType = "application/octet-stream"
val file = new File(servletContext.getRealPath(s"${basePath}/${compiler.extension}/${path}.${compiler.extension}"))
using(Source.fromFile(file))(map => map.mkString)
}
}
}
} else None
}
private def jsFromClassPath(path: String): Option[String] = {
ClassPathResourceLoader.getClassPathResource(s"${basePath}/js/${path}.js").map { resource =>
using(resource.stream) { stream =>
setLastModified(resource.lastModified)
if (isModified(resource.lastModified)) using(Source.fromInputStream(resource.stream))(_.mkString)
else halt(304)
}
}
}
private def jsFromFile(path: String): Option[String] = {
val jsFile = new File(servletContext.getRealPath(s"${basePath}/js/${path}.js"))
if (jsFile.exists()) {
setLastModified(jsFile.lastModified)
if (isModified(jsFile.lastModified)) Some(using(Source.fromFile(jsFile))(js => js.mkString))
else halt(304)
} else None
}
private def compiledJsFromClassPath(path: String): Option[String] = compiledCodeFromClassPath(path, jsCompilers)
private def compiledJsFromFile(path: String): Option[String] = compiledCodeFromFile(path, jsCompilers)
private def sourceMapsFromFile(path: String, compilers: Seq[AssetCompiler]): Option[String] = {
compilers.find { compiler =>
new File(servletContext.getRealPath(s"${basePath}/${compiler.extension}/${path}.map")).exists()
}.map { compiler =>
val mapFile = new File(servletContext.getRealPath(s"${basePath}/${compiler.extension}/${path}.map"))
setLastModified(mapFile.lastModified)
if (isModified(mapFile.lastModified)) using(Source.fromFile(mapFile))(map => map.mkString)
else halt(304)
}
}
/**
* Returns css or less assets.
*/
def css(): Any = if (isEnabled) {
multiParams("splat").headOption.flatMap { fullPath =>
      fullPath.split("\\.") match {
case Array(path, "css") => Some(path)
case _ => None
}
}.map { path =>
cssFromClassPath(path)
.orElse(compiledCssFromClassPath(path))
.orElse(cssFromFile(path))
.orElse(compiledCssFromFile(path))
.map { css =>
contentType = "text/css"
css
}.getOrElse(pass())
}.orElse(cssSourceMapsFile()).getOrElse(pass())
} else pass()
private def cssSourceMapsFile(): Option[Any] = {
if (sourceMapsEnabled) {
sourceMapsPath.flatMap { path =>
contentType = "application/octet-stream"
sourceMapsFromFile(path, cssCompilers)
}.orElse {
cssCompilers.find(c => path(c.extension).isDefined).flatMap { compiler =>
path(compiler.extension).map { path =>
contentType = "application/octet-stream"
val file = new File(servletContext.getRealPath(s"${basePath}/${compiler.extension}/${path}.${compiler.extension}"))
using(Source.fromFile(file))(map => map.mkString)
}
}
}
} else None
}
def cssFromClassPath(path: String): Option[String] = {
ClassPathResourceLoader.getClassPathResource(s"${basePath}/css/${path}.css").map { resource =>
using(resource.stream) { stream =>
setLastModified(resource.lastModified)
if (isModified(resource.lastModified)) {
using(Source.fromInputStream(resource.stream))(_.mkString)
} else halt(304)
}
}
}
private def cssFromFile(path: String): Option[String] = {
val cssFile = new File(servletContext.getRealPath(s"${basePath}/css/${path}.css"))
if (cssFile.exists()) {
setLastModified(cssFile.lastModified)
if (isModified(cssFile.lastModified)) Some(using(Source.fromFile(cssFile))(css => css.mkString))
else halt(304)
} else None
}
private def compiledCssFromClassPath(path: String): Option[String] = compiledCodeFromClassPath(path, cssCompilers)
private def compiledCssFromFile(path: String): Option[String] = compiledCodeFromFile(path, cssCompilers)
val PATTERN_RFC1123 = "EEE, dd MMM yyyy HH:mm:ss zzz"
val PATTERN_RFC1036 = "EEE, dd-MMM-yy HH:mm:ss zzz"
val PATTERN_ASCTIME = "EEE MMM d HH:mm:ss yyyy"
val modifiedHeaderFormats = Seq(PATTERN_RFC1123, PATTERN_RFC1036, PATTERN_ASCTIME).map { pattern =>
DateTimeFormat.forPattern(pattern).withZone(DateTimeZone.UTC).withLocale(java.util.Locale.ENGLISH)
}
def setLastModified(lastModified: Long): Unit = {
val format = modifiedHeaderFormats.head
response.setHeader("Last-Modified", format.print(lastModified).replaceFirst("UTC$", "GMT"))
}
def isModified(resourceLastModified: Long): Boolean = {
    request.header("If-Modified-Since").map(_.replaceFirst("^\"", "").replaceFirst("\"$", "")).map { ifModifiedSince =>
modifiedHeaderFormats.flatMap { formatter =>
try Option(formatter.parseDateTime(ifModifiedSince))
catch { case scala.util.control.NonFatal(e) => None }
}.headOption.map(_.getMillis < resourceLastModified) getOrElse true
} getOrElse true
}
private def compiledCodeFromClassPath(path: String, compilers: Seq[AssetCompiler]): Option[String] = {
// try to load from class path resources
compilers.flatMap(c => c.findClassPathResource(basePath, path).map(r => (c, r))).headOption.map {
case (compiler, resource) => using(resource.stream) { stream =>
setLastModified(resource.lastModified)
if (isModified(resource.lastModified)) {
compiler.compile(path, using(Source.fromInputStream(resource.stream))(_.mkString))
} else halt(304)
}
}
}
private def compiledCodeFromFile(path: String, compilers: Seq[AssetCompiler]): Option[String] = {
// load content from real files
compilers.flatMap { c =>
val file = c.findRealFile(servletContext, basePath, path)
if (file.exists) Some((c, file)) else None
}.headOption.map {
case (compiler, file) =>
setLastModified(file.lastModified)
if (isModified(file.lastModified)) {
using(Source.fromFile(file))(code => compiler.compile(file.getPath, code.mkString))
} else halt(304)
}
}
}
/**
* AssetsController with default configurations.
*/
object AssetsController extends AssetsController with Routes {
// Unfortunately, *.* seems not to work.
val jsRootUrl = get(s"${jsRootPath}/*")(js).as('js)
val cssRootUrl = get(s"${cssRootPath}/*")(css).as('css)
}
| holycattle/skinny-framework | assets/src/main/scala/skinny/controller/AssetsController.scala | Scala | mit | 10,108 |
package sink
import akka.stream.scaladsl.Sink
import logic._
import logic.game._
import org.fusesource.jansi.{Ansi, AnsiConsole}
object GameStateSink {
private lazy val ansiState = Ansi.ansi()
private def handleGameState(gameState: GameState): Unit = gameState match {
case Running(foodPosition, snake, _, _, _) =>
drawRunningGameState(foodPosition, snake.segmentPositions)
case Paused(stateBeforePause) =>
handleGameState(stateBeforePause)
case Exited(_) =>
}
private def drawRunningGameState(foodPosition: Position, snakeSegmentPositions: Seq[Position]): Unit = {
AnsiConsole.systemInstall()
ansiState.eraseScreen()
    drawObject('F', foodPosition)
snakeSegmentPositions.foreach(drawObject('*', _))
AnsiConsole.out.println(ansiState)
}
private def drawObject(objectCharacter: Char, position: Position) =
ansiState.cursor(position.y, position.x).a(objectCharacter)
def getGameStateSink = Sink.foreach(handleGameState)
} | margorczynski/stateless-snake | src/main/scala/sink/GameStateSink.scala | Scala | apache-2.0 | 988 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.cassandra.tools
import com.beust.jcommander.JCommander
import org.locationtech.geomesa.cassandra.tools.commands._
import org.locationtech.geomesa.cassandra.tools.export.CassandraExportCommand
import org.locationtech.geomesa.tools.status._
import org.locationtech.geomesa.tools.{Command, Runner}
object CassandraRunner extends Runner {
override val name: String = "geomesa-cassandra"
override def createCommands(jc: JCommander): Seq[Command] = Seq(
new CassandraGetTypeNamesCommand,
new CassandraDescribeSchemaCommand,
new HelpCommand(this, jc),
new EnvironmentCommand,
new VersionCommand,
new CassandraGetSftConfigCommand,
new CassandraCreateSchemaCommand,
new CassandraRemoveSchemaCommand,
new CassandraDeleteFeaturesCommand,
new CassandraIngestCommand,
new CassandraExportCommand,
new CassandraExplainCommand,
new ConfigureCommand,
new ClasspathCommand,
new ScalaConsoleCommand
)
override def environmentErrorInfo(): Option[String] = {
if (sys.env.get("CASSANDRA_HOME").isEmpty) {
      Option("Warning: you have not set the CASSANDRA_HOME environment variable." +
        "\nGeoMesa tools will not run without the appropriate Cassandra jars on the classpath.")
} else { None }
}
}
| jahhulbert-ccri/geomesa | geomesa-cassandra/geomesa-cassandra-tools/src/main/scala/org/locationtech/geomesa/cassandra/tools/CassandraRunner.scala | Scala | apache-2.0 | 1,765 |
package sparcass
import java.util.Date
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql.{ClusteringColumn, ColumnDef, PartitionKeyColumn, TableDef, _}
import com.datastax.spark.connector.types.{TimestampParser, _}
import org.apache.log4j.Logger
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.cassandra.CassandraSQLContext
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Tool for importing GitHub log files into Cassandra
*/
object GitHubLogsImporter {
val logger = Logger.getLogger(GitHubLogsImporter.getClass)
def main(args: Array[String]) {
// Load basic properties from the arguments
val props: GitHubLogsProps = GitHubLogsArgsProps(args)
// Configures Spark.
val conf = new SparkConf(true)
.set("spark.cassandra.connection.host", props.cassandraHost)
// Connect to the Spark cluster
val sc = new SparkContext(conf)
//Create a Cassandra SQL context
val cqlContext = new CassandraSQLContext(sc)
// Create the keyspace and table using the manual connector.
// Normally this would already exist in the Cassandra cluster prior to the table creation, but for our purposes we create it here
CassandraConnector(conf).withSessionDo { session =>
session.execute(s"CREATE KEYSPACE IF NOT EXISTS ${props.cassandraKeyspace} WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 2 };")
// Yet another option for creating our table, using the manual connector
// session.execute(s"CREATE TABLE IF NOT EXISTS ${props.cassandraKeyspace}.${props.cassandraTable} (id bigint PRIMARY KEY, type text);")
}
logger.info("Reading json file(s) into Spark...")
// Get the data from the input json file and cache it, as we will be doing multiple saves
val importedRDD = cqlContext.read.json(props.inputFile).rdd.cache
val defaultTableExists = checkTableExists(sc, props.cassandraKeyspace, props.cassandraTable)
logger.info("[Create table and] Import data into the default table (primary key = id).")
if(defaultTableExists)
// We already have the table so just import the data
importLogsDefault(importedRDD, props.cassandraKeyspace, props.cassandraTable)
else
// We need to create the table first; let's do it the default way (first column will become the primary key)
createAndImportLogsDefault(importedRDD, props.cassandraKeyspace, props.cassandraTable)
logger.info("[Create table and] Import data into the better structure table.")
    // Why "better table"? Whether it is indeed better depends on the use case, but for an hourly-based
    // time series this should do just fine.
val betterTableName = props.cassandraTable + "_hourly_by_ts_id"
val betterTableExists = checkTableExists(sc, props.cassandraKeyspace, betterTableName)
if(betterTableExists)
// We already have the table so just import the data
importHourlyLogsBy_CreatedAt_Id(importedRDD, props.cassandraKeyspace, betterTableName)
else
// We need to create the table first; let's do it through the API for the fun of it
createAndImportHourlyLogsBy_CreatedAt_Id(importedRDD, props.cassandraKeyspace, betterTableName)
// TODO Add some progress report, catch exceptions....
logger.info("Ok, we're done.")
}
def checkTableExists(sc: SparkContext, keyspace: String, tablename: String) : Boolean =
sc.cassandraTable("system", "schema_columns").
where("keyspace_name=? and columnfamily_name=?", keyspace, tablename).
count > 0
def importLogsDefault(rdd: RDD[Row], keyspaceName: String, tableName: String)(implicit converter: (Row) => LogRecordForImport): Unit = {
// Map rdd into custom data structure and create table
val events = rdd.map(converter)
//TODO Figure out replication configuration
events.saveToCassandra(keyspaceName, tableName)
}
/**
* Import the events RDD (of rows) into the Cassandra database,
* with minimal setup (by default partition key will be the `type` and
* no clustering columns defined).
* This creates a bit of a problem, since the type is not a good primary key
*
* @param rdd
* @param keyspaceName
* @param tableName
* @param converter
* @return
*/
def createAndImportLogsDefault(rdd: RDD[Row], keyspaceName: String, tableName: String)(implicit converter: (Row) => LogRecordForImport): Unit = {
// Map rdd into custom data structure and create table
val events = rdd.map(converter)
//TODO Figure out replication configuration
events.saveAsCassandraTable(keyspaceName, tableName)
}
/**
* Import the events RDD (of rows) into the Cassandra database,
* using as partition key the UTC date and hour and as clustering columns
* the "created_at" and "ev_id".
* @param rdd
* @param keyspaceName
* @param tableName
* @param converter
* @return
*/
def importHourlyLogsBy_CreatedAt_Id(rdd: RDD[Row], keyspaceName: String, tableName: String)(implicit converter: (Row) => LogRecordForImport): Unit = {
// Map rdd into custom data structure and save the data
val events = rdd.map(converter)
events.saveToCassandra(keyspaceName, tableName,
SomeColumns("date_utc", "hour_utc",
"created_at", "ev_id",
"repo", "actor", "ev_type" ))
}
/**
* Create the proper table and import the events RDD (of rows) into the Cassandra database,
* using as partition key the UTC date and hour and as clustering columns
* the "created_at" and "ev_id".
*
*
* @param rdd
* @param keyspaceName
* @param tableName
* @param converter
* @return
*/
def createAndImportHourlyLogsBy_CreatedAt_Id(rdd: RDD[Row], keyspaceName: String, tableName: String)(implicit converter: (Row) => LogRecordForImport): Unit = {
logger.info(s"Save data into Cassandra $keyspaceName.$tableName ...")
logger.info("Partition key is date, hour and ev_type, actor, repo and id are clustering columns.")
// TODO Think about what is the best partition key/primary key combination
// Define columns
// Partitioning columns
val p1Col = new ColumnDef("date_utc", PartitionKeyColumn, VarCharType)
val p2Col = new ColumnDef("hour_utc", PartitionKeyColumn, IntType)
// Clustering columns
val c1Col = new ColumnDef("created_at", ClusteringColumn(0), TimestampType)
val c2Col = new ColumnDef("ev_id", ClusteringColumn(1), BigIntType)
// Regular columns
val r1Col = new ColumnDef("repo", RegularColumn, VarCharType)
val r2Col = new ColumnDef("actor", RegularColumn, VarCharType)
val r3Col = new ColumnDef("ev_type", RegularColumn, VarCharType)
// Create table definition
val table = TableDef(keyspaceName, tableName,
Seq(p1Col, p2Col), // Partitioning columns
Seq(c1Col, c2Col), // Clustering columns
Seq(r1Col, r2Col, r3Col)) // Regular columns
// Map rdd into custom data structure and create table
val events = rdd.map(converter)
events.saveAsCassandraTableEx(table,
SomeColumns("date_utc", "hour_utc",
"created_at", "ev_id",
"repo", "actor", "ev_type" ))
}
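  // For reference, the TableDef above corresponds to roughly this CQL (a sketch; the exact
  // statement issued by the connector may differ in column order and table options):
  //   CREATE TABLE <keyspace>.<table> (
  //     date_utc   varchar,
  //     hour_utc   int,
  //     created_at timestamp,
  //     ev_id      bigint,
  //     repo       varchar,
  //     actor      varchar,
  //     ev_type    varchar,
  //     PRIMARY KEY ((date_utc, hour_utc), created_at, ev_id)
  //   )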
/**
* Define structure for the GitHub logs rdd data as it will be exported to Cassandra.
*
* It does not map the entire Json structure, just the parts considered relevant.
*
* To solve the time problem once and for all, the date and time will be UTC.
*
* @param ev_id
* @param created_at
* @param ev_type
* @param actor
* @param repo
*/
case class LogRecordForImport(ev_id: Long, created_at: Date, ev_type: String, actor: String, repo: String) {
import java.time.ZoneId
import java.time.format.DateTimeFormatter
private val utcDateTime = created_at.toInstant.atZone(ZoneId.of("UTC"))
val date_utc = utcDateTime.format(DateTimeFormatter.BASIC_ISO_DATE)
val hour_utc = utcDateTime.getHour
}
/**
* Implicit conversion between an SQL row and EventByTypeActorNameId
*
* Input row columns: Array(actor, created_at, id, org, payload, public, repo, type)
* Sample input row: [ [https://avatars.githubusercontent.com/u/9152315?, ,9152315,davidjhulse,https://api.github.com/users/davidjhulse],2015-01-01T00:00:00Z,2489368070,null,[null,86ffa724b4d70fce46e760f8cc080f5ec3d7d85f,null,WrappedArray([ [[email protected],davidjhulse],true,Altered BingBot.jar
* Fixed issue with multiple account support,a9b22a6d80c1e0bb49c1cf75a3c075b642c28f81,https://api.github.com/repos/davidjhulse/davesbingrewardsbot/commits/a9b22a6d80c1e0bb49c1cf75a3c075b642c28f81]),null,1,null,a9b22a6d80c1e0bb49c1cf75a3c075b642c28f81,null,null,null,null,null,null,536740396,null,refs/heads/master,null,null,1],true,[28635890,davidjhulse/davesbingrewardsbot,https://api.github.com/repos/davidjhulse/davesbingrewardsbot],PushEvent]
*
* @param row
* @return
*/
implicit def sqlRowToEventByTypeActorNameId(row: Row): LogRecordForImport =
LogRecordForImport(
ev_id = row(2).toString.toLong,
created_at = TimestampParser.parse(row(1).toString),
ev_type = row(7).toString,
actor = row(0).toString.split(",")(3),
repo = row(6).toString.split(",")(1))
}
| tupol/spark-learning | src/main/scala/sparcass/GitHubLogsImporter.scala | Scala | apache-2.0 | 9,282 |
package spire.random
package rng
import java.io._
class Device(f: File) extends Generator { self =>
if (!f.canRead)
throw new IllegalArgumentException("can't read %s" format f)
private var dis = new DataInputStream(new FileInputStream(f))
def copyInit: Generator = new Device(f)
def getSeedBytes(): Array[Byte] =
throw new UnsupportedOperationException("getSeedBytes")
def setSeedBytes(bytes: Array[Byte]): Unit =
throw new UnsupportedOperationException("setSeedBytes")
def nextInt(): Int = dis.readInt()
def nextLong(): Long = dis.readLong()
}
object Device {
def apply(path: String): Device = new Device(new File(path))
def random: Device = new Device(new File("/dev/random"))
def urandom: Device = new Device(new File("/dev/urandom"))
}
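// Usage sketch (assumes a Unix-like system exposing /dev/urandom):
//   val gen = Device.urandom
//   val n = gen.nextInt()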
class CycledFile(f: File) extends Generator { self =>
private var dis: DataInputStream = null
if (!f.canRead)
throw new IllegalArgumentException("can't read %s" format f)
else
reinit()
try {
nextLong()
} catch {
case e: EOFException =>
throw new IllegalArgumentException("%s contains less than 8 bytes" format f)
}
def reinit(): Unit = {
if (dis != null) dis.close()
dis = new DataInputStream(new FileInputStream(f))
}
def copyInit: Generator = new CycledFile(f)
def getSeedBytes(): Array[Byte] =
throw new UnsupportedOperationException("getSeedBytes")
def setSeedBytes(bytes: Array[Byte]): Unit =
throw new UnsupportedOperationException("setSeedBytes")
def nextInt(): Int = try {
dis.readInt()
} catch {
case e: EOFException =>
reinit()
dis.readInt()
}
def nextLong(): Long = try {
dis.readLong()
} catch {
case e: EOFException =>
reinit()
      dis.readLong()
}
}
object CycledFile {
def apply(path: String): CycledFile = new CycledFile(new File(path))
}
| AlecZorab/spire | core/src/main/scala/spire/random/rng/DevPrng.scala | Scala | mit | 1,855 |
package freetransformer
import scala.util.{Failure, Try}
trait Core {
def read(data: String): Try[List[String]]
def calculateA(el: Entity): (Entity, Either[String, EntityWithA])
def calculateB(el: Entity): (Entity, Either[String, EntityWithB])
def join(a: List[(Entity, EntityWithA)], b: List[(Entity, EntityWithB)]): List[Either[String, Result]]
def reject[E](data: List[(Entity, Either[String, E])]): List[(Entity, E)]
}
object Core {
def apply() = new CoreImpl
}
class CoreImpl extends Core {
def read(data: String): Try[List[String]] = if (data.isEmpty) Failure(new Exception("No items")) else Try(data.split(",").toList)
def calculateA(el: Entity): (Entity, Either[String, EntityWithA]) =
if (el.value.length > 2) {
println(s"CalculateA for ${el.value}")
(el, Right(EntityWithA(el.value, "A")))
}
else (el, Left("length need to be greater than 2"))
def calculateB(el: Entity): (Entity, Either[String, EntityWithB]) =
if (el.value.length > 2) {
println(s"CalculateB for ${el.value}")
(el, Right(EntityWithB(el.value, "B")))
}
else (el, Left("length need to be greater than 2"))
def reject[E](data: List[(Entity, Either[String, E])]): List[(Entity, E)] = {
data.filter(_._2.isLeft).foreach(el => println(s"Rejected ${el._1} entity because: ${el._2.left.get}"))
data.filter(_._2.isRight).map(el => (el._1, el._2.right.get))
}
def join(a: List[(Entity, EntityWithA)], b: List[(Entity, EntityWithB)]): List[Either[String, Result]] = {
a.map { case (entity, ea) =>
b.find(_._1 == entity).fold[Either[String, Result]](Left("Err")) { case (_, eb) =>
println(s"Valid result: ${entity.value} a: ${ea.a}, b: ${eb.b}")
Right(Result(entity.value, ea.a, eb.b))
}
}
}
}
| mateuszjancy/intro-to-scala | workshop07/FreeTransformer/src/main/scala/freetransformer/Core.scala | Scala | apache-2.0 | 1,790 |
package org.singingwizard.screeps.ai
import org.singingwizard.screeps.api._
import org.singingwizard.screeps.api.ScreepsContext._
import prickle.CompositePickler
import prickle.PicklerPair
import org.singingwizard.screeps.ai.tasks._
trait Task {
/** Execute the task.
*/
def run()(implicit ctx: AIContext): Unit
def reschedule()(implicit ctx: AIContext) = {
ctx.schedule(this)
}
def fail(msg: String) = {
Console.log(s"Task '$this' failed with: $msg")
}
}
object Task {
implicit val taskPickler = Task.pickler(GetEnergy, SpawnCreep, TakeEnergyTo)
def pickler(taskTypes: TaskCompanion*): PicklerPair[Task] = {
taskTypes.foldRight(CompositePickler[Task])(_.register(_))
}
}
trait TaskCompanion {
def register(pickler: PicklerPair[Task]): PicklerPair[Task]
}
trait TaskWithContinuation extends Task {
def continuation: TraversableOnce[Task]
def finish()(implicit ctx: AIContext) = {
continuation.map(ctx.schedule(_))
}
} | arthurp/amps-screeps | src/main/scala/org/singingwizard/screeps/ai/Task.scala | Scala | gpl-3.0 | 982 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.execution.streaming.state._
import org.apache.spark.sql.execution.SparkPlan
/** Used to identify the state store for a given operator. */
case class OperatorStateId(
checkpointLocation: String,
operatorId: Long,
batchId: Long)
/**
* An operator that saves or restores state from the [[StateStore]]. The [[OperatorStateId]] should
* be filled in by `prepareForExecution` in [[IncrementalExecution]].
*/
trait StatefulOperator extends SparkPlan {
def stateId: Option[OperatorStateId]
protected def getStateId: OperatorStateId = attachTree(this) {
stateId.getOrElse {
throw new IllegalStateException("State location not present for execution")
}
}
}
/**
* For each input tuple, the key is calculated and the value from the [[StateStore]] is added
* to the stream (in addition to the input tuple) if present.
*/
case class StateStoreRestoreExec(
keyExpressions: Seq[Attribute],
stateId: Option[OperatorStateId],
child: SparkPlan)
extends execution.UnaryExecNode with StatefulOperator {
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override protected def doExecute(): RDD[InternalRow] = {
val numOutputRows = longMetric("numOutputRows")
child.execute().mapPartitionsWithStateStore(
getStateId.checkpointLocation,
operatorId = getStateId.operatorId,
storeVersion = getStateId.batchId,
keyExpressions.toStructType,
child.output.toStructType,
sqlContext.sessionState,
Some(sqlContext.streams.stateStoreCoordinator)) { case (store, iter) =>
val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
iter.flatMap { row =>
val key = getKey(row)
val savedState = store.get(key)
numOutputRows += 1
row +: savedState.toSeq
}
}
}
override def output: Seq[Attribute] = child.output
}
/**
* For each input tuple, the key is calculated and the tuple is `put` into the [[StateStore]].
*/
case class StateStoreSaveExec(
keyExpressions: Seq[Attribute],
stateId: Option[OperatorStateId],
returnAllStates: Option[Boolean],
child: SparkPlan)
extends execution.UnaryExecNode with StatefulOperator {
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
"numTotalStateRows" -> SQLMetrics.createMetric(sparkContext, "number of total state rows"),
"numUpdatedStateRows" -> SQLMetrics.createMetric(sparkContext, "number of updated state rows"))
override protected def doExecute(): RDD[InternalRow] = {
metrics // force lazy init at driver
assert(returnAllStates.nonEmpty,
"Incorrect planning in IncrementalExecution, returnAllStates have not been set")
val saveAndReturnFunc = if (returnAllStates.get) saveAndReturnAll _ else saveAndReturnUpdated _
child.execute().mapPartitionsWithStateStore(
getStateId.checkpointLocation,
operatorId = getStateId.operatorId,
storeVersion = getStateId.batchId,
keyExpressions.toStructType,
child.output.toStructType,
sqlContext.sessionState,
Some(sqlContext.streams.stateStoreCoordinator)
)(saveAndReturnFunc)
}
override def output: Seq[Attribute] = child.output
/**
   * Save each input row to the state store and return it, so that downstream operators see
   * the rows updated in this batch. Note that this returns an iterator that pipelines the
   * saving to the store with downstream processing.
*/
private def saveAndReturnUpdated(
store: StateStore,
iter: Iterator[InternalRow]): Iterator[InternalRow] = {
val numOutputRows = longMetric("numOutputRows")
val numTotalStateRows = longMetric("numTotalStateRows")
val numUpdatedStateRows = longMetric("numUpdatedStateRows")
new Iterator[InternalRow] {
private[this] val baseIterator = iter
private[this] val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
override def hasNext: Boolean = {
if (!baseIterator.hasNext) {
store.commit()
numTotalStateRows += store.numKeys()
false
} else {
true
}
}
override def next(): InternalRow = {
val row = baseIterator.next().asInstanceOf[UnsafeRow]
val key = getKey(row)
store.put(key.copy(), row.copy())
numOutputRows += 1
numUpdatedStateRows += 1
row
}
}
}
/**
* Save all the rows to the state store, and return all the rows in the state store.
   * Note that the saving to the store is blocking; the iterator over the stored data is
   * generated only after all the rows have been saved.
*/
private def saveAndReturnAll(
store: StateStore,
iter: Iterator[InternalRow]): Iterator[InternalRow] = {
val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
val numOutputRows = longMetric("numOutputRows")
val numTotalStateRows = longMetric("numTotalStateRows")
val numUpdatedStateRows = longMetric("numUpdatedStateRows")
while (iter.hasNext) {
val row = iter.next().asInstanceOf[UnsafeRow]
val key = getKey(row)
store.put(key.copy(), row.copy())
numUpdatedStateRows += 1
}
store.commit()
numTotalStateRows += store.numKeys()
store.iterator().map { case (k, v) =>
numOutputRows += 1
v.asInstanceOf[InternalRow]
}
}
}
| gioenn/xSpark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StatefulAggregate.scala | Scala | apache-2.0 | 6,704 |
package notification.models
import java.util.UUID
import models.NotificationReport
import notification.services.SenderError
import play.api.libs.json._
import tracking.RepositoryError
case class PushResult(
id: UUID,
reportingError: Option[String] = None,
rejectedNotifications: Option[List[String]] = None
) {
def withRejected(rejected: List[SenderError]) = copy(rejectedNotifications = Some(rejected map { _.toString }))
def withReportingError(error: RepositoryError) = copy(reportingError = Some(error.message))
}
object PushResult {
def fromReport(report: NotificationReport): PushResult = PushResult(report.notification.id)
implicit val jf = Json.format[PushResult]
}
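// Hedged usage sketch (illustration only, not part of the original file): fold optional
// failure information into a PushResult and serialise it with the implicit format above.
object PushResultUsageSketch {
  def build(report: NotificationReport,
            rejected: List[SenderError],
            reportingError: Option[RepositoryError]): JsValue = {
    val base = PushResult.fromReport(report).withRejected(rejected)
    val full = reportingError.fold(base)(base.withReportingError)
    Json.toJson(full)
  }
}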
| guardian/mobile-n10n | notification/app/notification/models/PushResult.scala | Scala | apache-2.0 | 693 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.table.planner.delegation.StreamPlanner
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.calcite.plan.hep.HepRelVertex
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.{RelNode, RelVisitor}
import scala.collection.JavaConversions._
object UpdatingPlanChecker {
/** Validates that the plan produces only append changes. */
def isAppendOnly(plan: RelNode): Boolean = {
val appendOnlyValidator = new AppendOnlyValidator
appendOnlyValidator.go(plan)
appendOnlyValidator.isAppendOnly
}
/** Extracts the unique keys of the table produced by the plan. */
def getUniqueKeyFields(relNode: RelNode, planner: StreamPlanner): Option[Array[Array[String]]] = {
val rowType = relNode.getRowType
val fmq = FlinkRelMetadataQuery.reuseOrCreate(planner.getRelBuilder.getCluster.getMetadataQuery)
val uniqueKeys = fmq.getUniqueKeys(relNode)
if (uniqueKeys != null && uniqueKeys.size() > 0) {
Some(uniqueKeys.filter(_.nonEmpty).map(_.toArray.map(rowType.getFieldNames.get)).toArray)
} else {
None
}
}
private class AppendOnlyValidator extends RelVisitor {
var isAppendOnly = true
override def visit(node: RelNode, ordinal: Int, parent: RelNode): Unit = {
node match {
case s: StreamPhysicalRel if s.producesUpdates || s.producesRetractions =>
isAppendOnly = false
case hep: HepRelVertex =>
visit(hep.getCurrentRel, ordinal, parent) //remove wrapper node
case rs: RelSubset =>
visit(rs.getOriginal, ordinal, parent) //remove wrapper node
case _ =>
super.visit(node, ordinal, parent)
}
}
}
}
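// Hedged usage sketch (illustration only): the two entry points are typically called on an
// optimized physical plan while validating a sink. `plan` and `planner` are assumed to be
// provided by the surrounding translation code.
//
//   if (!UpdatingPlanChecker.isAppendOnly(plan)) {
//     // reject append-only sinks, or fall back to a retracting sink
//   }
//   val uniqueKeys: Option[Array[Array[String]]] =
//     UpdatingPlanChecker.getUniqueKeyFields(plan, planner)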
| fhueske/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/utils/UpdatingPlanChecker.scala | Scala | apache-2.0 | 2,665 |
package com.twitter.finagle.http
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.builder.ClientConfig
import com.twitter.finagle.client.StackClient
import com.twitter.finagle.http.service.HttpResponseClassifier
import com.twitter.finagle.param.{Tracer => TracerParam}
import com.twitter.finagle.service.ResponseClassifier
import com.twitter.finagle.Filter
import com.twitter.finagle.Name
import com.twitter.finagle.Resolver
import com.twitter.finagle.Service
import com.twitter.finagle.client
import com.twitter.util.Duration
import com.twitter.util.tunable.Tunable
import com.twitter.{finagle => ctf}
object MethodBuilder {
/**
* Create a [[MethodBuilder]] for a given destination.
*
* Note that metrics will be scoped (e.g. "clnt/your_client_label/method_name").
*
* The value for "your_client_label" is taken from the `withLabel` setting
* (from [[com.twitter.finagle.param.Label]]). If that is not set, `dest` is used.
* The value for "method_name" is set when an method-specific client
* is constructed, as in [[MethodBuilder.newService(String)]].
*
* @param dest where requests are dispatched to.
* See the [[https://twitter.github.io/finagle/guide/Names.html user guide]]
* for details on destination names.
*
* @see [[com.twitter.finagle.Http.Client.methodBuilder(String)]]
*/
def from(dest: String, stackClient: StackClient[Request, Response]): MethodBuilder =
from(Resolver.eval(dest), stackClient)
/**
* Create a [[MethodBuilder]] for a given destination.
*
* Note that metrics will be scoped (e.g. "clnt/your_client_label/method_name").
*
* The value for "your_client_label" is taken from the `withLabel` setting
* (from [[com.twitter.finagle.param.Label]]). If that is not set, `dest` is used.
* The value for "method_name" is set when an method-specific client
* is constructed, as in [[MethodBuilder.newService(String)]].
*
* @param dest where requests are dispatched to.
* See the [[https://twitter.github.io/finagle/guide/Names.html user guide]]
* for details on destination names.
*
* @see [[com.twitter.finagle.Http.Client.methodBuilder(Name)]]
*/
def from(dest: Name, stackClient: StackClient[Request, Response]): MethodBuilder = {
val initializer = HttpClientTraceInitializer.typeAgnostic(
stackClient.params[TracerParam].tracer
)
val mb = client.MethodBuilder
.from(dest, stackClient)
.withTraceInitializer(initializer)
new MethodBuilder(mb)
}
/**
* '''NOTE:''' Prefer using [[ctf.Http.Client.methodBuilder]] over using
* this approach to construction. The functionality is available through
* [[ctf.Http.Client]] and [[MethodBuilder]] while addressing the various issues
* of [[ClientBuilder]].
*
* Creates a [[MethodBuilder]] from the given [[ClientBuilder]].
*
* Note that metrics will be scoped (e.g. "clnt/clientbuilders_name/method_name").
*
* The value for "clientbuilders_name" is taken from the [[ClientBuilder.name]]
* configuration, using "client" if unspecified.
* The value for "method_name" is set when an method-specific client
* is constructed, as in [[MethodBuilder.newService]].
*
* - The [[ClientBuilder.timeout]] configuration will be used as the default
* value for [[MethodBuilder.withTimeoutTotal]].
*
* - The [[ClientBuilder.requestTimeout]] configuration will be used as the
* default value for [[MethodBuilder.withTimeoutPerRequest]].
*
* - The [[ClientBuilder]] must have been constructed using
* [[ClientBuilder.stack]] passing an instance of a [[ctf.Http.Client]].
*
* - The [[ClientBuilder]] metrics scoped to "tries" are not included
* as they are superseded by metrics scoped to "logical".
*
* - The [[ClientBuilder]] retry policy will not be applied and must
* be migrated to using [[MethodBuilder.withRetryForClassifier]].
*
* @see [[https://twitter.github.io/finagle/guide/Clients.html#migrating-from-clientbuilder user guide]]
*/
def from(
clientBuilder: ClientBuilder[Request, Response, ClientConfig.Yes, _, _]
): MethodBuilder = {
if (!clientBuilder.params.contains[ClientConfig.DestName])
throw new IllegalArgumentException("ClientBuilder must be configured with a dest")
val dest = clientBuilder.params[ClientConfig.DestName].name
val client = clientBuilder.client.asInstanceOf[ctf.Http.Client]
from(dest, client)
}
}
/**
* `MethodBuilder` is a collection of APIs for client configuration at
* a higher level than the Finagle 6 APIs while improving upon the deprecated
* [[ClientBuilder]]. `MethodBuilder` provides:
*
* - Logical success rate metrics.
* - Retries based on application-level requests and responses (e.g. an HTTP 503 response code).
* - Configuration of per-attempt and total timeouts.
*
* All of these can be customized per method (or endpoint) while sharing a single
* underlying Finagle client. Concretely, a single service might offer both
* `GET statuses/show/:id` as well as `POST statuses/update`, whilst each having
* wildly different characteristics. The `GET` is idempotent and has a tight latency
* distribution while the `POST` is not idempotent and has a wide latency
* distribution. If users want different configurations, without `MethodBuilder`
* they must create separate Finagle clients for each grouping. While long-lived
* clients in Finagle are not expensive, they are not free. They create
* duplicate metrics and waste heap, file descriptors, and CPU.
*
* = Example =
*
* A client that has timeouts and retries on a 418 status code.
* {{{
* import com.twitter.conversions.DurationOps._
* import com.twitter.finagle.Http
* import com.twitter.finagle.service.{ReqRep, ResponseClass}
* import com.twitter.util.Return
*
* val client: Http.Client = ???
* client.methodBuilder("inet!example.com:80")
* .withTimeoutPerRequest(50.milliseconds)
* .withTimeoutTotal(100.milliseconds)
* .withRetryForClassifier {
* case ReqRep(_, Return(rep)) if rep.statusCode == 418 => ResponseClass.RetryableFailure
* }
* .newService("an_endpoint_name")
* }}}
*
* = Timeouts =
*
* Defaults to using the StackClient's configuration.
*
* An example of setting a per-request timeout of 50 milliseconds and a total
* timeout of 100 milliseconds:
* {{{
* import com.twitter.conversions.DurationOps._
* import com.twitter.finagle.Http
* import com.twitter.finagle.http.MethodBuilder
*
* val builder: MethodBuilder = ???
* builder
* .withTimeoutPerRequest(50.milliseconds)
* .withTimeoutTotal(100.milliseconds)
* }}}
*
* = Retries =
*
* Retries are intended to help clients improve success rate by trying
* failed requests additional times. Care must be taken by developers
* to only retry when it is known to be safe to issue the request multiple
* times. This is because the client cannot always be sure what the
* backend service has done. An example of a request that is safe to
* retry would be a read-only request.
*
* Defaults to using the client's [[ResponseClassifier]] to retry failures
* [[com.twitter.finagle.service.ResponseClass.RetryableFailure marked as retryable]].
* See [[withRetryForClassifier]] for details.
*
* A [[com.twitter.finagle.service.RetryBudget]] is used to prevent retries from overwhelming
* the backend service. The budget is shared across clients created from
* an initial `MethodBuilder`. As such, even if the retry rules
* deem the request retryable, it may not be retried if there is insufficient
* budget.
*
* Finagle will automatically retry failures that are known to be safe
* to retry via [[com.twitter.finagle.service.RequeueFilter]]. This includes
* [[com.twitter.finagle.WriteException WriteExceptions]] and
* [[com.twitter.finagle.FailureFlags.Retryable retryable nacks]]. As these should have
* already been retried, we avoid retrying them again by ignoring them at this layer.
*
* Additional information regarding retries can be found in the
* [[https://twitter.github.io/finagle/guide/Clients.html#retries user guide]].
*
* The classifier is also used to determine the logical success metrics of
 * the method. Logical here means after any retries are run. For example,
 * should a request result in a retryable failure on the first attempt but
 * succeed upon retry, it is exposed through metrics as a success.
* Logical success rate metrics are scoped to
* "clnt/your_client_label/method_name/logical" and get "success" and
* "requests" counters along with a "request_latency_ms" stat.
*
* Unsuccessful requests are logged at `com.twitter.logging.Level.DEBUG` level.
* Further details, including the request and response, are available at
* `TRACE` level.
*
* @see [[com.twitter.finagle.Http.Client.methodBuilder]] to construct instances.
*
* @see The [[https://twitter.github.io/finagle/guide/MethodBuilder.html user guide]].
*/
class MethodBuilder private (mb: client.MethodBuilder[Request, Response])
extends client.BaseMethodBuilder[MethodBuilder] {
def withTimeoutTotal(howLong: Duration): MethodBuilder =
new MethodBuilder(mb.withTimeout.total(howLong))
def withTimeoutTotal(howLong: Tunable[Duration]): MethodBuilder =
new MethodBuilder(mb.withTimeout.total(howLong))
def withTimeoutPerRequest(howLong: Duration): MethodBuilder =
new MethodBuilder(mb.withTimeout.perRequest(howLong))
def withTimeoutPerRequest(howLong: Tunable[Duration]): MethodBuilder =
new MethodBuilder(mb.withTimeout.perRequest(howLong))
def withTraceInitializer(initializer: Filter.TypeAgnostic): MethodBuilder =
new MethodBuilder(mb.withTraceInitializer(initializer))
def withRetryForClassifier(classifier: ResponseClassifier): MethodBuilder =
new MethodBuilder(mb.withRetry.forClassifier(classifier))
def withMaxRetries(value: Int): MethodBuilder =
new MethodBuilder(mb.withRetry.maxRetries(value))
def withRetryDisabled: MethodBuilder =
new MethodBuilder(mb.withRetry.disabled)
/**
* @inheritdoc
*
* This additionally causes any server error HTTP status codes (500s) to be retried.
*/
def idempotent(maxExtraLoad: Double): MethodBuilder =
new MethodBuilder(
mb.idempotent(
maxExtraLoad,
sendInterrupts = false,
HttpResponseClassifier.ServerErrorsAsFailures
)
)
/**
* @inheritdoc
*
* This additionally causes any server error HTTP status codes (500s) to be retried.
*/
def idempotent(maxExtraLoad: Double, minSendBackupAfterMs: Int): MethodBuilder =
new MethodBuilder(
mb.idempotent(
maxExtraLoad,
sendInterrupts = false,
minSendBackupAfterMs,
HttpResponseClassifier.ServerErrorsAsFailures
)
)
/**
* @inheritdoc
*
* This additionally causes any server error HTTP status codes (500s) to be retried.
*/
def idempotent(maxExtraLoad: Tunable[Double]): MethodBuilder =
new MethodBuilder(
mb.idempotent(
maxExtraLoad,
sendInterrupts = false,
HttpResponseClassifier.ServerErrorsAsFailures
)
)
/**
* @inheritdoc
*
* This additionally causes any server error HTTP status codes (500s) to be retried.
*/
def idempotent(maxExtraLoad: Tunable[Double], minSendBackupAfterMs: Int): MethodBuilder =
new MethodBuilder(
mb.idempotent(
maxExtraLoad,
sendInterrupts = false,
minSendBackupAfterMs,
HttpResponseClassifier.ServerErrorsAsFailures
)
)
def nonIdempotent: MethodBuilder =
new MethodBuilder(mb.nonIdempotent)
/**
* Construct a [[Service]] to be used for the `methodName` method.
*
* @param methodName used for scoping metrics (e.g. "clnt/your_client_label/method_name").
*/
def newService(methodName: String): Service[Request, Response] =
mb.newService(methodName)
/**
* Construct a [[Service]] to be used for the client.
*/
def newService: Service[Request, Response] =
mb.newService
override def toString: String = mb.toString
}
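// Hedged usage sketch (illustration only): two method-scoped services sharing one
// underlying Finagle HTTP client. The destination and method names are hypothetical.
//
//   import com.twitter.conversions.DurationOps._
//   import com.twitter.finagle.Http
//
//   val builder = Http.client.methodBuilder("inet!example.com:80")
//   val showStatus: Service[Request, Response] =
//     builder.withTimeoutPerRequest(50.milliseconds).idempotent(0.01).newService("show_status")
//   val updateStatus: Service[Request, Response] =
//     builder.withTimeoutTotal(200.milliseconds).nonIdempotent.newService("update_status")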
| twitter/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/MethodBuilder.scala | Scala | apache-2.0 | 12,161 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.tasty
import scala.collection.mutable
/**A static type representing a bitset of modes that affect the interpretation of a TASTy file,
* such as distinguishing between reading the parents of a class, or an annotation tree.
*/
object TastyModes {
final val EmptyTastyMode: TastyMode = TastyMode(0)
/** When reading the parents of a class template */
final val ReadParents: TastyMode = TastyMode(1 << 0)
/** When reading trees of an annotation */
final val ReadAnnotation: TastyMode = TastyMode(1 << 1)
  /** When reading the outermost tree of a term */
final val OuterTerm: TastyMode = TastyMode(1 << 2)
/** When reading statements in a sequence */
final val IndexStats: TastyMode = TastyMode(1 << 3)
/** When reading a macro definition body */
final val ReadMacro: TastyMode = TastyMode(1 << 4)
/** When not at the package scope */
final val InnerScope: TastyMode = TastyMode(1 << 5)
/** When reading the tree of an Opaque type */
final val OpaqueTypeDef: TastyMode = TastyMode(1 << 6)
/** The union of `IndexStats` and `InnerScope` */
final val IndexScopedStats: TastyMode = IndexStats | InnerScope
case class TastyMode(val toInt: Int) extends AnyVal { mode =>
def |(other: TastyMode): TastyMode = TastyMode(toInt | other.toInt)
def &(mask: TastyMode): TastyMode = TastyMode(toInt & mask.toInt)
def &~(mask: TastyMode): TastyMode = TastyMode(toInt & ~mask.toInt)
def is(mask: TastyMode): Boolean = (this & mask) == mask
def isOneOf(mask: TastyMode): Boolean = (this & mask).nonEmpty
def nonEmpty: Boolean = toInt != 0
def debug: String = {
if (mode == EmptyTastyMode) "EmptyTastyMode"
else {
val sb = mutable.ArrayBuffer.empty[String]
if (mode.is(ReadParents)) sb += "ReadParents"
if (mode.is(ReadAnnotation)) sb += "ReadAnnotation"
if (mode.is(OuterTerm)) sb += "OuterTerm"
if (mode.is(IndexStats)) sb += "IndexStats"
if (mode.is(ReadMacro)) sb += "ReadMacro"
if (mode.is(InnerScope)) sb += "InnerScope"
if (mode.is(OpaqueTypeDef)) sb += "OpaqueTypeDef"
sb.mkString(" | ")
}
}
}
}
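// Hedged usage sketch (illustration only, not part of the original source): modes compose
// with `|` and are queried with `is`/`isOneOf`.
object TastyModesUsageSketch {
  import TastyModes._

  val mode: TastyMode = ReadParents | InnerScope
  assert(mode.is(ReadParents))            // the exact mask is contained
  assert(mode.isOneOf(IndexScopedStats))  // shares InnerScope with the union mask
  assert((mode &~ InnerScope) == ReadParents)
  val description: String = mode.debug    // "ReadParents | InnerScope"
}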
| lrytz/scala | src/compiler/scala/tools/nsc/tasty/TastyModes.scala | Scala | apache-2.0 | 2,519 |
/*
* Copyright (C) 2012 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.domain
import java.math.{ BigDecimal, MathContext, RoundingMode }
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
import org.openmole.core.tools.math.BigDecimalOperations
package object range {
val scale = 128
private val mc = new MathContext(scale, RoundingMode.HALF_UP)
trait Log[T] {
def log(t: T): T
def exp(t: T): T
}
implicit lazy val doubleLog =
new Log[Double] {
def log(t: Double) = math.log(t)
def exp(t: Double) = math.exp(t)
}
implicit lazy val bigDecimalLog =
new Log[BigDecimal] {
def log(t: BigDecimal) = BigDecimalOperations.ln(t, scale)
def exp(t: BigDecimal) = BigDecimalOperations.exp(t, scale).setScale(scale, RoundingMode.HALF_UP).round(mc)
}
implicit class RangeDomainDecorator[T](r: RangeDomain[T]) {
def step(s: FromContext[T]) = StepRangeDomain[T](r, s)
def by(s: FromContext[T]) = step(s)
def size(s: FromContext[Int]) = SizeRangeDomain[T](r, s)
def logSteps(s: FromContext[Int])(implicit l: Log[T]) = LogRangeDomain[T](r, s)
}
trait DefaultStep[T] {
def step: T
}
implicit def defaultStepInt = new DefaultStep[Int] { def step = 1 }
implicit def defaultStepLong = new DefaultStep[Long] { def step = 1 }
object RangeValue {
implicit def fractionalIsRangeValue[T](implicit fractional: Fractional[T]) = new RangeValue[T] {
override def div(t1: T, t2: T): T = fractional.div(t1, t2)
override def plus(t1: T, t2: T): T = fractional.plus(t1, t2)
override def toInt(t: T): Int = fractional.toInt(t)
override def mult(t1: T, t2: T): T = fractional.times(t1, t2)
override def fromInt(i: Int): T = fractional.fromInt(i)
override def minus(t1: T, t2: T): T = fractional.minus(t1, t2)
}
implicit def integralIsRangeValue[T](implicit integral: Integral[T]) = new RangeValue[T] {
override def div(t1: T, t2: T): T = integral.quot(t1, t2)
override def plus(t1: T, t2: T): T = integral.plus(t1, t2)
override def toInt(t: T): Int = integral.toInt(t)
override def mult(t1: T, t2: T): T = integral.times(t1, t2)
override def fromInt(i: Int): T = integral.fromInt(i)
override def minus(t1: T, t2: T): T = integral.minus(t1, t2)
}
}
trait RangeValue[T] { v ⇒
def div(t1: T, t2: T): T
def mult(t1: T, t2: T): T
def plus(t1: T, t2: T): T
def minus(t1: T, t2: T): T
def fromInt(i: Int): T
def toInt(t: T): Int
implicit class ops(lhs: T) {
def +(rhs: T) = plus(lhs, rhs)
def -(rhs: T) = minus(lhs, rhs)
def /(rhs: T) = div(lhs, rhs)
def *(rhs: T) = mult(lhs, rhs)
def toInt = v.toInt(lhs)
}
}
object IsRangeDomain {
import org.openmole.tool.collection.DoubleRange
implicit def doubleRangeIsRange: IsRangeDomain[DoubleRange, Double] = (range: DoubleRange) ⇒ RangeDomain(range.low, range.high)
implicit def intRangeIsRange: IsRangeDomain[scala.Range, Int] = (range: scala.Range) ⇒ RangeDomain(range.min, range.max)
implicit def intRangeIsRangeDouble: IsRangeDomain[scala.Range, Double] = (range: scala.Range) ⇒ RangeDomain(range.min.toDouble, range.max.toDouble)
}
trait IsRangeDomain[-D, T] {
def apply(t: D): RangeDomain[T]
}
implicit def isRangeDomainIsBounded[D, T](implicit isRangeDomain: IsRangeDomain[D, T]): BoundedFromContextDomain[D, T] = domain ⇒
Domain(
(isRangeDomain(domain).min, isRangeDomain(domain).max),
isRangeDomain(domain).inputs,
isRangeDomain(domain).validate
)
implicit def isRangeDomainHasCenter[D, T](implicit isRangeDomain: IsRangeDomain[D, T]): DomainCenterFromContext[D, T] = domain ⇒ RangeDomain.rangeCenter(isRangeDomain(domain))
@deprecated("Use RangeDomain", "13")
def Range = RangeDomain
@deprecated("Use LogRangeDomain", "13")
def LogRange = LogRangeDomain
@deprecated("Use SizeRangeDomain", "13")
def SizeRange = SizeRangeDomain
@deprecated("Use StepRangeDomain", "13")
def StepRange = StepRangeDomain
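  // Hedged usage sketch (illustration only; RangeDomain's constructor arguments are assumed):
  //   val byStep   = RangeDomain(0.0, 10.0) by 0.5         // explicit step
  //   val bySize   = RangeDomain(0.0, 1.0) size 100        // derive the step from a target size
  //   val logSized = RangeDomain(1.0, 1000.0) logSteps 4   // logarithmic steps via Log[Double]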
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.domain.range/src/main/scala/org/openmole/plugin/domain/range/package.scala | Scala | agpl-3.0 | 4,766 |
package com.datawizards.dqm.validator
import java.sql.Date
import com.datawizards.dqm.configuration.{GroupByConfiguration, TableConfiguration, ValidationContext}
import com.datawizards.dqm.configuration.location.StaticTableLocation
import com.datawizards.dqm.filter.FilterByYearMonthDayColumns
import com.datawizards.dqm.mocks.{EmptyHistoryStatisticsReader, StaticHistoryStatisticsReader}
import com.datawizards.dqm.result._
import com.datawizards.dqm.rules.field.NotNullRule
import com.datawizards.dqm.rules.group.NotEmptyGroups
import com.datawizards.dqm.rules.trend.CurrentVsPreviousDayRowCountIncrease
import com.datawizards.dqm.rules.{FieldRules, TableRules}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FunSuite, Matchers}
@RunWith(classOf[JUnitRunner])
class DataValidatorTest extends FunSuite with Matchers {
lazy val spark: SparkSession = SparkSession.builder().master("local").getOrCreate()
test("Validate records - simple") {
val schema = StructType(Seq(
StructField("f1", StringType),
StructField("f2", StringType),
StructField("f3", StringType),
StructField("year", IntegerType),
StructField("month", IntegerType),
StructField("day", IntegerType)
))
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("r1.f1", "r1.f2", null, 2000, 1, 2), // processing date
Row("r2.f1", null, "r2.f3", 2000, 1, 2), // processing date
Row(null, "r3.f2", "r3.f3", 2000, 1, 2), // processing date
Row(null, "r3.f2", "r3.f3", 2000, 1, 3) // not processing date
)), schema)
val processingDate = Date.valueOf("2000-01-02")
val input = StaticTableLocation(df, "table")
val result = DataValidator.validate(
TableConfiguration(
input,
TableRules(Seq(
FieldRules(
field = "f2",
rules = Seq(
NotNullRule
)
))),
Some(FilterByYearMonthDayColumns)
),
ValidationContext("table", processingDate),
EmptyHistoryStatisticsReader
)
result should equal(ValidationResult(
invalidRecords = Seq(
InvalidRecord(
tableName = "table",
columnName = "f2",
row = """{"f1" : "r2.f1", "year" : 2000, "f3" : "r2.f3", "day" : 2, "month" : 1, "f2" : "null"}""",
value = "null",
rule = "NOT NULL",
year = 2000,
month = 1,
day = 2
)
),
tableStatistics = TableStatistics(
tableName = "table",
rowsCount = 3,
columnsCount = 6,
year = 2000,
month = 1,
day = 2
),
columnsStatistics = Seq(
ColumnStatistics(
tableName = "table",
columnName = "f1",
columnType = "StringType",
notMissingCount = 2L,
rowsCount = 3L,
percentageNotMissing = Some(2.0/3.0),
year = 2000,
month = 1,
day = 2
),
ColumnStatistics(
tableName = "table",
columnName = "f2",
columnType = "StringType",
notMissingCount = 2L,
rowsCount = 3L,
percentageNotMissing = Some(2.0/3.0),
year = 2000,
month = 1,
day = 2
),
ColumnStatistics(
tableName = "table",
columnName = "f3",
columnType = "StringType",
notMissingCount = 2L,
rowsCount = 3L,
percentageNotMissing = Some(2.0/3.0),
year = 2000,
month = 1,
day = 2
),
ColumnStatistics(
tableName = "table",
columnName = "year",
columnType = "IntegerType",
notMissingCount = 3L,
rowsCount = 3L,
percentageNotMissing = Some(3.0/3.0),
min = Some(2000.0),
max = Some(2000.0),
avg = Some(2000.0),
stddev = Some(0.0),
year = 2000,
month = 1,
day = 2
),
ColumnStatistics(
tableName = "table",
columnName = "month",
columnType = "IntegerType",
notMissingCount = 3L,
rowsCount = 3L,
percentageNotMissing = Some(3.0/3.0),
min = Some(1.0),
max = Some(1.0),
avg = Some(1.0),
stddev = Some(0.0),
year = 2000,
month = 1,
day = 2
),
ColumnStatistics(
tableName = "table",
columnName = "day",
columnType = "IntegerType",
notMissingCount = 3L,
rowsCount = 3L,
percentageNotMissing = Some(3.0/3.0),
min = Some(2.0),
max = Some(2.0),
avg = Some(2.0),
stddev = Some(0.0),
year = 2000,
month = 1,
day = 2
)
)
))
}
test("Validate records - group statistics") {
val schema = StructType(Seq(
StructField("country", StringType)
))
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("country1"),
Row("country1"),
Row("country1"),
Row("country2"),
Row("country2"),
Row("country3")
)), schema)
val processingDate = Date.valueOf("2000-01-02")
val input = StaticTableLocation(df, "table")
val result = DataValidator.validate(
TableConfiguration(
location = input,
rules = TableRules(Seq.empty),
groups = Seq(GroupByConfiguration("COUNTRY", "country"))
),
ValidationContext("table", processingDate),
EmptyHistoryStatisticsReader
)
result.copy(groupByStatisticsList = result.groupByStatisticsList.sortBy(_.groupByFieldValue)) should equal(ValidationResult(
invalidRecords = Seq.empty,
tableStatistics = TableStatistics(
tableName = "table",
rowsCount = 6,
columnsCount = 1,
year = 2000,
month = 1,
day = 2
),
columnsStatistics = Seq(
ColumnStatistics(
tableName = "table",
columnName = "country",
columnType = "StringType",
notMissingCount = 6L,
rowsCount = 6L,
percentageNotMissing = Some(6.0/6.0),
year = 2000,
month = 1,
day = 2
)
),
groupByStatisticsList = Seq(
GroupByStatistics(
tableName = "table",
groupName = "COUNTRY",
groupByFieldValue = "country1",
rowsCount = 3,
year = 2000,
month = 1,
day = 2
),
GroupByStatistics(
tableName = "table",
groupName = "COUNTRY",
groupByFieldValue = "country2",
rowsCount = 2,
year = 2000,
month = 1,
day = 2
),
GroupByStatistics(
tableName = "table",
groupName = "COUNTRY",
groupByFieldValue = "country3",
rowsCount = 1,
year = 2000,
month = 1,
day = 2
)
)
))
}
test("Validate records - group validation rules") {
val schema = StructType(Seq(
StructField("country", StringType)
))
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("country1"),
Row("country1"),
Row("country1"),
Row("country2"),
Row("country2")
)), schema)
val processingDate = Date.valueOf("2000-01-02")
val input = StaticTableLocation(df, "table")
val result = DataValidator.validate(
TableConfiguration(
location = input,
rules = TableRules(Seq.empty),
groups = Seq(GroupByConfiguration("COUNTRY", "country", Seq(NotEmptyGroups(Seq("country1","country2","country3")))))
),
ValidationContext("table", processingDate),
EmptyHistoryStatisticsReader
)
val expectedValidationResult = ValidationResult(
invalidRecords = Seq.empty,
tableStatistics = TableStatistics(
tableName = "table",
rowsCount = 5,
columnsCount = 1,
year = 2000,
month = 1,
day = 2
),
columnsStatistics = Seq(
ColumnStatistics(
tableName = "table",
columnName = "country",
columnType = "StringType",
notMissingCount = 5L,
rowsCount = 5L,
percentageNotMissing = Some(5.0/5.0),
year = 2000,
month = 1,
day = 2
)
),
groupByStatisticsList = Seq(
GroupByStatistics(
tableName = "table",
groupName = "COUNTRY",
groupByFieldValue = "country1",
rowsCount = 3,
year = 2000,
month = 1,
day = 2
),
GroupByStatistics(
tableName = "table",
groupName = "COUNTRY",
groupByFieldValue = "country2",
rowsCount = 2,
year = 2000,
month = 1,
day = 2
)
),
invalidGroups = Seq(
InvalidGroup(
tableName = "table",
groupName = "COUNTRY",
groupValue = Some("country3"),
rule = "NotEmptyGroups",
year = 2000,
month = 1,
day = 2
)
)
)
result.copy(groupByStatisticsList = result.groupByStatisticsList.sortBy(_.groupByFieldValue)) should equal(expectedValidationResult)
}
test("Validate records - table trends") {
val schema = StructType(Seq(
StructField("country", StringType)
))
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("country1"),
Row("country1"),
Row("country1"),
Row("country2"),
Row("country2"),
Row("country3")
)), schema)
val processingDate = Date.valueOf("2000-01-02")
val input = StaticTableLocation(df, "table")
val result = DataValidator.validate(
TableConfiguration(
location = input,
rules = TableRules(
rowRules = Seq.empty,
tableTrendRules = Seq(
CurrentVsPreviousDayRowCountIncrease(10)
)
)
),
ValidationContext("table", processingDate),
new StaticHistoryStatisticsReader(Map(
"table" -> Seq(
TableStatistics(
tableName = "table",
rowsCount = 1,
columnsCount = 1,
year = 2000,
month = 1,
day = 1
),
TableStatistics(
tableName = "table",
rowsCount = 5,
columnsCount = 1,
year = 2000,
month = 1,
day = 2
)
)
))
)
val expectedValidationResult = ValidationResult(
invalidRecords = Seq.empty,
tableStatistics = TableStatistics(
tableName = "table",
rowsCount = 6,
columnsCount = 1,
year = 2000,
month = 1,
day = 2
),
columnsStatistics = Seq(
ColumnStatistics(
tableName = "table",
columnName = "country",
columnType = "StringType",
notMissingCount = 6L,
rowsCount = 6L,
percentageNotMissing = Some(6.0/6.0),
year = 2000,
month = 1,
day = 2
)
),
groupByStatisticsList = Seq.empty,
invalidTableTrends = Seq(
InvalidTableTrend(
tableName = "table",
rule = "CurrentVsPreviousDayRowCountIncrease",
comment = "1 -> 5 rows",
year = 2000,
month = 1,
day = 2
)
)
)
result should equal(expectedValidationResult)
}
}
| piotr-kalanski/data-quality-monitoring | src/test/scala/com/datawizards/dqm/validator/DataValidatorTest.scala | Scala | apache-2.0 | 11,790 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the Project entity.
*/
class ProjectGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connectionHeader("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-XSRF-TOKEN" -> "${xsrf_token}"
)
val scn = scenario("Test the Project entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "XSRF-TOKEN=(.*);[\\s]").saveAs("xsrf_token"))).exitHereIfFailed
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login")
.check(headerRegex("Set-Cookie", "XSRF-TOKEN=(.*);[\\s]").saveAs("xsrf_token"))).exitHereIfFailed
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10)
.repeat(2) {
exec(http("Get all projects")
.get("/api/projects")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new project")
.post("/api/projects")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "name":"SAMPLE_TEXT"}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_project_url"))).exitHereIfFailed
.pause(10)
.repeat(5) {
exec(http("Get created project")
.get("${new_project_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created project")
.delete("${new_project_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
| Lukle/hipTimeTracker | src/test/gatling/simulations/ProjectGatlingTest.scala | Scala | apache-2.0 | 3,331 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Explicitly
import org.scalactic.StringNormalizations._
import org.scalactic.Uniformity
import collection.GenTraversable
import SharedHelpers._
import Matchers._
class OneOfContainMatcherDeciderSpec extends Spec with Explicitly {
val mapTrimmed: Uniformity[(Int, String)] =
new Uniformity[(Int, String)] {
def normalized(s: (Int, String)): (Int, String) = (s._1, s._2.trim)
def normalizedCanHandle(b: Any) =
b match {
case (_: Int, _: String) => true
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case (k: Int, v: String) => normalized((k, v))
case _ => b
}
}
val javaMapTrimmed: Uniformity[java.util.Map.Entry[Int, String]] =
new Uniformity[java.util.Map.Entry[Int, String]] {
def normalized(s: java.util.Map.Entry[Int, String]): java.util.Map.Entry[Int, String] = Entry(s.getKey, s.getValue.trim)
def normalizedCanHandle(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (_: Int, _: String) => true
case _ => false
}
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (k: Int, v: String) => normalized(Entry(k, v))
case _ => b
}
case _ => b
}
}
val incremented: Uniformity[Int] =
new Uniformity[Int] {
var count = 0
def normalized(s: Int): Int = {
count += 1
s + count
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[Int]
def normalizedOrSame(b: Any) =
b match {
case i: Int => normalized(i)
case _ => b
}
}
val mapIncremented: Uniformity[(Int, String)] =
new Uniformity[(Int, String)] {
var count = 0
def normalized(s: (Int, String)): (Int, String) = {
count += 1
(s._1 + count, s._2)
}
def normalizedCanHandle(b: Any) =
b match {
case (_: Int, _: String) => true
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case (k: Int, v: String) => normalized((k, v))
case _ => b
}
}
val appended: Uniformity[String] =
new Uniformity[String] {
var count = 0
def normalized(s: String): String = {
count += 1
s + count
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
val mapAppended: Uniformity[(Int, String)] =
new Uniformity[(Int, String)] {
var count = 0
def normalized(s: (Int, String)): (Int, String) = {
count += 1
(s._1, s._2 + count)
}
def normalizedCanHandle(b: Any) =
b match {
case (_: Int, _: String) => true
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case (k: Int, v: String) => normalized((k, v))
case _ => b
}
}
val javaMapAppended: Uniformity[java.util.Map.Entry[Int, String]] =
new Uniformity[java.util.Map.Entry[Int, String]] {
var count = 0
def normalized(s: java.util.Map.Entry[Int, String]): java.util.Map.Entry[Int, String] = {
count += 1
Entry(s.getKey, s.getValue + count)
}
def normalizedCanHandle(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (_: Int, _: String) => true
case _ => false
}
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (k: Int, v: String) => normalized(Entry(k, v))
case _ => b
}
case _ => b
}
}
val lowerCaseEquality =
new Equality[String] {
def areEqual(left: String, right: Any) =
left.toLowerCase == (right match {
case s: String => s.toLowerCase
case other => other
})
}
val mapLowerCaseEquality =
new Equality[(Int, String)] {
def areEqual(left: (Int, String), right: Any) =
right match {
case t2: Tuple2[_, _] =>
left._1 == t2._1 &&
left._2.toLowerCase == (t2._2 match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
val javaMapLowerCaseEquality =
new Equality[java.util.Map.Entry[Int, String]] {
def areEqual(left: java.util.Map.Entry[Int, String], right: Any) =
right match {
case entry: java.util.Map.Entry[_, _] =>
left.getKey == entry.getKey &&
left.getValue.toLowerCase == (entry.getValue match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
val reverseEquality =
new Equality[String] {
def areEqual(left: String, right: Any) =
left.reverse == (right match {
case s: String => s.toLowerCase
case other => other
})
}
val mapReverseEquality =
new Equality[(Int, String)] {
def areEqual(left: (Int, String), right: Any) =
right match {
case t2: Tuple2[_, _] =>
left._1 == t2._1 &&
left._2.reverse == (t2._2 match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
val javaMapReverseEquality =
new Equality[java.util.Map.Entry[Int, String]] {
def areEqual(left: java.util.Map.Entry[Int, String], right: Any) =
right match {
case entry: java.util.Map.Entry[_, _] =>
left.getKey == entry.getKey &&
left.getValue.reverse == (entry.getValue match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
object `oneOf ` {
def checkShouldContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int) {
e.message should be (Some(FailureMessages("didNotContainOneOfElements", left, UnquotedString(right.map(FailureMessages.decorateToStringValue).mkString(", ")))))
e.failedCodeFileName should be (Some("OneOfContainMatcherDeciderSpec.scala"))
e.failedCodeLineNumber should be (Some(lineNumber))
}
def checkShouldNotContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int) {
val leftText = FailureMessages.decorateToStringValue(left)
e.message should be (Some(FailureMessages("containedOneOfElements", left, UnquotedString(right.map(FailureMessages.decorateToStringValue).mkString(", ")))))
e.failedCodeFileName should be (Some("OneOfContainMatcherDeciderSpec.scala"))
e.failedCodeLineNumber should be (Some(lineNumber))
}
def `should take specified normalization when 'should contain' is used` {
(List("1", " 2", "3") should contain oneOf ("2 ", "6", "8")) (after being trimmed)
(Set("1", " 2", "3") should contain oneOf ("2 ", "6", "8")) (after being trimmed)
(Array("1", " 2", "3") should contain oneOf ("2 ", "6", "8")) (after being trimmed)
(javaList("1", " 2", "3") should contain oneOf ("2 ", "6", "8")) (after being trimmed)
(javaSet("1", " 2", "3") should contain oneOf ("2 ", "6", "8")) (after being trimmed)
(Map(1 -> "one", 2 -> " two", 3 -> "three") should contain oneOf (2 -> "two", 6 -> "six", 8 -> "eight")) (after being mapTrimmed)
(javaMap(Entry(1, "one"), Entry(2, " two"), Entry(3, "three")) should contain oneOf (Entry(2, "two"), Entry(6, "six"), Entry(8, "eight"))) (after being javaMapTrimmed)
}
def `should take specified normalization when 'should not contain' is used` {
(List("1", "2", "3") should not contain oneOf ("1", "6", "8")) (after being appended)
(Set("1", "2", "3") should not contain oneOf ("1", "6", "8")) (after being appended)
(Array("1", "2", "3") should not contain oneOf ("1", "6", "8")) (after being appended)
(javaList("1", "2", "3") should not contain oneOf ("1", "6", "8")) (after being appended)
(javaSet("1", "2", "3") should not contain oneOf ("1", "6", "8")) (after being appended)
(Map(1 -> "one", 2 -> "two", 3 -> "three") should not contain oneOf (1 -> "one", 2 -> "two", 3 -> "three")) (after being mapAppended)
(javaMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three")) should not contain oneOf (Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) (after being javaMapAppended)
}
def `should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified normalization` {
val left1 = List("1", "2", "3")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should contain oneOf ("1", "6", "8")) (after being appended)
}
checkShouldContainStackDepth(e1, left1, Array("1", "6", "8").deep, thisLineNumber - 2)
val left2 = Set("1", "2", "3")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should contain oneOf ("1", "6", "8")) (after being appended)
}
checkShouldContainStackDepth(e2, left2, Array("1", "6", "8").deep, thisLineNumber - 2)
val left3 = Array("1", "2", "3")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should contain oneOf ("1", "6", "8")) (after being appended)
}
checkShouldContainStackDepth(e3, left3, Array("1", "6", "8").deep, thisLineNumber - 2)
val left4 = javaList("1", "2", "3")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should contain oneOf ("1", "6", "8")) (after being appended)
}
checkShouldContainStackDepth(e4, left4, Array("1", "6", "8").deep, thisLineNumber - 2)
val left5 = Map(1 -> "one", 2 -> "two", 3 -> "three")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should contain oneOf (1 -> "one", 6 -> "six", 8 -> "eight")) (after being mapAppended)
}
checkShouldContainStackDepth(e5, left5, Array(1 -> "one", 6 -> "six", 8 -> "eight").deep, thisLineNumber - 2)
val left6 = javaMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should contain oneOf (Entry(1, "one"), Entry(6, "six"), Entry(8, "eight"))) (after being javaMapAppended)
}
checkShouldContainStackDepth(e6, left6, Array(Entry(1, "one"), Entry(6, "six"), Entry(8, "eight")).deep, thisLineNumber - 2)
}
def `should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified normalization` {
val left1 = List("1", " 2", "3")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should not contain oneOf ("2 ", "6", "8")) (after being trimmed)
}
checkShouldNotContainStackDepth(e1, left1, Array("2 ", "6", "8").deep, thisLineNumber - 2)
val left2 = Set("1", " 2", "3")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should not contain oneOf ("2 ", "6", "8")) (after being trimmed)
}
checkShouldNotContainStackDepth(e2, left2, Array("2 ", "6", "8").deep, thisLineNumber - 2)
val left3 = Array("1", " 2", "3")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should not contain oneOf ("2 ", "6", "8")) (after being trimmed)
}
checkShouldNotContainStackDepth(e3, left3, Array("2 ", "6", "8").deep, thisLineNumber - 2)
val left4 = javaList("1", " 2", "3")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should not contain oneOf ("2 ", "6", "8")) (after being trimmed)
}
checkShouldNotContainStackDepth(e4, left4, Array("2 ", "6", "8").deep, thisLineNumber - 2)
val left5 = Map(1 -> "one", 2 -> " two", 3 -> "three")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should not contain oneOf (2 -> "two ", 6 -> "six", 8 -> "eight")) (after being mapTrimmed)
}
checkShouldNotContainStackDepth(e5, left5, Array(2 -> "two ", 6 -> "six", 8 -> "eight").deep, thisLineNumber - 2)
val left6 = javaMap(Entry(1, "one"), Entry(2, " two"), Entry(3, "three"))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should not contain oneOf (Entry(2, "two "), Entry(6, "six"), Entry(8, "eight"))) (after being javaMapTrimmed)
}
checkShouldNotContainStackDepth(e6, left6, Array(Entry(2, "two "), Entry(6, "six"), Entry(8, "eight")).deep, thisLineNumber - 2)
}
def `should take specified equality and normalization equality when 'should contain' is used` {
(List("ONE", " TWO", "THREE") should contain oneOf ("two ", "six", "eight")) (decided by lowerCaseEquality afterBeing trimmed)
(Set("ONE", " TWO", "THREE") should contain oneOf ("two ", "six", "eight")) (decided by lowerCaseEquality afterBeing trimmed)
(Array("ONE", " TWO", "THREE") should contain oneOf ("two ", "six", "eight")) (decided by lowerCaseEquality afterBeing trimmed)
(javaList("ONE", " TWO", "THREE") should contain oneOf ("two ", "six", "eight")) (decided by lowerCaseEquality afterBeing trimmed)
(Map(1 -> "ONE", 2 -> " TWO", 3 -> "THREE") should contain oneOf (2 -> "two ", 6 -> "six", 8 -> "eight")) (decided by mapLowerCaseEquality afterBeing mapTrimmed)
(javaMap(Entry(1, "ONE"), Entry(2, " TWO"), Entry(3, "THREE")) should contain oneOf (Entry(2, "two "), Entry(6, "six"), Entry(8, "eight"))) (decided by javaMapLowerCaseEquality afterBeing javaMapTrimmed)
}
def `should take specified equality and normalization when 'should not contain' is used` {
(List("one ", " two", "three ") should not contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(Set("one ", " two", "three ") should not contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(Array("one ", " two", "three ") should not contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(javaList("one ", " two", "three ") should not contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(Map(1 -> "one ", 2 -> " two", 3 -> "three ") should not contain oneOf (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapReverseEquality afterBeing mapTrimmed)
(javaMap(Entry(1, "one "), Entry(2, " two"), Entry(3, "three ")) should not contain oneOf (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapReverseEquality afterBeing javaMapTrimmed)
}
def `should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified equality and normalizationy` {
val left1 = List("one ", " two", "three ")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e1, left1, Array(" one", "two ", " three").deep, thisLineNumber - 2)
val left2 = Set("one ", " two", "three ")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e2, left2, Array(" one", "two ", " three").deep, thisLineNumber - 2)
val left3 = Array("one ", " two", "three ")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e3, left3, Array(" one", "two ", " three").deep, thisLineNumber - 2)
val left4 = javaList("one ", " two", "three ")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should contain oneOf (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e4, left4, Array(" one", "two ", " three").deep, thisLineNumber - 2)
val left5 = Map(1 -> "one ", 2 -> " two", 3 -> "three ")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should contain oneOf (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapReverseEquality afterBeing mapTrimmed)
}
checkShouldContainStackDepth(e5, left5, Array(1 -> " one", 2 -> "two ", 3 -> " three").deep, thisLineNumber - 2)
val left6 = javaMap(Entry(1, "one "), Entry(2, " two"), Entry(3, "three "))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should contain oneOf (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapReverseEquality afterBeing javaMapTrimmed)
}
checkShouldContainStackDepth(e6, left6, Array(Entry(1, " one"), Entry(2, "two "), Entry(3, " three")).deep, thisLineNumber - 2)
}
def `should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified equality and normalization` {
val left1 = List("ONE ", " TWO", "THREE ")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should not contain oneOf ("two ", " six", "eight ")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e1, left1, Array("two ", " six", "eight ").deep, thisLineNumber - 2)
val left2 = Set("ONE ", " TWO", "THREE ")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should not contain oneOf ("two ", " six", "eight ")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e2, left2, Array("two ", " six", "eight ").deep, thisLineNumber - 2)
val left3 = Array("ONE ", " TWO", "THREE ")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should not contain oneOf ("two ", " six", "eight ")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e3, left3, Array("two ", " six", "eight ").deep, thisLineNumber - 2)
val left4 = javaList("ONE ", " TWO", "THREE ")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should not contain oneOf ("two ", " six", "eight ")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e4, left4, Array("two ", " six", "eight ").deep, thisLineNumber - 2)
val left5 = Map(1 -> "ONE ", 2 -> " TWO", 3 -> "THREE ")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should not contain oneOf (2 -> "two ", 6 -> " six", 8 -> "eight ")) (decided by mapLowerCaseEquality afterBeing mapTrimmed)
}
checkShouldNotContainStackDepth(e5, left5, Array(2 -> "two ", 6 -> " six", 8 -> "eight ").deep, thisLineNumber - 2)
val left6 = javaMap(Entry(1, "ONE "), Entry(2, " TWO"), Entry(3, "THREE "))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should not contain oneOf (Entry(2, "two "), Entry(6, " six"), Entry(8, "eight "))) (decided by javaMapLowerCaseEquality afterBeing javaMapTrimmed)
}
checkShouldNotContainStackDepth(e6, left6, Array(Entry(2, "two "), Entry(6, " six"), Entry(8, "eight ")).deep, thisLineNumber - 2)
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/OneOfContainMatcherDeciderSpec.scala | Scala | apache-2.0 | 21,035 |
package com.bwsw.tstreamstransactionserver.netty.server.transactionMetadataService
import java.util.concurrent.TimeUnit
import com.bwsw.tstreamstransactionserver.netty.server.RocksStorage
import com.bwsw.tstreamstransactionserver.netty.server.consumerService.{ConsumerServiceImpl, ConsumerTransactionRecord}
import com.bwsw.tstreamstransactionserver.netty.server.db.rocks.{Batch, RocksDBALL}
import com.bwsw.tstreamstransactionserver.netty.server.streamService.StreamKey
import com.bwsw.tstreamstransactionserver.netty.server.transactionMetadataService.stateHandler.{KeyStreamPartition, LastTransactionStreamPartition, TransactionStateHandler}
import com.bwsw.tstreamstransactionserver.rpc._
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
class TransactionMetaServiceImpl(rocksMetaServiceDB: RocksDBALL,
lastTransactionStreamPartition: LastTransactionStreamPartition,
consumerService: ConsumerServiceImpl)
extends TransactionStateHandler
with ProducerTransactionStateNotifier
{
import lastTransactionStreamPartition._
private val logger: Logger = LoggerFactory.getLogger(this.getClass)
private val producerTransactionsDatabase =
rocksMetaServiceDB.getDatabase(RocksStorage.TRANSACTION_ALL_STORE)
private val producerTransactionsWithOpenedStateDatabase =
rocksMetaServiceDB.getDatabase(RocksStorage.TRANSACTION_OPEN_STORE)
private def fillOpenedTransactionsRAMTable: com.google.common.cache.Cache[ProducerTransactionKey, ProducerTransactionValue] = {
if (logger.isDebugEnabled)
logger.debug("Filling cache with Opened Transactions table.")
val secondsToLive = 180
val threadsToWriteNumber = 1
val cache = com.google.common.cache.CacheBuilder.newBuilder()
.concurrencyLevel(threadsToWriteNumber)
.expireAfterAccess(secondsToLive, TimeUnit.SECONDS)
.build[ProducerTransactionKey, ProducerTransactionValue]()
val iterator = producerTransactionsWithOpenedStateDatabase.iterator
iterator.seekToFirst()
while (iterator.isValid) {
cache.put(
ProducerTransactionKey.fromByteArray(iterator.key()),
ProducerTransactionValue.fromByteArray(iterator.value())
)
iterator.next()
}
iterator.close()
cache
}
private val transactionsRamTable: com.google.common.cache.Cache[ProducerTransactionKey, ProducerTransactionValue] =
fillOpenedTransactionsRAMTable
def getOpenedTransaction(key: ProducerTransactionKey): Option[ProducerTransactionValue] = {
Option(transactionsRamTable.getIfPresent(key))
.orElse {
val keyFound = key.toByteArray
Option(producerTransactionsWithOpenedStateDatabase.get(keyFound)).map { data =>
val producerTransactionValue =
ProducerTransactionValue.fromByteArray(data)
transactionsRamTable.put(key, producerTransactionValue)
producerTransactionValue
}
}
}
private type Timestamp = Long
private final def decomposeTransactionsToProducerTxnsAndConsumerTxns(transactions: Seq[(com.bwsw.tstreamstransactionserver.rpc.Transaction, Timestamp)],
batch: Batch) = {
if (logger.isDebugEnabled)
logger.debug("Decomposing transactions to producer and consumer transactions")
val producerTransactions = ArrayBuffer[(ProducerTransaction, Timestamp)]()
val consumerTransactions = ArrayBuffer[(ConsumerTransaction, Timestamp)]()
transactions foreach { case (transaction, timestamp) =>
(transaction.producerTransaction, transaction.consumerTransaction) match {
case (Some(txn), _) =>
val key = KeyStreamPartition(txn.stream, txn.partition)
if (txn.state != TransactionStates.Opened) {
producerTransactions += ((txn, timestamp))
} else if (!isThatTransactionOutOfOrder(key, txn.transactionID)) {
// updating RAM table, and last opened transaction database.
updateLastTransactionStreamPartitionRamTable(
key,
txn.transactionID,
isOpenedTransaction = true
)
putLastTransaction(key,
txn.transactionID,
isOpenedTransaction = true,
batch
)
if (logger.isDebugEnabled)
logger.debug(
s"On stream:${key.stream} partition:${key.partition} " +
s"last opened transaction is ${txn.transactionID} now."
)
producerTransactions += ((txn, timestamp))
}
case (_, Some(txn)) =>
consumerTransactions += ((txn, timestamp))
case _ =>
}
}
(producerTransactions, consumerTransactions)
}
private final def groupProducerTransactionsByStreamAndDecomposeThemToDatabaseRepresentation(txns: Seq[(ProducerTransaction, Timestamp)]): Map[StreamKey, ArrayBuffer[ProducerTransactionRecord]] = {
if (logger.isDebugEnabled)
logger.debug("Mapping all producer transactions streams attrbute to long representation(ID), " +
"grouping them by stream and partition, " +
"checking that the stream isn't deleted in order to process producer transactions."
)
txns.foldRight(scala.collection.mutable.Map[StreamKey, ArrayBuffer[ProducerTransactionRecord]]()) {
case ((producerTransaction, timestamp), acc) =>
val keyStream = StreamKey(producerTransaction.stream)
if (acc.contains(keyStream))
acc(keyStream) += ProducerTransactionRecord(producerTransaction, timestamp)
else
acc += ((keyStream, ArrayBuffer(ProducerTransactionRecord(producerTransaction, timestamp))))
acc
}.toMap
}
private final def decomposeConsumerTransactionsToDatabaseRepresentation(transactions: Seq[(ConsumerTransaction, Timestamp)]) = {
val consumerTransactionsKey = new Array[ConsumerTransactionRecord](transactions.length)
transactions.foldRight(0){case ((txn, timestamp), index) =>
consumerTransactionsKey(index) = ConsumerTransactionRecord(txn, timestamp)
index + 1
}
consumerTransactionsKey
}
private final def groupProducerTransactions(producerTransactions: Seq[ProducerTransactionRecord]) =
producerTransactions.groupBy(txn => txn.key)
private final def updateLastCheckpointedTransactionAndPutToDatabase(key: stateHandler.KeyStreamPartition,
producerTransactionWithNewState: ProducerTransactionRecord,
batch: Batch): Unit = {
updateLastTransactionStreamPartitionRamTable(
key,
producerTransactionWithNewState.transactionID,
isOpenedTransaction = false
)
putLastTransaction(
key,
producerTransactionWithNewState.transactionID,
isOpenedTransaction = false,
batch
)
if (logger.isDebugEnabled())
logger.debug(
s"On stream:${key.stream} partition:${key.partition} " +
s"last checkpointed transaction is ${producerTransactionWithNewState.transactionID} now."
)
}
private def putTransactionToAllAndOpenedTables(producerTransactionRecord: ProducerTransactionRecord,
notifications: scala.collection.mutable.ListBuffer[Unit => Unit],
batch: Batch) =
{
val binaryTxn = producerTransactionRecord.producerTransaction.toByteArray
val binaryKey = producerTransactionRecord.key.toByteArray
if (producerTransactionRecord.state == TransactionStates.Checkpointed) {
updateLastCheckpointedTransactionAndPutToDatabase(
stateHandler.KeyStreamPartition(
producerTransactionRecord.stream,
producerTransactionRecord.partition
),
producerTransactionRecord,
batch
)
}
transactionsRamTable.put(producerTransactionRecord.key, producerTransactionRecord.producerTransaction)
if (producerTransactionRecord.state == TransactionStates.Opened) {
batch.put(RocksStorage.TRANSACTION_OPEN_STORE, binaryKey, binaryTxn)
}
else {
batch.remove(RocksStorage.TRANSACTION_OPEN_STORE, binaryKey)
}
if (areThereAnyProducerNotifies)
notifications += tryCompleteProducerNotify(producerTransactionRecord)
batch.put(RocksStorage.TRANSACTION_ALL_STORE, binaryKey, binaryTxn)
if (logger.isDebugEnabled)
logger.debug(s"Producer transaction on stream: ${producerTransactionRecord.stream}" +
s"partition ${producerTransactionRecord.partition}, transactionId ${producerTransactionRecord.transactionID} " +
s"with state ${producerTransactionRecord.state} is ready for commit[commit id: ${batch.id}]"
)
}
private def putTransactions(transactions: Seq[(com.bwsw.tstreamstransactionserver.rpc.Transaction, Long)],
batch: Batch): ListBuffer[Unit => Unit] = {
val (producerTransactions, consumerTransactions) =
decomposeTransactionsToProducerTxnsAndConsumerTxns(transactions, batch)
val groupedProducerTransactionsWithTimestamp =
groupProducerTransactionsByStreamAndDecomposeThemToDatabaseRepresentation(producerTransactions)
val notifications = new scala.collection.mutable.ListBuffer[Unit => Unit]()
groupedProducerTransactionsWithTimestamp.foreach { case (stream, dbProducerTransactions) =>
val groupedProducerTransactions = groupProducerTransactions(dbProducerTransactions)
groupedProducerTransactions foreach { case (key, txns) =>
        // retrieving an opened transaction from the opened transaction database if it exists
val openedTransactionOpt = getOpenedTransaction(key)
openedTransactionOpt match {
case Some(data) =>
val persistedProducerTransactionRocks = ProducerTransactionRecord(key, data)
if (logger.isDebugEnabled)
logger.debug(
s"Transiting producer transaction on stream: ${persistedProducerTransactionRocks.stream}" +
s"partition ${persistedProducerTransactionRocks.partition}, " +
s"transaction ${persistedProducerTransactionRocks.transactionID} " +
s"with state ${persistedProducerTransactionRocks.state} to new state"
)
val producerTransaction =
transitProducerTransactionToNewState(persistedProducerTransactionRocks, txns)
producerTransaction.foreach { transaction =>
putTransactionToAllAndOpenedTables(transaction, notifications, batch)
}
case None =>
if (logger.isDebugEnabled)
logger.debug(s"Trying to put new producer transaction on stream ${key.stream}.")
val producerTransaction = transitProducerTransactionToNewState(txns)
producerTransaction.foreach { transaction =>
putTransactionToAllAndOpenedTables(transaction, notifications, batch)
}
}
}
}
val consumerTransactionsToProcess = decomposeConsumerTransactionsToDatabaseRepresentation(consumerTransactions)
    val notificationAboutProducerAndConsumerTransactions = if (consumerTransactionsToProcess.nonEmpty)
notifications ++ consumerService.putConsumersCheckpoints(consumerTransactionsToProcess, batch)
else
notifications
    notificationAboutProducerAndConsumerTransactions
}
private val commitLogDatabase = rocksMetaServiceDB.getDatabase(RocksStorage.COMMIT_LOG_STORE)
private[server] final def getLastProcessedCommitLogFileID: Option[Long] = {
val iterator = commitLogDatabase.iterator
iterator.seekToLast()
val id = if (iterator.isValid)
Some(CommitLogKey.fromByteArray(iterator.key()).id)
else
None
iterator.close()
id
}
private[server] final def getProcessedCommitLogFiles: ArrayBuffer[Long] = {
val processedCommitLogFiles = scala.collection.mutable.ArrayBuffer[Long]()
val iterator = commitLogDatabase.iterator
iterator.seekToFirst()
while (iterator.isValid) {
processedCommitLogFiles += CommitLogKey.fromByteArray(iterator.key()).id
iterator.next()
}
iterator.close()
processedCommitLogFiles
}
final class BigCommit(fileID: Long) {
private val batch = rocksMetaServiceDB.newBatch
private val notifications = new scala.collection.mutable.ListBuffer[Unit => Unit]
def putSomeTransactions(transactions: Seq[(com.bwsw.tstreamstransactionserver.rpc.Transaction, Long)]): Unit = {
if (logger.isDebugEnabled)
logger.debug("Adding to commit new transactions from commit log file.")
notifications ++= putTransactions(transactions, batch)
}
def commit(): Boolean = {
val key = CommitLogKey(fileID).toByteArray
val value = Array[Byte]()
batch.put(RocksStorage.COMMIT_LOG_STORE, key, value)
if (batch.write()) {
if (logger.isDebugEnabled) logger.debug(s"commit ${batch.id} is successfully fixed.")
notifications foreach (notification => notification(()))
true
} else {
false
}
}
}
def getBigCommit(fileID: Long) = new BigCommit(fileID)
final def getTransaction(streamID: Int, partition: Int, transaction: Long): com.bwsw.tstreamstransactionserver.rpc.TransactionInfo = {
val lastTransaction = getLastTransactionIDAndCheckpointedID(streamID, partition)
if (lastTransaction.isEmpty || transaction > lastTransaction.get.opened.id) {
TransactionInfo(exists = false, None)
} else {
val searchKey = new ProducerTransactionKey(streamID, partition, transaction).toByteArray
Option(producerTransactionsDatabase.get(searchKey)).map(searchData =>
new ProducerTransactionRecord(ProducerTransactionKey.fromByteArray(searchKey), ProducerTransactionValue.fromByteArray(searchData))
) match {
case None =>
TransactionInfo(exists = true, None)
case Some(producerTransactionRecord) =>
TransactionInfo(exists = true, Some(producerTransactionRecord))
}
}
}
final def getLastCheckpointedTransaction(streamID: Int, partition: Int): Option[Long] = {
val result = getLastTransactionIDAndCheckpointedID(streamID, partition) match {
case Some(last) => last.checkpointed match {
case Some(checkpointed) => Some(checkpointed.id)
case None => None
}
case None => None
}
result
}
private val comparator = com.bwsw.tstreamstransactionserver.`implicit`.Implicits.ByteArray
def scanTransactions(streamID: Int, partition: Int, from: Long, to: Long, count: Int, states: collection.Set[TransactionStates]): com.bwsw.tstreamstransactionserver.rpc.ScanTransactionsInfo =
{
val (lastOpenedTransactionID, toTransactionID) = getLastTransactionIDAndCheckpointedID(streamID, partition) match {
case Some(lastTransaction) => lastTransaction.opened.id match {
case lt if lt < from => (lt, from - 1L)
case lt if from <= lt && lt < to => (lt, lt)
case lt if lt >= to => (lt, to)
}
case None => (-1L, from - 1L)
}
if (logger.isDebugEnabled) logger.debug(s"Trying to retrieve transactions on stream $streamID, partition: $partition in range [$from, $to]." +
s"Actually as lt ${if (lastOpenedTransactionID == -1) "doesn't exist" else s"is $lastOpenedTransactionID"} the range is [$from, $toTransactionID].")
if (toTransactionID < from || count == 0) ScanTransactionsInfo(lastOpenedTransactionID, Seq())
else {
val iterator = producerTransactionsDatabase.iterator
val lastTransactionID = new ProducerTransactionKey(streamID, partition, toTransactionID).toByteArray
def moveCursorToKey: Option[ProducerTransactionRecord] = {
val keyFrom = new ProducerTransactionKey(streamID, partition, from)
iterator.seek(keyFrom.toByteArray)
val startKey = if (iterator.isValid && comparator.compare(iterator.key(), lastTransactionID) <= 0) {
Some(
new ProducerTransactionRecord(
ProducerTransactionKey.fromByteArray(iterator.key()),
ProducerTransactionValue.fromByteArray(iterator.value())
)
)
} else None
iterator.next()
startKey
}
moveCursorToKey match {
case None =>
iterator.close()
ScanTransactionsInfo(lastOpenedTransactionID, Seq())
case Some(producerTransactionKey) =>
val producerTransactions = ArrayBuffer[ProducerTransactionRecord](producerTransactionKey)
var txnState: TransactionStates = producerTransactionKey.state
while (
iterator.isValid &&
producerTransactions.length < count &&
!states.contains(txnState) &&
(comparator.compare(iterator.key(), lastTransactionID) <= 0)
) {
val producerTransaction =
ProducerTransactionRecord(
ProducerTransactionKey.fromByteArray(iterator.key()),
ProducerTransactionValue.fromByteArray(iterator.value())
)
txnState = producerTransaction.state
producerTransactions += producerTransaction
iterator.next()
}
iterator.close()
val result = if (states.contains(txnState))
producerTransactions.init
else
producerTransactions
ScanTransactionsInfo(lastOpenedTransactionID, result)
}
}
}
def transactionsToDeleteTask(timestampToDeleteTransactions: Long) {
def doesProducerTransactionExpired(producerTransactionWithoutKey: ProducerTransactionValue): Boolean = {
scala.math.abs(
producerTransactionWithoutKey.timestamp +
producerTransactionWithoutKey.ttl
) <= timestampToDeleteTransactions
}
if (logger.isDebugEnabled)
logger.debug(s"Cleaner[time: $timestampToDeleteTransactions] of expired transactions is running.")
val batch = rocksMetaServiceDB.newBatch
val iterator = producerTransactionsWithOpenedStateDatabase.iterator
iterator.seekToFirst()
val notifications = new ListBuffer[Unit => Unit]()
while (iterator.isValid) {
val producerTransactionValue = ProducerTransactionValue.fromByteArray(iterator.value())
if (doesProducerTransactionExpired(producerTransactionValue)) {
if (logger.isDebugEnabled)
logger.debug(s"Cleaning $producerTransactionValue as it's expired.")
val producerTransactionValueTimestampUpdated = producerTransactionValue.copy(timestamp = timestampToDeleteTransactions)
val key = iterator.key()
val producerTransactionKey = ProducerTransactionKey.fromByteArray(key)
val canceledTransactionRecordDueExpiration =
transitProducerTransactionToInvalidState(ProducerTransactionRecord(producerTransactionKey, producerTransactionValueTimestampUpdated))
if (areThereAnyProducerNotifies)
notifications += tryCompleteProducerNotify(ProducerTransactionRecord(producerTransactionKey, canceledTransactionRecordDueExpiration.producerTransaction))
transactionsRamTable.invalidate(producerTransactionKey)
batch.put(RocksStorage.TRANSACTION_ALL_STORE, key, canceledTransactionRecordDueExpiration.producerTransaction.toByteArray)
batch.remove(RocksStorage.TRANSACTION_OPEN_STORE, key)
}
iterator.next()
}
iterator.close()
batch.write()
notifications.foreach(notification => notification(()))
}
final def createAndExecuteTransactionsToDeleteTask(timestampToDeleteTransactions: Long): Unit = {
transactionsToDeleteTask(timestampToDeleteTransactions)
}
}
| bwsw/tstreams-transaction-server | src/main/scala/com/bwsw/tstreamstransactionserver/netty/server/transactionMetadataService/TransactionMetaServiceImpl.scala | Scala | apache-2.0 | 20,005 |
package szmq
import org.zeromq.ZMQ.Socket
/**
* Author: Yuri Buyanov
* Date: 7/29/11 7:02 PM
*/
abstract sealed class Endpoint(addresses: String*) {
def plug(s: Socket) {
addresses foreach (_plug(s, _))
}
def _plug(s: Socket, address: String)
}
case class BindTo(addresses: String*) extends Endpoint(addresses: _*) {
def _plug(s: Socket, address: String) { s bind address }
}
case class ConnectTo(addresses: String*) extends Endpoint(addresses: _*) {
def _plug(s: Socket, address: String) { s connect address }
} | elegion/Scala-ZMQ-RPC | src/main/scala/szmq/Endpoint.scala | Scala | mit | 536 |
package nl.soqua.lcpi.repl.parser
import nl.soqua.lcpi.repl.monad.ReplMonad.Repl
import org.scalatest.Matchers
trait StdInParserTester {
protected implicit class StdinParserTester(val line: String) extends Matchers {
def >>(expected: Repl[_]): Unit =
StdInParser(line).fold(ex => {
fail(s"Parsing of std in line '$line' failed: $ex")
}, result => {
result shouldBe expected
})
}
}
| kevinvandervlist/lcpi | repl/src/test/scala/nl/soqua/lcpi/repl/parser/StdInParserTester.scala | Scala | mit | 427 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.stats
/**
* Used to generally classify a stat or effect such as Saving Throws or Movement. These should
* generally correspond to the Game Client Menu although some are not visible from the UI NOTE: May
* not exactly match in game UI for UX purposes. (i.e. Dodge chance / AC / Incorporeal should fall
* under Avoidance Defense although we are expecting it to appear under main display)
*/
sealed trait Category
/**
* Corresponds to effects contributing to your total Miss-chance. These include concealment / dodge
* / incorporeal etc. https://ddowiki.com/page/Miss_chance
*/
trait MissChance extends Category
/**
* Represents a Penalty to target / Enemy Effects like Sunder.
*/
trait MissChancePenalty extends Category
/**
* Corresponds to effects that affect your chance to hit something.
*/
trait HitChance extends Category
/**
* Corresponds to damage reduction from 'Elemental' sources such as Fire. This includes protection /
* resistance and absorption.
*/
trait ElementalDefenses extends Category
/**
* Corresponds to saves vs general / specific spells, traps etc.
*/
trait SavingThrows extends Category
/**
* Corresponds to your movement speed.
*/
trait Movement extends Category
/**
* Special Attacks such as Cleave, Trip, Shattermantle or Spring Attack
*/
trait SpecialAttack extends Category
/**
 * A very general category that applies to general and specific spell casting. These should include
 * casting costs, cool-downs, and stopping abilities (silence / deafness). Spell power MAY fall under
 * this, or possibly be split into another category covering Critical Multiplier / Potency and
 * Universal Spell Power-like effects.
*/
trait SpellCasting extends Category
/**
* General Combat should encompass Tactical feats and others not specific to Melee or Ranged.
*/
trait GeneralCombat extends Category
/**
* Hand to hand specific effects and abilities.
*/
trait MeleeCombat extends Category
/**
* Ranged spell and weapon effects.
*/
trait RangedCombat extends Category
/**
 * Effects that alter the power, range or other aspects of Turn Undead. This should also include
 * things that increase the number of Turns.
*/
trait TurnUndead extends Category
/**
 * General, Misc. or Main stats that generally appear on the main character sheet, such as Base
 * Attack Bonus.
* @note
* BAB may be moved to general combat.
*/
trait General extends Category
/**
* Not super useful by itself, but indicates the effect provides an ability. The power and extent of
* that ability may depend on other things. Examples may include Bard Songs, Sunder, Attack etc.
*/
trait Ability extends Category
/**
* Generally your Hit Points
*/
trait Health extends Category
trait SpellPointPool extends Category
trait Proficiency extends Category
/**
* Affects unconscious range, recovery (Die hard)
*/
trait Recovery extends Category
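/**
 * Illustrative sketch (added for clarity; not part of the original model): a concrete stat or
 * effect marker would simply mix in the category (or categories) it belongs to. The names below
 * are hypothetical examples only.
 *
 * {{{
 * case object DodgeBonus extends MissChance
 * case object TripAttack extends SpecialAttack with MeleeCombat
 * }}}
 */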
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/stats/Category.scala | Scala | apache-2.0 | 3,659 |
package com.github.mdr.graphospasm.grapheditor.model
import com.github.mdr.graphospasm.core.graph.mutable._
import com.github.mdr.graphospasm.grapheditor.utils.Utils
import com.github.mdr.graphospasm.core.graph._
import org.eclipse.draw2d.FigureUtilities
import org.eclipse.swt.graphics.GC
import org.eclipse.draw2d.geometry._
import org.eclipse.draw2d._
import org.eclipse.swt.widgets.Shell
import scala.math.max
object GraphDiagram {
def fromGraph(graph: Graph): GraphDiagram = {
Utils.withFont { font ⇒
val diagram = new GraphDiagram
var vertexToNode = Map[Vertex, Node]()
for (vertex ← graph.vertices) {
val node = new Node(vertex.name)
for ((name, value) ← vertex.attributes)
node.addAttribute(name, value)
val height = NodeContentsLayouter.layout(node, font).minimumRequiredHeight
val width = NodeContentsLayouter.preferredWidth(node, font)
node.bounds = new Rectangle(150, 150, width, height)
diagram.add(node)
vertexToNode = vertexToNode + (vertex -> node)
}
for (edge ← graph.edges) {
val connection = Connection.connect(vertexToNode(edge.source), vertexToNode(edge.target))
connection.nameOpt = edge.nameOpt
}
diagram
}
}
// def toGraph(graphDiagram: GraphDiagram): Graph = {
// val graph = new MutableGraphImpl
// for (node ← graphDiagram.nodes) {
// // graph.ad
// }
// graph
// }
}
class GraphDiagram extends Observable {
private var nodes_ : List[Node] = Nil
def nodes = nodes_
def add(node: Node) {
insert(node, nodes_.length)
}
def insert(node: Node, index: Int) {
nodes_ = nodes_.patch(index, List(node), 0)
node.setDiagram(this)
fireEvent(NodeInserted(node, index))
}
def remove(node: Node) {
val index = indexOf(node)
nodes_ = nodes_ filterNot { _ == node }
node.unsetDiagram()
fireEvent(NodeRemoved(node, index))
}
def indexOf(thing: Node) = nodes_ indexOf thing
}
| mdr/graphospasm | com.github.mdr.graphospasm.grapheditor/src/main/scala/com/github/mdr/graphospasm/grapheditor/model/GraphDiagram.scala | Scala | mit | 2,037 |
package spinoco.protocol.mgcp
object BearerAttribute {
case class BearerEncoding(tpe: BearerEncodingType.Value) extends BearerAttribute
case class BearerExtensionName(nameAndValue: String) extends BearerAttribute
}
sealed trait BearerAttribute
object BearerEncodingType extends Enumeration {
val ALaw = Value("A")
val uLaw = Value("mu")
} | Spinoco/protocol | mgcp/src/main/scala/spinoco/protocol/mgcp/BearerAttribute.scala | Scala | mit | 354 |
package org.maproulette.models.dal.mixin
import java.sql.Connection
import anorm._
import org.maproulette.data.ItemType
import org.maproulette.exception.LockedException
import org.maproulette.models.BaseObject
import org.maproulette.models.utils.{DALHelper, TransactionManager}
import org.maproulette.session.User
/**
* @author mcuthbert
*/
trait Locking[T <: BaseObject[_]] extends TransactionManager {
this: DALHelper =>
/**
* Unlocks an item in the database
*
* @param user The user requesting to unlock the item
* @param item The item being unlocked
* @param c A sql connection that is implicitly passed in from the calling function, this is an
* implicit function because this will always be called from within the code and never
* directly from an API call
* @return true if successful
*/
def unlockItem(user: User, item: T)(implicit c: Option[Connection] = None): Int =
this.withMRTransaction { implicit c =>
val checkQuery =
s"""SELECT user_id FROM locked WHERE item_id = {itemId} AND item_type = ${item.itemType.typeId} FOR UPDATE"""
SQL(checkQuery)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.as(SqlParser.long("user_id").singleOpt) match {
case Some(id) =>
if (id == user.id) {
val query =
s"""DELETE FROM locked WHERE user_id = ${user.id} AND item_id = {itemId} AND item_type = ${item.itemType.typeId}"""
SQL(query)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.executeUpdate()
} else {
throw new LockedException(
s"Item [${item.id}] currently locked by different user. [${user.id}"
)
}
case None =>
throw new LockedException(s"Item [${item.id}] trying to unlock does not exist.")
}
}
/**
* Refreshes an existing lock on an item in the database, extending its allowed duration
*
* @param user The user requesting to refresh the lock (and who must also own it)
* @param item The locked item
* @param c A sql connection that is implicitly passed in from the calling function, this is an
* implicit function because this will always be called from within the code and never
* directly from an API call
* @return true if successful
*/
def refreshItemLock(user: User, item: T)(implicit c: Option[Connection] = None): Int =
this.withMRTransaction { implicit c =>
val checkQuery =
s"""SELECT user_id FROM locked WHERE item_id = {itemId} AND item_type = ${item.itemType.typeId} FOR UPDATE"""
SQL(checkQuery)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.as(SqlParser.long("user_id").singleOpt) match {
case Some(id) =>
if (id == user.id) {
val query =
s"""UPDATE locked set locked_time=NOW() WHERE user_id = ${user.id} AND item_id = {itemId} AND item_type = ${item.itemType.typeId}"""
SQL(query)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.executeUpdate()
} else {
throw new LockedException(
s"Item [${item.id}] currently locked by different user. [${user.id}]"
)
}
case None => throw new LockedException(s"Lock on item [${item.id}] does not exist.")
}
}
/**
   * Method to lock all items returned in the lambda block. It will first unlock all items
* that have been locked by the user.
*
* @param user The user making the request
* @param itemType The type of item that will be locked
   * @param block    The block of code to execute in between unlocking and locking items
* @param c The connection
* @return List of objects
*/
def withListLocking(user: User, itemType: Option[ItemType] = None)(
block: () => List[T]
)(implicit c: Option[Connection] = None): List[T] = {
this.withMRTransaction { implicit c =>
// if a user is requesting a task, then we can unlock all other tasks for that user, as only a single
// task can be locked at a time
this.unlockAllItems(user, itemType)
val results = block()
      // once we have the tasks, we need to lock each one; any that fail to lock are removed
      // from the list. A guest user will not lock any tasks, but a logged-in user will be
      // required to refetch the current task and, if it is locked, will have to get another
      // task
if (!user.guest) {
        // any items that fail to lock are dropped; an empty result simply yields an empty list
        results.filter(lockItem(user, _) > 0)
} else {
results
}
}
}
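  // Usage sketch (illustrative only; `taskDAL`, `user` and `TaskType` are assumed names from
  // calling code, not members of this trait): lock every item returned by a query in one step,
  // dropping any item that is already locked by another user.
  //
  //   val lockedTasks = taskDAL.withListLocking(user, Some(TaskType())) { () =>
  //     taskDAL.retrieveList(limit = 5)
  //   }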
/**
* Method to lock a single optional item returned in a lambda block. It will first unlock all items
* that have been locked by the user
*
* @param user The user making the request
* @param itemType The type of item that will be locked
   * @param block    The block of code to execute in between unlocking and locking items
* @param c The connection
* @return Option object
*/
def withSingleLocking(user: User, itemType: Option[ItemType] = None)(
block: () => Option[T]
)(implicit c: Option[Connection] = None): Option[T] = {
this.withMRTransaction { implicit c =>
// if a user is requesting a task, then we can unlock all other tasks for that user, as only a single
// task can be locked at a time
this.unlockAllItems(user, itemType)
val result = block()
if (!user.guest) {
result match {
case Some(r) => lockItem(user, r)
case None => // ignore
}
}
result
}
}
/**
* Locks an item in the database.
*
* @param user The user requesting the lock
* @param item The item wanting to be locked
* @param c A sql connection that is implicitly passed in from the calling function, this is an
* implicit function because this will always be called from within the code and never
* directly from an API call
* @return true if successful
*/
def lockItem(user: User, item: T)(implicit c: Option[Connection] = None): Int =
this.withMRTransaction { implicit c =>
// first check to see if the item is already locked
val checkQuery =
s"""SELECT user_id FROM locked WHERE item_id = {itemId} AND item_type = ${item.itemType.typeId} FOR UPDATE"""
SQL(checkQuery)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.as(SqlParser.long("user_id").singleOpt) match {
case Some(id) =>
if (id == user.id) {
val query =
s"UPDATE locked SET locked_time = NOW() WHERE user_id = ${user.id} AND item_id = {itemId} AND item_type = ${item.itemType.typeId}"
SQL(query)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.executeUpdate()
} else {
0
//throw new LockedException(s"Could not acquire lock on object [${item.id}, already locked by user [$id]")
}
case None =>
val query =
s"INSERT INTO locked (item_type, item_id, user_id) VALUES (${item.itemType.typeId}, {itemId}, ${user.id})"
SQL(query)
.on(Symbol("itemId") -> ParameterValue.toParameterValue(item.id)(p = keyToStatement))
.executeUpdate()
}
}
/**
* Unlocks all the items that are associated with the current user
*
* @param user The user
* @param c an implicit connection, this function should generally be executed in conjunction
* with other requests
* @return Number of locks removed
*/
def unlockAllItems(user: User, itemType: Option[ItemType] = None)(
implicit c: Option[Connection] = None
): Int =
this.withMRTransaction { implicit c =>
itemType match {
case Some(it) =>
SQL"""DELETE FROM locked WHERE user_id = ${user.id} AND item_type = ${it.typeId}"""
.executeUpdate()
case None =>
SQL"""DELETE FROM locked WHERE user_id = ${user.id}""".executeUpdate()
}
}
}
| Crashfreak/maproulette2 | app/org/maproulette/models/dal/mixin/Locking.scala | Scala | apache-2.0 | 8,494 |
import angular.AngularScala.Angular
import scala.scalajs.js
/**
* Created by pwootage on 8/2/14.
*/
package object angular extends js.GlobalScope {
val angular: Angular = ???
val bridge: js.Dynamic = ???
}
| Pwootage/angular-scala | src/main/scala/angular/package.scala | Scala | mit | 213 |
package org.mitre.mandolin.util
/**
* @author wellner
*/
object Sampling {
def sampleWithoutReplacement(ftable: Array[Int], n: Int, initialResults: List[Int]) : List[Int] = {
val ll = ftable.length
var na = initialResults.length
var results = initialResults
while (na < n) {
var ri = util.Random.nextInt(ll)
val toAdd = ftable(ri)
if (!results.contains(toAdd)) {
na += 1
results = toAdd :: results
}
}
results
}
  /** Sample n unique indices, ranging from 0 to bigN-1, uniformly.
   *  This simple approach works well when n << bigN; technically it is O(n^2). */
def sampleWithoutReplacement(bigN: Int, n: Int, initialResults: List[Int]) : List[Int] = {
if ((n.toDouble / bigN) > 0.1) sampleWithoutReplacementSmall(bigN, n, initialResults) else {
var results: List[Int] = initialResults
val rv = util.Random
var m = 0
while (m < n) {
val id = util.Random.nextInt(bigN)
if (!results.contains(id)) {
m += 1
results = id :: results
}
}
results
}
}
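  // Usage sketch (illustrative only, not part of the original API):
  //   Sampling.sampleWithoutReplacement(bigN = 1000, n = 10, initialResults = Nil)
  // returns a list of 10 distinct indices drawn uniformly from 0 until 1000; when the requested
  // sample is a large fraction of bigN (n / bigN > 0.1) the call delegates to
  // sampleWithoutReplacementSmall.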
def shuffle(a: Array[Int]) : Unit = {
val ll = a.length
var mx = ll
var i = 0; while (i < mx) {
if (util.Random.nextBoolean) {
val ni = util.Random.nextInt(ll - i)
val tt = a(i)
a(i) = a(ni)
a(ni) = tt
mx -= 1
}
i += 1
}
}
def sampleWithoutReplacementSmall(bigN: Int, n: Int, initialResults: List[Int] = Nil) : List[Int] = {
val els = (for (i <- 0 until bigN if !initialResults.contains(i)) yield i).toList
val elsArray = els.toArray
shuffle(elsArray)
var results = initialResults
for (i <- 0 until n) results = elsArray(i) :: results
results
}
} | project-mandolin/mandolin | mandolin-core/src/main/scala/org/mitre/mandolin/util/Sampling.scala | Scala | apache-2.0 | 1,750 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.api.generation.common
trait ConfigurationsBase {
val ignoredCols = ExplicitField("ignored_columns", "HasIgnoredCols", null, None, Some("HasIgnoredColsOnMOJO"))
val defaultValuesOfCommonParameters = Map(
"convertUnknownCategoricalLevelsToNa" -> false,
"convertInvalidNumbersToNa" -> false,
"validationDataFrame" -> null,
"splitRatio" -> 1.0,
"columnsToCategorical" -> Array.empty[String],
"keepBinaryModels" -> false,
"dataFrameSerializer" -> "ai.h2o.sparkling.utils.JSONDataFrameSerializer")
def algorithmConfiguration: Seq[AlgorithmSubstitutionContext] = Seq.empty
def parametersConfiguration: Seq[ParameterSubstitutionContext] = Seq.empty
def modelOutputConfiguration: Seq[ModelOutputSubstitutionContext] = Seq.empty
}
| h2oai/sparkling-water | api-generation/src/main/scala/ai/h2o/sparkling/api/generation/common/ConfigurationsBase.scala | Scala | apache-2.0 | 1,591 |
package bynull.functional.functional.idiom
import scala.language.higherKinds
/**
* Created by null on 7/9/16.
*/
object Idioms {
trait Monoid[T] {
/**
* Identity value
*/
def zero: T
/**
* Associative operation
* @param a parameter
* @param b parameter
* @return
*/
def op(a: T, b: T): T
}
trait Monad[M[_]] {
def apply[T](a: T): M[T]
def flatMap[T, U](m: M[T])(f: T=>M[U]): M[U]
}
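  // Illustrative example instances (a sketch added for clarity; `Examples`, `intAdditionMonoid`
  // and `optionMonad` are assumed names, not part of the original object).
  object Examples {
    val intAdditionMonoid: Monoid[Int] = new Monoid[Int] {
      def zero: Int = 0
      def op(a: Int, b: Int): Int = a + b
    }
    val optionMonad: Monad[Option] = new Monad[Option] {
      def apply[T](a: T): Option[T] = Some(a)
      def flatMap[T, U](m: Option[T])(f: T => Option[U]): Option[U] = m.flatMap(f)
    }
  }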
}
| xnull/insight | scala/src/main/scala/bynull/functional/functional/idiom/Idioms.scala | Scala | apache-2.0 | 470 |
/**
* Copyright (c) 2014-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow
package storage
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
// Specs2
import org.specs2.mutable.Specification
import org.specs2.scalaz.ValidationMatchers
// Snowplow
import enrich.common.utils.ScalazJson4sUtils
/**
* Tests BadEventTransformer
*/
class BadEventTransformerSpec extends Specification with ValidationMatchers {
"The from method" should {
"successfully convert a bad event JSON to an ElasticsearchObject" in {
val input =
"""{"line":"failed","errors":["Record does not match Thrift SnowplowRawEvent schema"]}"""
val result = new BadEventTransformer("snowplow", "bad")
.fromClass(input -> JsonRecord(input, None).success)
val elasticsearchObject =
result._2.getOrElse(throw new RuntimeException("Bad event failed transformation"))
elasticsearchObject.getIndex must_== "snowplow"
elasticsearchObject.getType must_== "bad"
elasticsearchObject.getSource must_== input
}
}
}
| TimothyKlim/snowplow | 4-storage/kafka-elasticsearch-sink/src/test/scala/com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch/BadEventTransformerSpec.scala | Scala | apache-2.0 | 1,789 |
trait Foo[F[_]]
trait Bar[F[_], A]
trait Or[A, B]
class Test {
implicit def orFoo[A]: Foo[({type L[X] = Or[A, X]})#L] = ???
implicit def barFoo[F[_]](implicit f: Foo[F]): Foo[({type L[X] = Bar[F, X]})#L] = ???
// Now we can define a couple of type aliases:
type StringOr[X] = Or[String, X]
type BarStringOr[X] = Bar[StringOr, X]
// ok
implicitly[Foo[BarStringOr]]
barFoo[StringOr](null) : Foo[BarStringOr]
barFoo(null) : Foo[BarStringOr]
// nok
implicitly[Foo[({type L[X] = Bar[StringOr, X]})#L]]
// Let's write the application explicitly, and then
// compile with just this line enabled and -explaintypes.
barFoo(null) : Foo[({type L[X] = Bar[StringOr, X]})#L]
// Foo[[X]Bar[F,X]] <: Foo[[X]Bar[[X]Or[String,X],X]]?
// Bar[[X]Or[String,X],X] <: Bar[F,X]?
// F[_] <: Or[String,_]?
// false
// false
// false
// Note that the type annotation above is typechecked as
// Foo[[X]Bar[[X]Or[String,X],X]], ie the type alias `L`
// is eta expanded.
//
// This is done so that it does not escape its defining scope.
  // However, once this is done, higher kinded inference
// no longer is able to unify F with `StringOr` (scala/bug#2712)
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t6895b.scala | Scala | apache-2.0 | 1,202 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
/**
* Provides three implicit methods that loosen the equality constraint defined by <code>TypeCheckedTripleEquals</code> or <code>ConversionCheckedTripleEquals</code>
* for Scala <code>Traversable</code>s to one that more closely matches Scala's approach to <code>Traversable</code> equality.
*
* <p>
 * Scala's approach to <code>Traversable</code> equality is that if the objects being compared are either both <code>Seq</code>s, both <code>Set</code>s,
* or both <code>Map</code>s, the elements are compared to determine equality.
* This means you could compare an immutable <code>Vector</code> and a mutable <code>ListBuffer</code> for equality, for instance, and get true so long as the
* two <code>Seq</code>s contained the same elements in the same order. Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> import scala.collection.mutable.ListBuffer
* import scala.collection.mutable.ListBuffer
*
* scala> Vector(1, 2) == ListBuffer(1, 2)
* res0: Boolean = true
* </pre>
*
* <p>
* Such a comparison would not, however, compile if you used <code>===</code> under either <code>TypeCheckedTripleEquals</code> or <code>ConversionCheckedTripleEquals</code>,
* because <code>Vector</code> and <code>ListBuffer</code> are not in a subtype/supertype relationship, nor does an implicit conversion by default exist between them:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalactic._
* import org.scalactic._
*
* scala> import TypeCheckedTripleEquals._
* import TypeCheckedTripleEquals._
*
* scala> Vector(1, 2) === ListBuffer(1, 2)
* <console>:16: error: types scala.collection.immutable.Vector[Int] and
* scala.collection.mutable.ListBuffer[Int] do not adhere to the equality constraint selected for
* the === and !== operators; the missing implicit parameter is of type
* org.scalactic.EqualityConstraint[scala.collection.immutable.Vector[Int],
* scala.collection.mutable.ListBuffer[Int]]
* Vector(1, 2) === ListBuffer(1, 2)
* ^
* </pre>
*
* <p>
* If you mix or import the implicit conversion provided by <code>TraversableEqualityConstraint</code>, however, the comparison will be allowed:
* </p>
*
* <pre class="stREPL">
* scala> import TraversableEqualityConstraints._
* import TraversableEqualityConstraints._
*
* scala> Vector(1, 2) === ListBuffer(1, 2)
* res2: Boolean = true
* </pre>
*
* <p>
* The equality constraints provided by this trait require that left and right sides are both subclasses of either <code>scala.collection.GenSeq</code>,
* <code>scala.collection.GenSet</code>, or <code>scala.collection.GenMap</code>, and that
* an <code>EqualityConstraint</code> can be found for the element types for <code>Seq</code> and <code>Set</code>, or the key and value types for <code>Map</code>s. In
* the example above, both the <code>Vector</code> and
* <code>ListBuffer</code> are subclasses of <code>scala.collection.GenSeq</code>, and the regular <code>TypeCheckedTripleEquals</code> provides equality
* constraints for the element types, both of which are <code>Int</code>. By contrast, this
* trait would not allow a <code>Vector[Int]</code> to be compared against a <code>ListBuffer[java.util.Date]</code>, because no equality constraint
* will exist between the element types <code>Int</code> and <code>Date</code>:
* </p>
*
* <pre class="stREPL">
* scala> import java.util.Date
* import java.util.Date
*
* scala> Vector(1, 2) === ListBuffer(new Date, new Date)
* <console>:20: error: types scala.collection.immutable.Vector[Int] and
* scala.collection.mutable.ListBuffer[java.util.Date] do not adhere to the equality constraint selected for
* the === and !== operators; the missing implicit parameter is of type
* org.scalactic.EqualityConstraint[scala.collection.immutable.Vector[Int],
* scala.collection.mutable.ListBuffer[java.util.Date]]
* Vector(1, 2) === ListBuffer(new Date, new Date)
* ^
* </pre>
*
* <p>
* This trait simply mixes together <a href="SeqEqualityConstraints.html"><code>SeqEqualityConstraints</code></a>,
* <a href="SetEqualityConstraints.html"><code>SetEqualityConstraints</code></a>,
* and <a href="MapEqualityConstraints.html"><code>MapEqualityConstraints</code></a>.
* </p>
*
* @author Bill Venners
*/
@deprecated("TraversableEqualityConstraints has been deprecated and will be removed in a future version of ScalaTest. You should be able to just remove all mentions of TraversableEqualityConstriants, as the contraints it provided have been added to the Constraint companion object.")
trait TraversableEqualityConstraints extends SeqEqualityConstraints with SetEqualityConstraints with MapEqualityConstraints
/**
* Companion object that facilitates the importing of <code>TraversableEqualityConstraints</code> members as
* an alternative to mixing it in. One use case is to import <code>TraversableEqualityConstraints</code> members so you can use
* them in the Scala interpreter.
*/
@deprecated("TraversableEqualityConstraints has been deprecated and will be removed in a future version of ScalaTest. You should be able to just remove all mentions of TraversableEqualityConstriants, as the contraints it provided have been added to the Constraint companion object.")
object TraversableEqualityConstraints extends TraversableEqualityConstraints
| travisbrown/scalatest | src/main/scala/org/scalactic/TraversableEqualityConstraints.scala | Scala | apache-2.0 | 6,059 |
package spinoco.protocol.websocket
import org.scalacheck.{Gen, Prop, Properties}
import org.scalacheck.Prop._
import scodec.{Attempt, DecodeResult, Err}
import scodec.bits.{BitVector, ByteVector}
import spinoco.protocol.websocket.codec.WebSocketFrameCodec
object WebSocketFrameSpec extends Properties("WebSocketFrame") {
def decode(data: ByteVector):Attempt[WebSocketFrame] =
WebSocketFrameCodec.codec.decodeValue(data.bits)
def decode(hex: String):Attempt[WebSocketFrame] =
decode( ByteVector.fromHex(hex).get)
def encode(frame: WebSocketFrame):Attempt[BitVector] =
WebSocketFrameCodec.codec.encode(frame)
def decodeAndEncode(hex: String)(expect: WebSocketFrame): Prop = {
val data = ByteVector.fromHex(hex).get
decode(data).flatMap { wsf =>
encode(wsf).map { bits =>
"Decode" |: (wsf ?= expect) && ("Encode" |: (bits.bytes ?= data) )
}}.fold(err => err.toString() |: falsified, identity)
}
property("single-frame-unmasked") = secure {
decodeAndEncode("810548656c6c6f")(
WebSocketFrame(
fin = true
, rsv = (false, false, false)
, opcode = OpCode.Text
, payload = ByteVector("Hello".getBytes)
, mask = None
)
)
}
property("single-frame-masked") = secure {
decodeAndEncode("818537fa213d7f9f4d5158")(
WebSocketFrame(
fin = true
, rsv = (false, false, false)
, opcode = OpCode.Text
, payload = ByteVector("Hello".getBytes)
, mask = Some(939139389)
)
)
}
property("fragmented-frame-unmasked") = secure {
decodeAndEncode("010348656c")(
WebSocketFrame(
fin = false
, rsv = (false, false, false)
, opcode = OpCode.Text
, payload = ByteVector("Hel".getBytes)
, mask = None
)
) && decodeAndEncode("80026c6f")(
WebSocketFrame(
fin = true
, rsv = (false, false, false)
, opcode = OpCode.Continuation
, payload = ByteVector("lo".getBytes)
, mask = None
)
)
}
property("unmasked-ping") = secure {
decodeAndEncode("890548656c6c6f")(
WebSocketFrame(
fin = true
, rsv = (false, false, false)
, opcode = OpCode.Ping
, payload = ByteVector("Hello".getBytes)
, mask = None
)
)
}
property("masked-pong") = secure {
decodeAndEncode("8a8537fa213d7f9f4d5158")(
WebSocketFrame(
fin = true
, rsv = (false, false, false)
, opcode = OpCode.Pong
, payload = ByteVector("Hello".getBytes)
, mask = Some(939139389)
)
)
}
private val payloadLengths = {
val specialLengths = Seq(124, 125, 126, 127, 128, 256, 257, 65535, 65536, Int.MaxValue)
Gen.chooseNum(0, Long.MaxValue, specialLengths.map(_.toLong): _*)
}
property("binary-bytes") = forAll(payloadLengths.filter(_ < BigInt(2).pow(17))) { (length: Long) =>
(length >= 0) ==> {
val prefix = {
def lengthToHex(numChars: Int) = s"%${numChars}x".format(length).replace(" ", "0")
if (length <= 125) "82" + lengthToHex(2)
else if (length <= 65535) "827e" + lengthToHex(4)
else "827f" + lengthToHex(16)
}
val data = "aa"*length.toInt
decodeAndEncode(prefix + data)(
WebSocketFrame(
fin = true
, rsv = (false, false, false)
, opcode = OpCode.Binary
, payload = ByteVector.fromHex(data).get
, mask = None
)
)
}
}
property("payload-length") = forAll(payloadLengths) { (length: Long) =>
(length >= 0) ==> {
val codec = WebSocketFrameCodec.impl.payloadLength
val bits = {
if (length <= 125) BitVector.fromInt(length.toInt, 7)
else if (length <= 65535) BitVector.fromInt(126, 7) ++ BitVector.fromInt(length.toInt, 16)
else BitVector.fromInt(127, 7) ++ BitVector.fromLong(length)
}
if (length <= Int.MaxValue) {
val decodedLength = codec.decode(bits)
val encodedBits = codec.encode(length.toInt)
val decode = "Decode" |: (decodedLength ?= Attempt.successful(DecodeResult(length.toInt, BitVector.empty)))
val encode = "Encode" |: (encodedBits ?= Attempt.successful(bits))
decode && encode
} else {
codec.decodeValue(bits) ?= Attempt.failure(Err(s"Max supported size is ${Int.MaxValue}, got $length"))
}
}
}
}
| Spinoco/protocol | websocket/src/test/scala/spinoco/protocol/websocket/WebSocketFrameSpec.scala | Scala | mit | 4,488 |
package ru.org.codingteam.horta.plugins.pet.commands
import akka.actor.ActorRef
import ru.org.codingteam.horta.plugins.pet.{PtcUtils, PetData}
import ru.org.codingteam.horta.security.Credential
class RatingCommand extends AbstractCommand {
override def apply(pet: PetData, coins: ActorRef, credential: Credential, args: Array[String]): (PetData, String) = {
val coinData = PtcUtils.queryPTC(coins)
val users = coinData.toStream.sortBy(-_._2).take(10).filter(_._2 > 0)
val response = "\\n" + users.map(user => {
val name = user._1
val amount = user._2
s"$name: ${amount}PTC"
}).mkString("\\n")
(pet, response)
}
}
| codingteam/horta-hell | src/main/scala/ru/org/codingteam/horta/plugins/pet/commands/RatingCommand.scala | Scala | mit | 657 |
package beam.agentsim.agents
import akka.actor.{Actor, ActorRef, ActorSystem, PoisonPill, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestActorRef, TestFSMRef, TestKitBase, TestProbe}
import beam.agentsim.agents.PersonTestUtil._
import beam.agentsim.agents.choice.mode.ModeChoiceUniformRandom
import beam.agentsim.agents.household.HouseholdActor.HouseholdActor
import beam.agentsim.agents.modalbehaviors.DrivesVehicle.{AlightVehicleTrigger, BoardVehicleTrigger}
import beam.agentsim.agents.ridehail.{RideHailRequest, RideHailResponse}
import beam.agentsim.agents.vehicles.{ReservationRequest, ReservationResponse, ReserveConfirmInfo, _}
import beam.agentsim.events._
import beam.agentsim.infrastructure.{TrivialParkingManager, ZonalParkingManager}
import beam.agentsim.scheduler.BeamAgentScheduler
import beam.agentsim.scheduler.BeamAgentScheduler.{CompletionNotice, ScheduleTrigger, SchedulerProps, StartSchedule}
import beam.router.BeamRouter._
import beam.router.Modes.BeamMode
import beam.router.Modes.BeamMode.{RIDE_HAIL, RIDE_HAIL_TRANSIT, TRANSIT, WALK, WALK_TRANSIT}
import beam.router.model.RoutingModel.TransitStopsInfo
import beam.router.model.{EmbodiedBeamLeg, _}
import beam.router.osm.TollCalculator
import beam.router.{BeamSkimmer, RouteHistory, TravelTimeObserved}
import beam.utils.TestConfigUtils.testConfig
import beam.utils.{SimRunnerForTest, StuckFinder, TestConfigUtils}
import com.typesafe.config.{Config, ConfigFactory}
import com.vividsolutions.jts.geom.Envelope
import org.matsim.api.core.v01.events._
import org.matsim.api.core.v01.network.Link
import org.matsim.api.core.v01.{Coord, Id}
import org.matsim.core.api.experimental.events.{EventsManager, TeleportationArrivalEvent}
import org.matsim.core.config.ConfigUtils
import org.matsim.core.events.EventsManagerImpl
import org.matsim.core.events.handler.BasicEventHandler
import org.matsim.core.population.PopulationUtils
import org.matsim.core.population.routes.RouteUtils
import org.matsim.households.{Household, HouseholdsFactoryImpl}
import org.scalatest.FunSpecLike
import org.scalatest.mockito.MockitoSugar
import scala.collection.{mutable, JavaConverters}
class PersonAgentSpec
extends FunSpecLike
with TestKitBase
with SimRunnerForTest
with MockitoSugar
with ImplicitSender
with BeamvilleFixtures {
lazy val config: Config = ConfigFactory
.parseString(
"""
akka.log-dead-letters = 10
akka.actor.debug.fsm = true
akka.loglevel = debug
"""
)
.withFallback(testConfig("test/input/beamville/beam.conf"))
.resolve()
lazy implicit val system: ActorSystem = ActorSystem("PersonAgentSpec", config)
override def outputDirPath: String = TestConfigUtils.testOutputDir
private val householdsFactory: HouseholdsFactoryImpl = new HouseholdsFactoryImpl()
private lazy val modeChoiceCalculator = new ModeChoiceUniformRandom(beamConfig)
// Mock a transit driver (who has to be a child of a mock router)
private lazy val transitDriverProps = Props(new ForwardActor(self))
describe("A PersonAgent") {
val hoseHoldDummyId = Id.create("dummy", classOf[Household])
it("should allow scheduler to set the first activity") {
val eventsManager = new EventsManagerImpl()
eventsManager.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
self ! event
}
}
)
val scheduler =
TestActorRef[BeamAgentScheduler](
SchedulerProps(
beamConfig,
stopTick = 11,
maxWindow = 10,
new StuckFinder(beamConfig.beam.debug.stuckAgentDetection)
)
)
val parkingManager = system.actorOf(Props(new TrivialParkingManager))
val household = householdsFactory.createHousehold(hoseHoldDummyId)
val person = PopulationUtils.getFactory.createPerson(Id.createPersonId("dummyAgent"))
putDefaultBeamAttributes(person, Vector(WALK))
val homeActivity = PopulationUtils.createActivityFromLinkId("home", Id.createLinkId(1))
homeActivity.setStartTime(1.0)
homeActivity.setEndTime(10.0)
val plan = PopulationUtils.getFactory.createPlan()
plan.addActivity(homeActivity)
person.addPlan(plan)
val personAgentRef = TestFSMRef(
new PersonAgent(
scheduler,
services,
beamScenario,
modeChoiceCalculator,
beamScenario.transportNetwork,
self,
self,
eventsManager,
Id.create("dummyAgent", classOf[PersonAgent]),
plan,
parkingManager,
services.tollCalculator,
self,
beamSkimmer = new BeamSkimmer(beamScenario, services.geo),
routeHistory = new RouteHistory(beamConfig),
travelTimeObserved = new TravelTimeObserved(beamScenario, services.geo),
boundingBox = boundingBox
)
)
watch(personAgentRef)
scheduler ! ScheduleTrigger(InitializeTrigger(0), personAgentRef)
scheduler ! StartSchedule(0)
expectTerminated(personAgentRef)
expectMsg(CompletionNotice(0, Vector()))
}
// Hopefully deterministic test, where we mock a router and give the agent just one option for its trip.
it("should demonstrate a complete trip, throwing MATSim events") {
val eventsManager = new EventsManagerImpl()
eventsManager.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
self ! event
}
}
)
val household = householdsFactory.createHousehold(hoseHoldDummyId)
val population = PopulationUtils.createPopulation(matsimConfig)
val person = PopulationUtils.getFactory.createPerson(Id.createPersonId("dummyAgent"))
putDefaultBeamAttributes(person, Vector(RIDE_HAIL, RIDE_HAIL_TRANSIT, WALK))
val plan = PopulationUtils.getFactory.createPlan()
val homeActivity = PopulationUtils.createActivityFromLinkId("home", Id.createLinkId(1))
homeActivity.setEndTime(28800) // 8:00:00 AM
plan.addActivity(homeActivity)
val workActivity = PopulationUtils.createActivityFromLinkId("work", Id.createLinkId(2))
plan.addActivity(workActivity)
person.addPlan(plan)
population.addPerson(person)
household.setMemberIds(JavaConverters.bufferAsJavaList(mutable.Buffer(person.getId)))
val scheduler = TestActorRef[BeamAgentScheduler](
SchedulerProps(
beamConfig,
stopTick = 1000000,
maxWindow = 10,
new StuckFinder(beamConfig.beam.debug.stuckAgentDetection)
)
)
val parkingManager = system.actorOf(Props(new TrivialParkingManager))
val householdActor = TestActorRef[HouseholdActor](
new HouseholdActor(
services,
beamScenario,
_ => modeChoiceCalculator,
scheduler,
beamScenario.transportNetwork,
services.tollCalculator,
self,
self,
parkingManager,
eventsManager,
population,
household,
Map(),
new Coord(0.0, 0.0),
Vector(),
new RouteHistory(beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
boundingBox
)
)
scheduler ! ScheduleTrigger(InitializeTrigger(0), householdActor)
scheduler ! StartSchedule(0)
// The agent will ask for a ride, and we will answer.
val inquiry = expectMsgType[RideHailRequest]
lastSender ! RideHailResponse(inquiry, None, None)
// This is the ridehail to transit request.
// We don't provide an option.
val request1 = expectMsgType[RoutingRequest]
assert(request1.streetVehiclesUseIntermodalUse == AccessAndEgress)
lastSender ! RoutingResponse(
itineraries = Vector(),
requestId = request1.requestId
)
// This is the regular routing request.
// We provide an option.
val request2 = expectMsgType[RoutingRequest]
assert(request2.streetVehiclesUseIntermodalUse == Access)
lastSender ! RoutingResponse(
itineraries = Vector(
EmbodiedBeamTrip(
legs = Vector(
EmbodiedBeamLeg(
beamLeg = BeamLeg(
startTime = 28800,
mode = BeamMode.WALK,
duration = 100,
travelPath = BeamPath(
linkIds = Vector(1, 2),
linkTravelTime = Vector(50, 50),
transitStops = None,
startPoint = SpaceTime(0.0, 0.0, 28800),
endPoint = SpaceTime(1.0, 1.0, 28900),
distanceInM = 1000D
)
),
beamVehicleId = Id.createVehicleId("body-dummyAgent"),
Id.create("BODY-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = true,
cost = 0.0,
unbecomeDriverOnCompletion = true
)
)
)
),
requestId = request2.requestId
)
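      // With the single walk itinerary on offer, the agent completes its trip; verify the resulting MATSim event sequence.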
expectMsgType[ModeChoiceEvent]
expectMsgType[ActivityEndEvent]
expectMsgType[PersonDepartureEvent]
expectMsgType[PersonEntersVehicleEvent]
expectMsgType[VehicleEntersTrafficEvent]
expectMsgType[VehicleLeavesTrafficEvent]
expectMsgType[PathTraversalEvent]
expectMsgType[PersonLeavesVehicleEvent]
expectMsgType[TeleportationArrivalEvent]
expectMsgType[PersonArrivalEvent]
expectMsgType[ActivityStartEvent]
expectMsgType[CompletionNotice]
}
it("should know how to take a walk_transit trip when it's already in its plan") {
val busId = Id.createVehicleId("bus:B3-WEST-1-175")
val tramId = Id.createVehicleId("train:R2-SOUTH-1-93")
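      // Stand-in actor hierarchy for the mobsim iteration, so that actors named after the bus and tram transit drivers exist under BeamMobsim.iteration/transit-system.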
val iteration: ActorRef = TestActorRef(
Props(new Actor() {
context.actorOf(
Props(new Actor() {
context.actorOf(transitDriverProps, "TransitDriverAgent-" + busId.toString)
context.actorOf(transitDriverProps, "TransitDriverAgent-" + tramId.toString)
override def receive: Receive = Actor.emptyBehavior
}),
"transit-system"
)
override def receive: Receive = Actor.emptyBehavior
}),
"BeamMobsim.iteration"
)
      // In this test it is not easy to predict the chronological order of the Events vs. the Triggers/Messages
      // we are expecting, and the order does not matter in real life,
      // so we put the Events on a separate channel to avoid a non-deterministically failing test.
val events = new TestProbe(system)
val eventsManager: EventsManager = new EventsManagerImpl()
eventsManager.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
events.ref ! event
}
}
)
val busLeg = EmbodiedBeamLeg(
BeamLeg(
startTime = 28800,
mode = BeamMode.BUS,
duration = 600,
travelPath = BeamPath(
Vector(),
Vector(),
Some(TransitStopsInfo(1, busId, 2)),
SpaceTime(services.geo.utm2Wgs(new Coord(166321.9, 1568.87)), 28800),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 29400),
1.0
)
),
beamVehicleId = busId,
Id.create("TRANSIT-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = false,
cost = 2.75,
unbecomeDriverOnCompletion = false
)
val busLeg2 = EmbodiedBeamLeg(
beamLeg = BeamLeg(
startTime = 29400,
mode = BeamMode.BUS,
duration = 600,
travelPath = BeamPath(
Vector(),
Vector(),
Some(TransitStopsInfo(2, busId, 3)),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 29400),
SpaceTime(services.geo.utm2Wgs(new Coord(180000.4, 1200)), 30000),
1.0
)
),
beamVehicleId = busId,
Id.create("TRANSIT-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = false,
cost = 0.0,
unbecomeDriverOnCompletion = false
)
val tramLeg = EmbodiedBeamLeg(
beamLeg = BeamLeg(
startTime = 30000,
mode = BeamMode.TRAM,
duration = 600,
travelPath = BeamPath(
linkIds = Vector(),
linkTravelTime = Vector(),
transitStops = Some(TransitStopsInfo(3, tramId, 4)),
startPoint = SpaceTime(services.geo.utm2Wgs(new Coord(180000.4, 1200)), 30000),
endPoint = SpaceTime(services.geo.utm2Wgs(new Coord(190000.4, 1300)), 30600),
distanceInM = 1.0
)
),
beamVehicleId = tramId,
Id.create("TRANSIT-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = false,
cost = 1.0, // $1 fare
unbecomeDriverOnCompletion = false
)
val household = householdsFactory.createHousehold(hoseHoldDummyId)
val population = PopulationUtils.createPopulation(ConfigUtils.createConfig())
val person = PopulationUtils.getFactory.createPerson(Id.createPersonId("dummyAgent"))
putDefaultBeamAttributes(person, Vector(WALK_TRANSIT))
val plan = PopulationUtils.getFactory.createPlan()
val homeActivity = PopulationUtils.createActivityFromCoord("home", new Coord(166321.9, 1568.87))
homeActivity.setEndTime(28800) // 8:00:00 AM
plan.addActivity(homeActivity)
val leg = PopulationUtils.createLeg("walk_transit")
val route = RouteUtils.createLinkNetworkRouteImpl(
Id.createLinkId(1),
Array[Id[Link]](),
Id.createLinkId(2)
)
leg.setRoute(route)
plan.addLeg(leg)
val workActivity = PopulationUtils.createActivityFromCoord("work", new Coord(167138.4, 1117))
      workActivity.setEndTime(61200) // 5:00:00 PM
plan.addActivity(workActivity)
person.addPlan(plan)
population.addPerson(person)
household.setMemberIds(JavaConverters.bufferAsJavaList(mutable.Buffer(person.getId)))
val scheduler = TestActorRef[BeamAgentScheduler](
SchedulerProps(
beamConfig,
stopTick = 1000000,
maxWindow = 10,
new StuckFinder(beamConfig.beam.debug.stuckAgentDetection)
)
)
val parkingManager = system.actorOf(Props(new TrivialParkingManager))
val householdActor = TestActorRef[HouseholdActor](
new HouseholdActor(
beamServices = services,
beamScenario,
modeChoiceCalculatorFactory = _ => modeChoiceCalculator,
schedulerRef = scheduler,
transportNetwork = beamScenario.transportNetwork,
services.tollCalculator,
router = self,
rideHailManager = self,
parkingManager = parkingManager,
eventsManager = eventsManager,
population = population,
household = household,
vehicles = Map(),
homeCoord = new Coord(0.0, 0.0),
Vector(),
new RouteHistory(beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
boundingBox
)
)
scheduler ! ScheduleTrigger(InitializeTrigger(0), householdActor)
scheduler ! StartSchedule(0)
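      // Acting as the router, answer the person's routing request with a walk -> bus -> bus -> tram -> walk itinerary.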
expectMsgType[RoutingRequest]
val personActor = lastSender
lastSender ! RoutingResponse(
itineraries = Vector(
EmbodiedBeamTrip(
legs = Vector(
EmbodiedBeamLeg(
beamLeg = BeamLeg(
startTime = 28800,
mode = BeamMode.WALK,
duration = 0,
travelPath = BeamPath(
linkIds = Vector(),
linkTravelTime = Vector(),
transitStops = None,
startPoint = SpaceTime(services.geo.utm2Wgs(new Coord(166321.9, 1568.87)), 28800),
endPoint = SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 28800),
distanceInM = 1D
)
),
beamVehicleId = Id.createVehicleId("body-dummyAgent"),
Id.create("TRANSIT-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = true,
cost = 0.0,
unbecomeDriverOnCompletion = false
),
busLeg,
busLeg2,
tramLeg,
EmbodiedBeamLeg(
beamLeg = BeamLeg(
startTime = 30600,
mode = BeamMode.WALK,
duration = 0,
travelPath = BeamPath(
linkIds = Vector(),
linkTravelTime = Vector(),
transitStops = None,
startPoint = SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 30600),
endPoint = SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 30600),
distanceInM = 1D
)
),
beamVehicleId = Id.createVehicleId("body-dummyAgent"),
Id.create("TRANSIT-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = true,
cost = 0.0,
unbecomeDriverOnCompletion = false
)
)
)
),
requestId = 1
)
events.expectMsgType[ModeChoiceEvent]
events.expectMsgType[ActivityEndEvent]
events.expectMsgType[PersonDepartureEvent]
events.expectMsgType[PersonEntersVehicleEvent]
events.expectMsgType[VehicleEntersTrafficEvent]
events.expectMsgType[VehicleLeavesTrafficEvent]
events.expectMsgType[PathTraversalEvent]
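      // Acting as the bus driver, schedule the board/alight triggers for the bus legs and confirm the reservation.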
val reservationRequestBus = expectMsgType[ReservationRequest]
scheduler ! ScheduleTrigger(
BoardVehicleTrigger(28800, busLeg.beamVehicleId),
personActor
)
scheduler ! ScheduleTrigger(
AlightVehicleTrigger(30000, busLeg.beamVehicleId),
personActor
)
lastSender ! ReservationResponse(
reservationRequestBus.requestId,
Right(
ReserveConfirmInfo(
busLeg.beamLeg,
busLeg2.beamLeg,
reservationRequestBus.passengerVehiclePersonId
)
),
TRANSIT
)
events.expectMsgType[PersonEntersVehicleEvent]
events.expectMsgType[AgencyRevenueEvent]
events.expectMsgType[PersonCostEvent]
      // One PersonCostEvent with 0.0 cost is generated between PersonEntersVehicleEvent and PersonLeavesVehicleEvent.
events.expectMsgType[PersonLeavesVehicleEvent]
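      // Acting as the tram driver, confirm the second reservation, attaching its board/alight triggers to the response.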
val reservationRequestTram = expectMsgType[ReservationRequest]
lastSender ! ReservationResponse(
reservationRequestTram.requestId,
Right(
ReserveConfirmInfo(
tramLeg.beamLeg,
tramLeg.beamLeg,
reservationRequestTram.passengerVehiclePersonId,
Vector(
ScheduleTrigger(
BoardVehicleTrigger(
30000,
tramLeg.beamVehicleId
),
personActor
),
ScheduleTrigger(
AlightVehicleTrigger(
32000,
tramLeg.beamVehicleId
),
personActor
) // My tram is late!
)
)
),
TRANSIT
)
      // Expect the PersonEntersVehicleEvent for boarding the tram.
events.expectMsgType[PersonEntersVehicleEvent]
events.expectMsgType[AgencyRevenueEvent]
events.expectMsgType[PersonCostEvent]
events.expectMsgType[PersonLeavesVehicleEvent]
events.expectMsgType[VehicleEntersTrafficEvent]
events.expectMsgType[VehicleLeavesTrafficEvent]
events.expectMsgType[PathTraversalEvent]
events.expectMsgType[TeleportationArrivalEvent]
events.expectMsgType[PersonArrivalEvent]
events.expectMsgType[ActivityStartEvent]
expectMsgType[CompletionNotice]
iteration ! PoisonPill
}
it("should also work when the first bus is late") {
val eventsManager = new EventsManagerImpl()
val events = new TestProbe(system)
eventsManager.addHandler(new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
events.ref ! event
}
})
val transitDriverProps = Props(new ForwardActor(self))
val busId = Id.createVehicleId("bus:B3-WEST-1-175")
val tramId = Id.createVehicleId("train:R2-SOUTH-1-93")
val iteration: ActorRef = TestActorRef(
Props(new Actor() {
context.actorOf(
Props(new Actor() {
context.actorOf(transitDriverProps, "TransitDriverAgent-" + busId.toString)
context.actorOf(transitDriverProps, "TransitDriverAgent-" + tramId.toString)
override def receive: Receive = Actor.emptyBehavior
}),
"transit-system"
)
override def receive: Receive = Actor.emptyBehavior
}),
"BeamMobsim.iteration"
)
val busLeg = EmbodiedBeamLeg(
BeamLeg(
28800,
BeamMode.BUS,
600,
BeamPath(
Vector(),
Vector(),
Some(TransitStopsInfo(1, busId, 2)),
SpaceTime(services.geo.utm2Wgs(new Coord(166321.9, 1568.87)), 28800),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 29400),
1.0
)
),
busId,
Id.create("beamVilleCar", classOf[BeamVehicleType]),
asDriver = false,
0,
unbecomeDriverOnCompletion = false
)
val busLeg2 = EmbodiedBeamLeg(
BeamLeg(
29400,
BeamMode.BUS,
600,
BeamPath(
Vector(),
Vector(),
Some(TransitStopsInfo(2, busId, 3)),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 29400),
SpaceTime(services.geo.utm2Wgs(new Coord(180000.4, 1200)), 30000),
1.0
)
),
busId,
Id.create("beamVilleCar", classOf[BeamVehicleType]),
asDriver = false,
0,
unbecomeDriverOnCompletion = false
)
val tramLeg = EmbodiedBeamLeg(
BeamLeg(
30000,
BeamMode.TRAM,
600,
BeamPath(
Vector(),
Vector(),
Some(TransitStopsInfo(3, tramId, 4)),
SpaceTime(services.geo.utm2Wgs(new Coord(180000.4, 1200)), 30000),
SpaceTime(services.geo.utm2Wgs(new Coord(190000.4, 1300)), 30600),
1.0
)
),
tramId,
Id.create("beamVilleCar", classOf[BeamVehicleType]),
asDriver = false,
0,
unbecomeDriverOnCompletion = false
)
val replannedTramLeg = EmbodiedBeamLeg(
BeamLeg(
35000,
BeamMode.TRAM,
600,
BeamPath(
Vector(),
Vector(),
Some(TransitStopsInfo(3, tramId, 4)),
SpaceTime(services.geo.utm2Wgs(new Coord(180000.4, 1200)), 35000),
SpaceTime(services.geo.utm2Wgs(new Coord(190000.4, 1300)), 35600),
1.0
)
),
tramId,
Id.create("beamVilleCar", classOf[BeamVehicleType]),
asDriver = false,
0,
unbecomeDriverOnCompletion = false
)
val household = householdsFactory.createHousehold(Id.create("dummy", classOf[Household]))
val population = PopulationUtils.createPopulation(ConfigUtils.createConfig())
val person = PopulationUtils.getFactory.createPerson(Id.createPersonId("dummyAgent"))
putDefaultBeamAttributes(person, Vector(WALK_TRANSIT))
val plan = PopulationUtils.getFactory.createPlan()
val homeActivity = PopulationUtils.createActivityFromCoord("home", new Coord(166321.9, 1568.87))
homeActivity.setEndTime(28800) // 8:00:00 AM
plan.addActivity(homeActivity)
val leg = PopulationUtils.createLeg("walk_transit")
val route = RouteUtils.createLinkNetworkRouteImpl(
Id.createLinkId(1),
Array[Id[Link]](),
Id.createLinkId(2)
)
leg.setRoute(route)
plan.addLeg(leg)
val workActivity = PopulationUtils.createActivityFromCoord("work", new Coord(167138.4, 1117))
      workActivity.setEndTime(61200) // 5:00:00 PM
plan.addActivity(workActivity)
person.addPlan(plan)
population.addPerson(person)
household.setMemberIds(JavaConverters.bufferAsJavaList(mutable.Buffer(person.getId)))
val scheduler = TestActorRef[BeamAgentScheduler](
SchedulerProps(
beamConfig,
stopTick = 1000000,
maxWindow = 10,
new StuckFinder(beamConfig.beam.debug.stuckAgentDetection)
)
)
val parkingManager = system.actorOf(
ZonalParkingManager.props(beamConfig, beamScenario.tazTreeMap, services.geo, services.beamRouter, boundingBox),
"ParkingManager"
)
val householdActor = TestActorRef[HouseholdActor](
new HouseholdActor(
services,
beamScenario,
_ => modeChoiceCalculator,
scheduler,
beamScenario.transportNetwork,
new TollCalculator(services.beamConfig),
self,
self,
parkingManager,
eventsManager,
population,
household,
Map(),
new Coord(0.0, 0.0),
Vector(),
new RouteHistory(beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
boundingBox
)
)
scheduler ! ScheduleTrigger(InitializeTrigger(0), householdActor)
scheduler ! StartSchedule(0)
expectMsgType[RoutingRequest]
val personActor = lastSender
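      // Board the bus on time but alight only at 34400 (instead of 30000), simulating the late bus so the agent misses its tram connection.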
scheduler ! ScheduleTrigger(
BoardVehicleTrigger(28800, busLeg.beamVehicleId),
personActor
)
scheduler ! ScheduleTrigger(
AlightVehicleTrigger(34400, busLeg.beamVehicleId),
personActor
)
lastSender ! RoutingResponse(
Vector(
EmbodiedBeamTrip(
Vector(
EmbodiedBeamLeg(
BeamLeg(
28800,
BeamMode.WALK,
0,
BeamPath(
Vector(),
Vector(),
None,
SpaceTime(services.geo.utm2Wgs(new Coord(166321.9, 1568.87)), 28800),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 28800),
1.0
)
),
Id.createVehicleId("body-dummyAgent"),
Id.create("BODY-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = true,
0,
unbecomeDriverOnCompletion = false
),
busLeg,
busLeg2,
tramLeg,
EmbodiedBeamLeg(
BeamLeg(
30600,
BeamMode.WALK,
0,
BeamPath(
Vector(),
Vector(),
None,
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 30600),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 30600),
1.0
)
),
Id.createVehicleId("body-dummyAgent"),
Id.create("BODY-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = true,
0,
unbecomeDriverOnCompletion = false
)
)
)
),
requestId = 1
)
events.expectMsgType[ModeChoiceEvent]
events.expectMsgType[ActivityEndEvent]
events.expectMsgType[PersonDepartureEvent]
events.expectMsgType[PersonEntersVehicleEvent]
events.expectMsgType[VehicleEntersTrafficEvent]
events.expectMsgType[VehicleLeavesTrafficEvent]
events.expectMsgType[PathTraversalEvent]
val reservationRequestBus = expectMsgType[ReservationRequest]
lastSender ! ReservationResponse(
reservationRequestBus.requestId,
Right(
ReserveConfirmInfo(
busLeg.beamLeg,
busLeg2.beamLeg,
reservationRequestBus.passengerVehiclePersonId
)
),
TRANSIT
)
events.expectMsgType[PersonEntersVehicleEvent]
      // Two PersonCostEvents with 0.0 cost are generated between PersonEntersVehicleEvent and PersonLeavesVehicleEvent.
val personLeavesVehicleEvent = events.expectMsgType[PersonLeavesVehicleEvent]
assert(personLeavesVehicleEvent.getTime == 34400.0)
events.expectMsgType[ReplanningEvent]
expectMsgType[RoutingRequest]
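      // Having missed the tram, the agent replans; answer the new routing request with the later tram departing at 35000.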
lastSender ! RoutingResponse(
Vector(
EmbodiedBeamTrip(
Vector(
replannedTramLeg,
EmbodiedBeamLeg(
BeamLeg(
35600,
BeamMode.WALK,
0,
BeamPath(
Vector(),
Vector(),
None,
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 35600),
SpaceTime(services.geo.utm2Wgs(new Coord(167138.4, 1117)), 35600),
1.0
)
),
Id.createVehicleId("body-dummyAgent"),
Id.create("BODY-TYPE-DEFAULT", classOf[BeamVehicleType]),
asDriver = true,
0,
unbecomeDriverOnCompletion = false
)
)
)
),
1
)
events.expectMsgType[ModeChoiceEvent]
// Person first does the dummy walk leg
events.expectMsgType[VehicleEntersTrafficEvent]
events.expectMsgType[VehicleLeavesTrafficEvent]
events.expectMsgType[PathTraversalEvent]
val reservationRequestTram = expectMsgType[ReservationRequest]
lastSender ! ReservationResponse(
reservationRequestTram.requestId,
Right(
ReserveConfirmInfo(
tramLeg.beamLeg,
tramLeg.beamLeg,
reservationRequestBus.passengerVehiclePersonId,
Vector(
ScheduleTrigger(
BoardVehicleTrigger(
35000,
replannedTramLeg.beamVehicleId
),
personActor
),
ScheduleTrigger(
AlightVehicleTrigger(
40000,
replannedTramLeg.beamVehicleId
),
personActor
) // My tram is late!
)
)
),
TRANSIT
)
events.expectMsgType[PersonEntersVehicleEvent]
      // Two PersonCostEvents with 0.0 cost are generated between PersonEntersVehicleEvent and PersonLeavesVehicleEvent.
events.expectMsgType[PersonLeavesVehicleEvent]
events.expectMsgType[VehicleEntersTrafficEvent]
events.expectMsgType[VehicleLeavesTrafficEvent]
events.expectMsgType[PathTraversalEvent]
events.expectMsgType[TeleportationArrivalEvent]
events.expectMsgType[PersonArrivalEvent]
events.expectMsgType[ActivityStartEvent]
expectMsgType[CompletionNotice]
iteration ! PoisonPill
}
}
override def afterAll(): Unit = {
shutdown()
super.afterAll()
}
}
| colinsheppard/beam | src/test/scala/beam/agentsim/agents/PersonAgentSpec.scala | Scala | gpl-3.0 | 32,112 |
package com.sksamuel.elastic4s.search
import com.sksamuel.elastic4s.testkit.ElasticSugar
import org.scalatest.{Matchers, WordSpec}
class SearchShowTest extends WordSpec with Matchers with ElasticSugar {
"Search" should {
"have a show typeclass implementation" in {
val request = {
search in "gameofthrones" / "characters" query {
bool {
should {
termQuery("name", "snow")
}.must {
matchQuery("location", "the wall")
}
}
}
}
request.show.trim shouldBe
"""{
| "query" : {
| "bool" : {
| "must" : [
| {
| "match" : {
| "location" : {
| "query" : "the wall",
| "operator" : "OR",
| "prefix_length" : 0,
| "max_expansions" : 50,
| "fuzzy_transpositions" : true,
| "lenient" : false,
| "zero_terms_query" : "NONE",
| "boost" : 1.0
| }
| }
| }
| ],
| "should" : [
| {
| "term" : {
| "name" : {
| "value" : "snow",
| "boost" : 1.0
| }
| }
| }
| ],
| "disable_coord" : false,
| "adjust_pure_negative" : true,
| "boost" : 1.0
| }
| },
| "ext" : { }
|}""".stripMargin.trim
}
}
}
| ulric260/elastic4s | elastic4s-core-tests/src/test/scala/com/sksamuel/elastic4s/search/SearchShowTest.scala | Scala | apache-2.0 | 1,743 |
import sbt._
import Keys._
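// Library versions and module definitions shared by the sbt build.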
object Dependencies {
val seleniumVersion = "2.21.0"
val selenium = Seq(
"org.seleniumhq.selenium" % "selenium-firefox-driver" % seleniumVersion,
"org.seleniumhq.selenium" % "selenium-server" % seleniumVersion
)
val jodaTime = Seq(
"org.joda" % "joda-convert" % "1.2",
"joda-time" % "joda-time" % "2.0"
)
val slf4j = "org.slf4j" % "slf4j-api" % "1.6.4"
val logback = "ch.qos.logback" % "logback-classic" % "1.0.0"
val scalaTest = "org.scalatest" %% "scalatest" % "1.7.1" % "test"
}
| mlegac/selenate | code/scala/project/Dependencies.scala | Scala | bsd-3-clause | 555 |
package io.iohk.ethereum
import java.math.BigInteger
import java.security.SecureRandom
import akka.util.ByteString
import io.iohk.ethereum.blockchain.sync.StateSyncUtils.MptNodeData
import io.iohk.ethereum.crypto.ECDSASignature
import io.iohk.ethereum.domain.BlockHeader.HeaderExtraFields
import io.iohk.ethereum.domain.BlockHeader.HeaderExtraFields._
import io.iohk.ethereum.domain._
import io.iohk.ethereum.mpt.HexPrefix.bytesToNibbles
import io.iohk.ethereum.mpt.{BranchNode, ExtensionNode, HashNode, LeafNode, MptNode, MptTraversals}
import io.iohk.ethereum.network.p2p.messages.CommonMessages.NewBlock
import io.iohk.ethereum.network.p2p.messages.PV64
import org.bouncycastle.crypto.AsymmetricCipherKeyPair
import org.scalacheck.{Arbitrary, Gen, Shrink}
// scalastyle:off number.of.methods
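// ScalaCheck generators for the domain objects (block headers, transactions, receipts, MPT nodes, ...) used by property-based tests.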
trait ObjectGenerators {
def noShrink[T]: Shrink[T] = Shrink[T](_ => Stream.empty)
def byteGen: Gen[Byte] = Gen.choose(Byte.MinValue, Byte.MaxValue)
def shortGen: Gen[Short] = Gen.choose(Short.MinValue, Short.MaxValue)
def intGen(min: Int, max: Int): Gen[Int] = Gen.choose(min, max)
def intGen: Gen[Int] = Gen.choose(Int.MinValue, Int.MaxValue)
def longGen: Gen[Long] = Gen.choose(Long.MinValue, Long.MaxValue)
def bigIntGen: Gen[BigInt] = byteArrayOfNItemsGen(32).map(b => new BigInteger(1, b))
def randomSizeByteArrayGen(minSize: Int, maxSize: Int): Gen[Array[Byte]] =
Gen.choose(minSize, maxSize).flatMap(byteArrayOfNItemsGen(_))
def byteArrayOfNItemsGen(n: Int): Gen[Array[Byte]] = Gen.listOfN(n, Arbitrary.arbitrary[Byte]).map(_.toArray)
def randomSizeByteStringGen(minSize: Int, maxSize: Int): Gen[ByteString] =
Gen.choose(minSize, maxSize).flatMap(byteStringOfLengthNGen)
def byteStringOfLengthNGen(n: Int): Gen[ByteString] = byteArrayOfNItemsGen(n).map(ByteString(_))
def seqByteStringOfNItemsGen(n: Int): Gen[Seq[ByteString]] = Gen.listOf(byteStringOfLengthNGen(n))
def hexPrefixDecodeParametersGen(): Gen[(Array[Byte], Boolean)] = {
for {
aByteList <- Gen.nonEmptyListOf(Arbitrary.arbitrary[Byte])
t <- Arbitrary.arbitrary[Boolean]
} yield (aByteList.toArray, t)
}
def keyValueListGen(): Gen[List[(Int, Int)]] = {
for {
aKeyList <- Gen.nonEmptyListOf(Arbitrary.arbitrary[Int]).map(_.distinct)
} yield aKeyList.zip(aKeyList)
}
def keyValueByteStringGen(size: Int): Gen[List[(ByteString, Array[Byte])]] = {
for {
byteStringList <- Gen.nonEmptyListOf(byteStringOfLengthNGen(size))
arrayList <- Gen.nonEmptyListOf(byteArrayOfNItemsGen(size))
} yield byteStringList.zip(arrayList)
}
def receiptGen(): Gen[Receipt] = for {
postTransactionStateHash <- byteArrayOfNItemsGen(32)
cumulativeGasUsed <- bigIntGen
logsBloomFilter <- byteArrayOfNItemsGen(256)
} yield Receipt.withHashOutcome(
postTransactionStateHash = ByteString(postTransactionStateHash),
cumulativeGasUsed = cumulativeGasUsed,
logsBloomFilter = ByteString(logsBloomFilter),
logs = Seq()
)
def addressGen: Gen[Address] = byteArrayOfNItemsGen(20).map(Address(_))
def transactionGen(): Gen[Transaction] = for {
nonce <- bigIntGen
gasPrice <- bigIntGen
gasLimit <- bigIntGen
receivingAddress <- addressGen
value <- bigIntGen
payload <- byteStringOfLengthNGen(256)
} yield Transaction(
nonce,
gasPrice,
gasLimit,
receivingAddress,
value,
payload
)
def receiptsGen(n: Int): Gen[Seq[Seq[Receipt]]] = Gen.listOfN(n, Gen.listOf(receiptGen()))
def branchNodeGen: Gen[BranchNode] = for {
children <- Gen
.listOfN(16, byteStringOfLengthNGen(32))
.map(childrenList => childrenList.map(child => HashNode(child.toArray[Byte])))
terminator <- byteStringOfLengthNGen(32)
} yield {
val branchNode = BranchNode(children.toArray, Some(terminator))
val asRlp = MptTraversals.encode(branchNode)
branchNode.copy(parsedRlp = Some(asRlp))
}
def extensionNodeGen: Gen[ExtensionNode] = for {
keyNibbles <- byteArrayOfNItemsGen(32)
value <- byteStringOfLengthNGen(32)
} yield {
val extNode = ExtensionNode(ByteString(bytesToNibbles(keyNibbles)), HashNode(value.toArray[Byte]))
val asRlp = MptTraversals.encode(extNode)
extNode.copy(parsedRlp = Some(asRlp))
}
def leafNodeGen: Gen[LeafNode] = for {
keyNibbles <- byteArrayOfNItemsGen(32)
value <- byteStringOfLengthNGen(32)
} yield {
val leafNode = LeafNode(ByteString(bytesToNibbles(keyNibbles)), value)
val asRlp = MptTraversals.encode(leafNode)
leafNode.copy(parsedRlp = Some(asRlp))
}
  def nodeGen: Gen[MptNode] = Gen.choose(0, 2).flatMap {
    case 0 => branchNodeGen
    case 1 => extensionNodeGen
    case 2 => leafNodeGen
  }
def signedTxSeqGen(length: Int, secureRandom: SecureRandom, chainId: Option[Byte]): Gen[Seq[SignedTransaction]] = {
val senderKeys = crypto.generateKeyPair(secureRandom)
val txsSeqGen = Gen.listOfN(length, transactionGen())
txsSeqGen.map { txs =>
txs.map { tx =>
SignedTransaction.sign(tx, senderKeys, chainId).tx
}
}
}
def genKey(rnd: SecureRandom): Gen[AsymmetricCipherKeyPair] = {
Gen.resultOf { _: Unit =>
crypto.generateKeyPair(rnd)
}
}
def newBlockGen(secureRandom: SecureRandom, chainId: Option[Byte]): Gen[NewBlock] = for {
blockHeader <- blockHeaderGen
stxs <- signedTxSeqGen(10, secureRandom, chainId)
uncles <- seqBlockHeaderGen
td <- bigIntGen
} yield NewBlock(Block(blockHeader, BlockBody(stxs, uncles)), td)
def newBlock64Gen(secureRandom: SecureRandom, chainId: Option[Byte]): Gen[PV64.NewBlock] = for {
blockHeader <- blockHeaderGen
stxs <- signedTxSeqGen(10, secureRandom, chainId)
uncles <- seqBlockHeaderGen
chainWeight <- chainWeightGen
} yield PV64.NewBlock(Block(blockHeader, BlockBody(stxs, uncles)), chainWeight)
def extraFieldsGen: Gen[HeaderExtraFields] = for {
optOut <- Arbitrary.arbitrary[Option[Boolean]]
checkpoint <- if (optOut.isDefined) Gen.option(fakeCheckpointOptGen(0, 5)) else Gen.const(None)
} yield (optOut, checkpoint) match {
case (Some(definedOptOut), Some(definedCheckpoint)) => HefPostEcip1097(definedOptOut, definedCheckpoint)
case (Some(definedOptOut), None) => HefPostEcip1098(definedOptOut)
case _ => HefEmpty
}
def blockHeaderGen: Gen[BlockHeader] = for {
parentHash <- byteStringOfLengthNGen(32)
ommersHash <- byteStringOfLengthNGen(32)
beneficiary <- byteStringOfLengthNGen(20)
stateRoot <- byteStringOfLengthNGen(32)
transactionsRoot <- byteStringOfLengthNGen(32)
receiptsRoot <- byteStringOfLengthNGen(32)
logsBloom <- byteStringOfLengthNGen(50)
difficulty <- bigIntGen
number <- bigIntGen
gasLimit <- bigIntGen
gasUsed <- bigIntGen
unixTimestamp <- intGen.map(_.abs)
extraData <- byteStringOfLengthNGen(8)
mixHash <- byteStringOfLengthNGen(8)
nonce <- byteStringOfLengthNGen(8)
extraFields <- extraFieldsGen
} yield BlockHeader(
parentHash = parentHash,
ommersHash = ommersHash,
beneficiary = beneficiary,
stateRoot = stateRoot,
transactionsRoot = transactionsRoot,
receiptsRoot = receiptsRoot,
logsBloom = logsBloom,
difficulty = difficulty,
number = number,
gasLimit = gasLimit,
gasUsed = gasUsed,
unixTimestamp = unixTimestamp,
extraData = extraData,
mixHash = mixHash,
nonce = nonce,
extraFields = extraFields
)
def seqBlockHeaderGen: Gen[Seq[BlockHeader]] = Gen.listOf(blockHeaderGen)
private def fakeCheckpointOptGen(min: Int, max: Int): Gen[Option[Checkpoint]] =
Gen.option(fakeCheckpointGen(min, max))
def fakeCheckpointGen(minSignatures: Int, maxSignatures: Int): Gen[Checkpoint] =
for {
n <- Gen.choose(minSignatures, maxSignatures)
signatures <- Gen.listOfN(n, fakeSignatureGen)
} yield Checkpoint(signatures)
def fakeSignatureGen: Gen[ECDSASignature] =
for {
r <- bigIntGen
s <- bigIntGen
v <- byteGen
} yield ECDSASignature(r, s, v)
def listOfNodes(min: Int, max: Int): Gen[Seq[MptNode]] = for {
size <- intGen(min, max)
nodes <- Gen.listOfN(size, nodeGen)
} yield nodes
def genMptNodeData: Gen[MptNodeData] = for {
receivingAddress <- addressGen
code <- byteStringOfLengthNGen(10)
storageSize <- intGen(1, 100)
storage <- Gen.listOfN(storageSize, intGen(1, 5000))
storageAsBigInts = storage.distinct.map(s => (BigInt(s), BigInt(s)))
value <- intGen(0, 2000)
} yield MptNodeData(receivingAddress, Some(code), storageAsBigInts, value)
def genMultipleNodeData(max: Int): Gen[List[MptNodeData]] = for {
n <- intGen(1, max)
list <- Gen.listOfN(n, genMptNodeData)
} yield list
val chainWeightGen = for {
lcn <- bigIntGen
td <- bigIntGen
} yield ChainWeight(lcn, td)
}
object ObjectGenerators extends ObjectGenerators
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/ObjectGenerators.scala | Scala | mit | 8,929 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.mv.rewrite
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
class MVCountAndCaseTestCase extends QueryTest with BeforeAndAfterAll {
override def beforeAll(): Unit = {
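    // Recreate the source tables queried by the materialized view under test.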
drop
sql("create table region(l4id string,l4name string) using carbondata")
sql(
s"""create table data_table(
|starttime int, seq long,succ long,LAYER4ID string,tmp int)
|using carbondata""".stripMargin)
}
  def drop(): Unit = {
sql("drop table if exists region")
sql("drop table if exists data_table")
}
test("test mv count and case when expression") {
sql("drop materialized view if exists data_table_mv")
sql(s"""create materialized view data_table_mv as
| SELECT STARTTIME,LAYER4ID,
| SUM(seq) AS seq_c,
| SUM(succ) AS succ_c
| FROM data_table
| GROUP BY STARTTIME,LAYER4ID""".stripMargin)
    val frame = sql(s"""SELECT MT.`3600` AS `3600`,
| MT.`2250410101` AS `2250410101`,
| count(1) over() as countNum,
| (CASE WHEN (SUM(COALESCE(seq_c, 0))) = 0 THEN NULL
| ELSE
| (CASE WHEN (CAST((SUM(COALESCE(seq_c, 0))) AS int)) = 0 THEN 0
| ELSE ((CAST((SUM(COALESCE(succ_c, 0))) AS double))
| / (CAST((SUM(COALESCE(seq_c, 0))) AS double)))
| END) * 100
| END) AS rate
| FROM (
| SELECT sum_result.*, H_REGION.`2250410101` FROM
| (SELECT cast(floor((starttime + 28800) / 3600) * 3600 - 28800 as int) AS `3600`,
| LAYER4ID,
| COALESCE(SUM(seq), 0) AS seq_c,
| COALESCE(SUM(succ), 0) AS succ_c
| FROM data_table
| WHERE STARTTIME >= 1549866600 AND STARTTIME < 1549899900
| GROUP BY cast(floor((STARTTIME + 28800) / 3600) * 3600 - 28800 as int),LAYER4ID
| )sum_result
| LEFT JOIN
| (SELECT l4id AS `225040101`,
| l4name AS `2250410101`,
| l4name AS NAME_2250410101
| FROM region
| GROUP BY l4id, l4name) H_REGION
| ON sum_result.LAYER4ID = H_REGION.`225040101`
| WHERE H_REGION.NAME_2250410101 IS NOT NULL
| ) MT
| GROUP BY MT.`3600`, MT.`2250410101`
| ORDER BY `3600` ASC LIMIT 5000""".stripMargin)
assert(TestUtil.verifyMVDataMap(frame.queryExecution.optimizedPlan, "data_table_mv"))
}
override def afterAll(): Unit = {
drop
}
}
| jackylk/incubator-carbondata | mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCountAndCaseTestCase.scala | Scala | apache-2.0 | 3,770 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.tools.asm
import org.objectweb.asm.Type
import org.objectweb.asm.signature.SignatureVisitor
import org.objectweb.asm.signature.SignatureWriter
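/**
 * Fluent builders around ASM's [[SignatureVisitor]] for assembling class, method and type
 * signature strings.
 */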
sealed abstract class SignatureBuilder(protected val sv: SignatureVisitor) {
def build(): String = sv.toString()
override def toString(): String = build()
}
final class ClassSignatureBuilder(sv: SignatureVisitor) extends SignatureBuilder(sv) {
def this() = this(new SignatureWriter())
private var visitedSuperclass = false
private var visitedInterface = false
def newFormalTypeParameter(name: String, classBound: Type, interfaceBounds: Type*): this.type = {
newFormalTypeParameter(name) {
_
.newClassBound(classBound)
.newInterfaceBounds(interfaceBounds: _*)
}
}
def newFormalTypeParameter(name: String)(block: FormalTypeSignatureBuilder => Unit): this.type = {
assert(!visitedSuperclass && !visitedInterface)
sv.visitFormalTypeParameter(name)
block(new FormalTypeSignatureBuilder(sv))
this
}
def newSuperclass(`type`: Type): this.type = {
newSuperclass {
_.newClassType(`type`)
}
}
def newSuperclass(block: TypeSignatureBuilder => Unit): this.type = {
assert(!visitedSuperclass && !visitedInterface)
sv.visitSuperclass()
block(new TypeSignatureBuilder(sv))
visitedSuperclass = true
this
}
def newInterface(`type`: Type): this.type = {
newInterface {
_.newClassType(`type`)
}
}
def newInterface(block: TypeSignatureBuilder => Unit): this.type = {
sv.visitInterface()
block(new TypeSignatureBuilder(sv))
visitedInterface = true
this
}
def newInterfaces(types: Type*): this.type = {
types.foreach(newInterface)
this
}
override def build(): String = {
assert(visitedSuperclass)
super.build()
}
}
final class TypeSignatureBuilder(sv: SignatureVisitor) extends SignatureBuilder(sv) {
def this() = this(new SignatureWriter())
private var typed = false
private var classType = false
def newTypeVariable(name: String): this.type = {
assert(!typed)
sv.visitTypeVariable(name)
typed = true
this
}
def newArrayType(block: TypeSignatureBuilder => Unit): this.type = {
assert(!typed)
sv.visitArrayType()
block(new TypeSignatureBuilder(sv))
typed = true
this
}
def newClassType(
`type`: Type)(implicit block: TypeArgumentSignatureBuilder => Unit): this.type = {
assert(!typed)
if (`type`.getSort() < Type.ARRAY) {
sv.visitBaseType(`type`.getDescriptor().charAt(0))
} else if (`type`.getSort() == Type.ARRAY) {
(0 until `type`.getDimensions()).foreach(_ => sv.visitArrayType())
newClassType(`type`.getElementType())(block)
} else {
sv.visitClassType(`type`.getInternalName())
block(new TypeArgumentSignatureBuilder(sv))
sv.visitEnd()
}
typed = true
classType = true
this
}
def newInnnerClassType(
`type`: Type)(implicit block: TypeArgumentSignatureBuilder => Unit): this.type = {
assert(classType)
sv.visitInnerClassType(`type`.getInternalName())
block(new TypeArgumentSignatureBuilder(sv))
sv.visitEnd()
this
}
}
object TypeSignatureBuilder {
implicit val emptyTypeArgumentSignatureBuilderBlock: TypeArgumentSignatureBuilder => Unit = {
sb =>
}
}
final class MethodSignatureBuilder(sv: SignatureVisitor) extends SignatureBuilder(sv) {
def this() = this(new SignatureWriter())
private var visitedParameterType = false
private var visitedReturnType = false
def newFormalTypeParameter(name: String, classBound: Type, interfaceBounds: Type*): this.type = {
newFormalTypeParameter(name) {
_
.newClassBound(classBound)
.newInterfaceBounds(interfaceBounds: _*)
}
}
def newFormalTypeParameter(name: String)(block: FormalTypeSignatureBuilder => Unit): this.type = {
assert(!visitedParameterType && !visitedReturnType)
sv.visitFormalTypeParameter(name)
block(new FormalTypeSignatureBuilder(sv))
this
}
def newParameterType(`type`: Type): this.type = {
newParameterType {
_.newClassType(`type`)
}
}
def newParameterType(block: TypeSignatureBuilder => Unit): this.type = {
assert(!visitedReturnType)
sv.visitParameterType()
block(new TypeSignatureBuilder(sv))
visitedParameterType = true
this
}
def newParameterTypes(types: Type*): this.type = {
types.foreach(newParameterType)
this
}
def newVoidReturnType(): this.type = {
newReturnType(Type.VOID_TYPE)
}
def newReturnType(`type`: Type): this.type = {
newReturnType {
_.newClassType(`type`)
}
}
def newReturnType(block: TypeSignatureBuilder => Unit): this.type = {
sv.visitReturnType()
block(new TypeSignatureBuilder(sv))
visitedReturnType = true
this
}
def newExceptionType(`type`: Type): this.type = {
newExceptionType {
_.newClassType(`type`)
}
}
def newExceptionType(block: TypeSignatureBuilder => Unit): this.type = {
assert(visitedReturnType)
sv.visitExceptionType()
block(new TypeSignatureBuilder(sv))
this
}
def newExceptionTypes(types: Type*): this.type = {
types.foreach(newExceptionType)
this
}
override def build(): String = {
assert(visitedReturnType)
super.build()
}
}
final class FormalTypeSignatureBuilder private[asm] (sv: SignatureVisitor)
extends SignatureBuilder(sv) {
private var visitedClassBound = false
private var visitedInterfaceBound = false
def newClassBound(`type`: Type): this.type = {
newClassBound {
_.newClassType(`type`.boxed)
}
}
def newClassBound(block: TypeSignatureBuilder => Unit): this.type = {
assert(!visitedClassBound && !visitedInterfaceBound)
sv.visitClassBound()
block(new TypeSignatureBuilder(sv))
visitedClassBound = true
this
}
def newInterfaceBound(`type`: Type): this.type = {
newInterfaceBound {
_.newClassType(`type`.boxed)
}
}
def newInterfaceBound(block: TypeSignatureBuilder => Unit): this.type = {
sv.visitInterfaceBound()
block(new TypeSignatureBuilder(sv))
visitedInterfaceBound = true
this
}
def newInterfaceBounds(types: Type*): this.type = {
types.foreach(newInterfaceBound)
this
}
}
final class TypeArgumentSignatureBuilder private[asm] (sv: SignatureVisitor)
extends SignatureBuilder(sv) {
def newTypeArgument(): this.type = {
sv.visitTypeArgument()
this
}
def newTypeArgument(wildcard: Char, `type`: Type): this.type = {
newTypeArgument(wildcard) {
_.newClassType(`type`.boxed)
}
}
def newTypeArgument(wildcard: Char, name: String): this.type = {
newTypeArgument(wildcard) {
_.newTypeVariable(name)
}
}
def newTypeArgument(wildcard: Char)(block: TypeSignatureBuilder => Unit): this.type = {
sv.visitTypeArgument(wildcard)
block(new TypeSignatureBuilder(sv))
this
}
}
| asakusafw/asakusafw-spark | tools/asm/src/main/scala/com/asakusafw/spark/tools/asm/SignatureBuilder.scala | Scala | apache-2.0 | 7,606 |