code
stringlengths 5
1M
| repo_name
stringlengths 5
109
| path
stringlengths 6
208
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 5
1M
|
---|---|---|---|---|---|
package com.madsen.xcs.core
import java.util.concurrent.atomic.AtomicBoolean
/** Minimal run-loop skeleton for the XCS engine.
  *
  * Implementations spin the main loop in [[run]] until [[stop]] is invoked,
  * typically from another thread.
  */
trait Sumo {
  // Thread-safe flag so stop() can be called from a different thread than run().
  private val running: AtomicBoolean = new AtomicBoolean(true)

  /** Runs the main loop until [[stop]] is called.
    *
    * NOTE(review): the loop body is currently empty, so this busy-spins at
    * 100% CPU until the numbered steps below are implemented — confirm this
    * is only a placeholder.
    */
  def run(): Unit = {
    while (running.get()) {
      /*
      (1) Sensor values are read.
      (2) All active chromosomes are in a pool. Predicates react to sensors.
      (3) All predicates matching sensor values are found
      (4) The highest fitness chromosome with a matching predicate is chosen
      (5) The chromosome's action is executed
      (6) Reinforcement system's feedback value for action is obtained
      (7) Update fitness
      (8) Generate new rules
      */
    }
  }

  /** Signals the run loop to terminate after the current iteration.
    * Explicit `Unit` return type added: public side-effecting members should
    * not rely on inference.
    */
  def stop(): Unit = running.set(false)
}
|
beatmadsen/xcs-main
|
src/main/scala/com/madsen/xcs/core/Sumo.scala
|
Scala
|
mit
| 693 |
package com.eevolution.context.dictionary.domain.api.repository
import com.eevolution.context.dictionary._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 02/11/17.
*/
/** Repository contract for ReplicationLog entities.
  *
  * NOTE(review): `ReplicationLog` and `Int` here are *type parameter names*
  * that shadow the concrete types of the same names; this mirrors the other
  * repository traits in this module, but the parameters are effectively
  * unconstrained placeholders — confirm this is the intended convention.
  */
trait ReplicationLogRepository [ReplicationLog , Int] extends api.Repostory [ReplicationLog , Int] {
}
|
adempiere/ADReactiveSystem
|
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/repository/ReplicationLogRepository.scala
|
Scala
|
gpl-3.0
| 1,145 |
package com.github.jlprat.gameserver.become.actor
import akka.actor.{Props, ActorSystem}
import akka.testkit._
import com.github.jlprat.gameserver.become.actors.Player
import com.github.jlprat.gameserver.become.model.{Card, Hand}
import com.github.jlprat.gameserver.become.protocol.ClientProtocol._
import com.github.jlprat.gameserver.become.protocol.Protocol._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.duration._
/**
 * Specification of the Player actor's protocol behaviour: card dealing, turn
 * handling, playing cards, drawing cards, last-card announcements, suit
 * selection for eights, and turn time-outs.
 *
 * NOTE(review): several `when`/`must` groups share a single actor instance
 * across their `in` clauses, so the tests within a group are order-dependent.
 *
 * Created by josep on 2/22/15.
 */
class PlayerActorSpec (_system: ActorSystem) extends TestKit(_system) with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  def this() = this(ActorSystem("GameServerSpec"))

  // How long a player may stay in turn before the actor times out.
  val turnDuration = 400 milliseconds
  // A "crazy eight" used by the suit-change scenarios below.
  val eight: Card = Card(8, 8, "blue")
  // Default 5-card hand dealt to players: Card(0,0,"blue") .. Card(4,4,"blue").
  val playerHand = Hand(List.tabulate(5)(elem => Card(elem, elem, "blue")))

  /** Creates a fresh Player actor plus probes standing in for the table and client actors. */
  def giveMeAPlayerActor(id: Int = 1) : (TestActorRef[Player], TestProbe, TestProbe) = {
    val tableActorProbe = TestProbe()
    val clientActorProbe = TestProbe()
    (TestActorRef(Props(classOf[Player], id, tableActorProbe.ref, clientActorProbe.ref, turnDuration)), tableActorProbe, clientActorProbe)
  }

  /** Like [[giveMeAPlayerActor]] but also deals the default hand and puts the player in turn. */
  def giveMeAPlayerInTurn(id: Int = 1) : (TestActorRef[Player], TestProbe, TestProbe) = {
    val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerActor(id)
    playerActor ! TakenCards(playerHand, playerId = id)
    clientActorProbe.expectMsg(Out.ReceiveCard(playerHand, playerId = id))
    playerActor ! NextTurn(playerId = id)
    clientActorProbe.expectMsg(Out.PlayerInTurn(playerId = id))
    assert(playerActor.underlyingActor.playersHand === playerHand)
    (playerActor, tableActorProbe, clientActorProbe)
  }

  "A player starts without cards," when {
    "receives any message except a TakenCards" must {
      // Shared actor: the two tests below run against the same instance, in order.
      val (playerActor, _, clientActorProbe) = giveMeAPlayerActor()
      "log the message received" in {
        val message = NextTurn(2)
        playerActor ! message
        // NOTE(review): EventFilter.error is not wrapped in intercept { ... },
        // so this line builds a filter but asserts nothing — confirm intent.
        EventFilter.error(message = s"Unknown message $message", occurrences = 1)
        assert(playerActor.underlyingActor.playersHand === Hand())
        assert(playerActor.underlyingActor.turnTimer === None)
      }
      "send a wrong message to the client if the message comes from the them" in {
        playerActor ! In.PlayCardRequest(Card(1,1,"blue"))
        clientActorProbe.expectMsg(Out.WrongAction)
        assert(playerActor.underlyingActor.playersHand === Hand())
        assert(playerActor.underlyingActor.turnTimer === None)
      }
    }
    "receives a TakenCards message" when {
      // Shared actor across the nested groups below; tests are order-dependent.
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerActor()
      val red1 = Card(1, 1, "red")
      "is for other player" must {
        "log the message received" in {
          val message = TakenCards(Hand(red1), playerId = 2)
          playerActor ! message
          // Other players' draws only reach the client as an opponent count.
          clientActorProbe.expectMsg(Out.ReceiveCardOpponent(numberCards = 1, playerId = 2))
          // NOTE(review): EventFilter built without intercept — see note above.
          EventFilter.info(message = s"Player ${message.playerId} receives ${message.hand.size} cards", occurrences = 1)
          assert(playerActor.underlyingActor.playersHand === Hand())
          assert(playerActor.underlyingActor.turnTimer === None)
        }
      }
      "is for same player" must {
        "communicate it back to client" in {
          val message = TakenCards(Hand(red1), playerId = 1)
          playerActor ! message
          clientActorProbe.expectMsg(Out.ReceiveCard(Hand(red1), playerId = 1))
          assert(playerActor.underlyingActor.playersHand === Hand(red1))
          assert(playerActor.underlyingActor.turnTimer === None)
        }
        "receive other TakenCards for other players" in {
          val message = TakenCards(Hand(Card(2,2,"red")), playerId = 2)
          playerActor ! message
          clientActorProbe.expectMsg(Out.ReceiveCardOpponent(numberCards = 1, playerId = 2))
          // Own hand is unchanged by an opponent's draw.
          assert(playerActor.underlyingActor.playersHand === Hand(red1))
          assert(playerActor.underlyingActor.turnTimer === None)
        }
        "ignore Incoming requests" in {
          val message = In.PlayCardRequest(card = red1)
          playerActor ! message
          // Not in turn yet, so playing a card is rejected.
          clientActorProbe.expectMsg(Out.NotInTurn)
          assert(playerActor.underlyingActor.turnTimer === None)
        }
        "accept LastCard calls" in {
          playerActor ! In.AnnounceLastCard
          tableActorProbe.expectMsg(AnnounceLastCard(playerId = 1))
          assert(playerActor.underlyingActor.turnTimer === None)
        }
      }
    }
  }

  "A player" must {
    "be informed about the top card" when {
      "is in the inactive state" in {
        val (playerActor, _, clientActorProbe) = giveMeAPlayerActor(id = 20)
        playerActor ! TakenCards(playerHand, playerId = 20)
        assert(playerActor.underlyingActor.playersHand === playerHand)
        clientActorProbe.expectMsg(Out.ReceiveCard(playerHand, playerId = 20))
        val aCard = Card(30, 1, "yellow")
        playerActor ! TopCard(aCard)
        clientActorProbe.expectMsg(Out.TopCard(aCard))
      }
    }
  }

  "After dealt, a not in turn player" can {
    // Shared actor: set-up (dealing) happens once for the tests in this block.
    val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerActor(id = 2)
    playerActor ! TakenCards(playerHand, playerId = 2)
    assert(playerActor.underlyingActor.playersHand === playerHand)
    clientActorProbe.expectMsg(Out.ReceiveCard(playerHand, playerId = 2))
    "receive Next turn messages for other players, several times" in {
      playerActor ! NextTurn(playerId = 1)
      playerActor ! NextTurn(playerId = 3)
      clientActorProbe.expectMsg(Out.PlayerInTurn(playerId = 1))
      clientActorProbe.expectMsg(Out.PlayerInTurn(playerId = 3))
      // No timer is started when it's another player's turn.
      assert(playerActor.underlyingActor.turnTimer === None)
    }
    val cardInHand: Card = Card(0, 0, "blue")
    "not play any card" when {
      "not in turn" must {
        "client get a notification" in {
          playerActor ! In.PlayCardRequest(cardInHand)
          clientActorProbe.expectMsg(Out.NotInTurn)
          assert(playerActor.underlyingActor.playersHand === playerHand)
          assert(playerActor.underlyingActor.turnTimer === None)
        }
        "table get no message" in {
          tableActorProbe.expectNoMsg(50 milliseconds)
        }
      }
    }
  }

  "A player is in turn" when {
    "receives NextTurn message for thyself" in {
      val (playerActor, _, _) = giveMeAPlayerInTurn(2)
      assert(playerActor.underlyingActor.playersHand === playerHand)
      // Being in turn starts the turn timer.
      assert(playerActor.underlyingActor.turnTimer.isDefined)
    }
    "After this," can {
      "play a card" when {
        // Shared actor for the two "play a card" tests below.
        val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(2)
        "not in hand, client must be notified" in {
          playerActor ! In.PlayCardRequest(Card(2, 2, "red"))
          clientActorProbe.expectMsg(Out.WrongAction)
          assert(playerActor.underlyingActor.playersHand === playerHand)
          assert(playerActor.underlyingActor.turnTimer.isDefined)
        }
        "in hand, table must get this request" in {
          val cardInHand: Card = Card(0, 0, "blue")
          playerActor ! In.PlayCardRequest(cardInHand)
          // The card leaves the hand optimistically, pending table confirmation.
          assert(playerActor.underlyingActor.playersHand === playerHand.play(cardInHand)._2)
          tableActorProbe.expectMsg(PlayCard(cardInHand, playerId = 2))
          assert(playerActor.underlyingActor.turnTimer.isDefined)
        }
      }
      "ask for more cards" in {
        val (playerActor, tableActorProbe, _) = giveMeAPlayerInTurn(2)
        playerActor ! In.TakeCardsRequest
        tableActorProbe.expectMsg(TakeCard(playerId = 2))
        assert(playerActor.underlyingActor.playersHand === playerHand)
        assert(playerActor.underlyingActor.turnTimer.isDefined)
      }
      "not announce last card" when {
        "it's not their last card" in {
          val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(2)
          playerActor ! In.AnnounceLastCard
          // A false announcement costs the player a card draw plus a WrongAction.
          tableActorProbe.expectMsg(TakeCard(playerId = 2))
          clientActorProbe.expectMsg(Out.WrongAction)
          assert(playerActor.underlyingActor.playersHand === playerHand)
          assert(playerActor.underlyingActor.turnTimer.isDefined)
        }
      }
    }
  }

  "Once a player requested to play a card" when {
    val cardPlayed = Card(1, 1, "blue")
    val handAfterPlay: Hand = playerHand.play(cardPlayed)._2
    "correctly played" must {
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(3)
      playerActor ! In.PlayCardRequest(cardPlayed)
      tableActorProbe.expectMsg(PlayCard(cardPlayed, playerId = 3))
      assert(playerActor.underlyingActor.playersHand === handAfterPlay)
      "receive confirmation that the card was played" in {
        playerActor ! PlayedCard(Card(1, 1, "blue"), playerId = 3)
        clientActorProbe.expectMsg(Out.PlayedCardSuccessfully(cardPlayed, playerId = 3))
        assert(playerActor.underlyingActor.playersHand === handAfterPlay)
        assert(playerActor.underlyingActor.turnTimer.isDefined)
      }
    }
    "incorrectly played" must {
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(3)
      playerActor ! In.PlayCardRequest(cardPlayed)
      tableActorProbe.expectMsg(PlayCard(cardPlayed, playerId = 3))
      assert(playerActor.underlyingActor.playersHand === handAfterPlay)
      assert(playerActor.underlyingActor.turnTimer.isDefined)
      "receive confirmation of wrong move" in {
        playerActor ! PlayedCardIllegal(Card(1, 1, "blue"), playerId = 3)
        clientActorProbe.expectMsg(Out.PlayedCardIrregularly(cardPlayed, playerId = 3))
        // On an illegal play the card returns to the hand (compared sorted).
        assert(playerActor.underlyingActor.playersHand.sort === playerHand.sort)
        assert(playerActor.underlyingActor.turnTimer.isDefined)
      }
    }
  }

  "Once a player requested some cards" when {
    "table decides to send only 1 card" must {
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(3)
      playerActor ! In.TakeCardsRequest
      tableActorProbe.expectMsg(TakeCard(playerId = 3))
      assert(playerActor.underlyingActor.playersHand === playerHand)
      "send cards to player" in {
        val incomingCard = Card(10, 1, "red")
        playerActor ! TakenCards(Hand(incomingCard), playerId = 3)
        clientActorProbe.expectMsg(Out.ReceiveCard(Hand(incomingCard), playerId = 3))
        // ::: concatenates the drawn card(s) onto the existing hand.
        assert(playerActor.underlyingActor.playersHand === (playerHand ::: Hand(incomingCard)))
        assert(playerActor.underlyingActor.turnTimer.isDefined)
      }
    }
    "table decides to send only multiple cards" must {
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(3)
      playerActor ! In.TakeCardsRequest
      tableActorProbe.expectMsg(TakeCard(playerId = 3))
      assert(playerActor.underlyingActor.playersHand === playerHand)
      "send cards to player" in {
        val incomingHand = Hand(List(Card(10, 1, "red"), Card(20, 2, "yellow")))
        playerActor ! TakenCards(incomingHand, playerId = 3)
        clientActorProbe.expectMsg(Out.ReceiveCard(incomingHand, playerId = 3))
        assert(playerActor.underlyingActor.playersHand === (playerHand ::: incomingHand))
        assert(playerActor.underlyingActor.turnTimer.isDefined)
      }
    }
  }

  "A player can chose the suit" when {
    "plays any 8" should {
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(4)
      // Inject an 8 directly into the actor's hand to enable the suit change.
      playerActor.underlyingActor.playersHand = eight :: playerActor.underlyingActor.playersHand
      playerActor ! In.PlayCardRequest(eight)
      assert(playerActor.underlyingActor.turnTimer.isDefined)
      tableActorProbe.expectMsg(PlayCard(eight, playerId = 4))
      playerActor ! ChangeSuitRequest(playerId = 4)
      clientActorProbe.expectMsg(Out.SelectSuitRequest(playerId = 4))
      "select the suit" in {
        // While choosing a suit, regular play requests are ignored by the table.
        playerActor ! In.PlayCardRequest(Card(0, 0, "blue"))
        tableActorProbe.expectNoMsg(50 milliseconds)
        assert(playerActor.underlyingActor.playersHand === playerHand)
        playerActor ! In.SelectSuitRequest(suit = "yellow")
        tableActorProbe.expectMsg(ChangeSuit(suit = "yellow", playerId = 4))
        playerActor ! ChangedSuit("yellow", playerId = 4)
        clientActorProbe.expectMsg(Out.NewSuitSelected("yellow", playerId = 4))
        assert(playerActor.underlyingActor.turnTimer.isDefined)
      }
    }
  }

  "A player" must {
    "be not in turn after getting a nextTurn for some other player" in {
      val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(5)
      assert(playerActor.underlyingActor.turnTimer.isDefined)
      playerActor ! In.TakeCardsRequest
      tableActorProbe.expectMsg(TakeCard(playerId = 5))
      val incomingCard = Card(10, 1, "red")
      playerActor ! TakenCards(Hand(incomingCard), playerId = 5)
      clientActorProbe.expectMsg(Out.ReceiveCard(Hand(incomingCard), playerId = 5))
      assert(playerActor.underlyingActor.playersHand === (playerHand ::: Hand(incomingCard)))
      val turnTimer = playerActor.underlyingActor.turnTimer
      assert(turnTimer.isDefined)
      playerActor ! NextTurn(1)
      // Losing the turn clears and cancels the running timer.
      assert(playerActor.underlyingActor.turnTimer === None)
      assert(turnTimer.forall(_.isCancelled))
      clientActorProbe.expectMsg(Out.PlayerInTurn(1))
    }
  }

  "A player times out" when {
    "didn't perform any action" must {
      "Take Cards be sent" in {
        val (playerActor, tableActorProbe, _) = giveMeAPlayerInTurn(6)
        assert(playerActor.underlyingActor.turnTimer.isDefined)
        // Timer fires within turnDuration (400ms); 450ms gives a small margin.
        within(450 milliseconds) {
          tableActorProbe.expectMsg(TakeCard(playerId = 6))
        }
      }
    }
    "Played an 8" must {
      "automatically pick up a color" in {
        val (playerActor, tableActorProbe, clientActorProbe) = giveMeAPlayerInTurn(6)
        assert(playerActor.underlyingActor.turnTimer.isDefined)
        playerActor.underlyingActor.playersHand = eight :: playerActor.underlyingActor.playersHand
        playerActor ! In.PlayCardRequest(eight)
        assert(playerActor.underlyingActor.turnTimer.isDefined)
        tableActorProbe.expectMsg(PlayCard(eight, playerId = 6))
        playerActor ! ChangeSuitRequest(playerId = 6)
        clientActorProbe.expectMsg(Out.SelectSuitRequest(playerId = 6))
        // On time-out the actor picks a suit itself ("blue" here).
        within(450 milliseconds) {
          tableActorProbe.expectMsg(ChangeSuit(suit = "blue", playerId = 6))
        }
      }
    }
  }
}
|
jlprat/akka-gameserver
|
src/test/scala/com/github/jlprat/gameserver/become/actor/PlayerActorSpec.scala
|
Scala
|
apache-2.0
| 14,486 |
package cromwell.jobstore
import akka.actor.{LoggingFSM, Props}
import cromwell.core.Dispatcher.EngineDispatcher
import cromwell.core.actor.BatchingDbWriter._
import cromwell.core.actor.{BatchingDbWriter, BatchingDbWriterActor}
import cromwell.jobstore.JobStore.{JobCompletion, WorkflowCompletion}
import cromwell.jobstore.JobStoreActor._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}
/**
 * Batches job store write commands and flushes them to the database either
 * when the buffer reaches `dbBatchSize` or on the periodic `dbFlushRate` tick
 * driven by [[BatchingDbWriterActor]].
 *
 * NOTE(review): actors are conventionally plain classes rather than case
 * classes (Props already captures the constructor args); kept as a case class
 * here to avoid changing the public interface.
 *
 * @param jsd the job store backing database
 * @param dbBatchSize number of buffered commands that triggers an eager flush
 * @param dbFlushRate interval of the periodic flush
 */
case class JobStoreWriterActor(jsd: JobStore, override val dbBatchSize: Int, override val dbFlushRate: FiniteDuration) extends LoggingFSM[BatchingDbWriterState, BatchingDbWriter.BatchingDbWriterData] with BatchingDbWriterActor {
  // Explicit type annotation: public-ish implicits should not rely on inference.
  implicit val ec: scala.concurrent.ExecutionContext = context.dispatcher

  startWith(WaitingToWrite, NoData)

  when(WaitingToWrite) {
    case Event(command: JobStoreWriterCommand, curData) =>
      // Buffer the command with its sender; flush eagerly once the batch is full.
      curData.addData(CommandAndReplyTo(command, sender)) match {
        case newData: HasData[_] if newData.length >= dbBatchSize => goto(WritingToDb) using newData
        case newData => stay() using newData
      }
    case Event(ScheduledFlushToDb, curData) =>
      log.debug("Initiating periodic job store flush to DB")
      goto(WritingToDb) using curData
  }

  when(WritingToDb) {
    // Already flushing; ignore further periodic ticks.
    case Event(ScheduledFlushToDb, _) => stay()
    // Keep buffering commands that arrive mid-flush.
    case Event(command: JobStoreWriterCommand, curData) => stay() using curData.addData(CommandAndReplyTo(command, sender))
    case Event(FlushBatchToDb, NoData) =>
      log.debug("Attempted job store flush to DB but had nothing to write")
      goto(WaitingToWrite)
    case Event(FlushBatchToDb, HasData(data)) =>
      log.debug("Flushing {} job store commands to the DB", data.length)
      val completions = data.toVector.collect({ case CommandAndReplyTo(c: JobStoreWriterCommand, _) => c.completion })
      if (completions.nonEmpty) {
        val workflowCompletions = completions collect { case w: WorkflowCompletion => w }
        // Explicit method call instead of postfix `toSet` (avoids postfix-op pitfalls).
        val completedWorkflowIds = workflowCompletions.map(_.workflowId).toSet
        // Filter job completions that also have a corresponding workflow completion; these would just be
        // immediately deleted anyway.
        val jobCompletions = completions.toList collect { case j: JobCompletion if !completedWorkflowIds.contains(j.key.workflowId) => j }
        jsd.writeToDatabase(workflowCompletions, jobCompletions, dbBatchSize) onComplete {
          case Success(_) =>
            // foreach (not map): replies are pure side effects, no result needed.
            data foreach { case CommandAndReplyTo(c: JobStoreWriterCommand, r) => r ! JobStoreWriteSuccess(c) }
            self ! DbWriteComplete
          case Failure(regerts) =>
            // Fixed log wording (was: "Failed to properly job store entries to database").
            log.error("Failed to properly write job store entries to database", regerts)
            data foreach { case CommandAndReplyTo(_, r) => r ! JobStoreWriteFailure(regerts) }
            self ! DbWriteComplete
        }
      }
      // Buffer is handed off to the in-flight write; start a fresh one.
      stay() using NoData
    case Event(DbWriteComplete, _) =>
      log.debug("Flush of job store commands complete")
      goto(WaitingToWrite)
  }
}
object JobStoreWriterActor {
  /** Props factory: builds the writer actor pinned to the engine dispatcher. */
  def props(jobStoreDatabase: JobStore, dbBatchSize: Int, dbFlushRate: FiniteDuration): Props = Props(new JobStoreWriterActor(jobStoreDatabase, dbBatchSize, dbFlushRate)).withDispatcher(EngineDispatcher)
}
|
ohsu-comp-bio/cromwell
|
engine/src/main/scala/cromwell/jobstore/JobStoreWriterActor.scala
|
Scala
|
bsd-3-clause
| 3,191 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.amp
import java.time.{LocalDate, LocalDateTime, ZoneOffset}
import connectors.DataCacheConnector
import controllers.actions.SuccessfulAuthAction
import models.amp.Amp
import org.mockito.Mockito._
import play.api.inject.bind
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.{JsValue, Json}
import play.api.test.Helpers._
import utils.{AmlsSpec, AuthAction, AuthorisedFixture, CacheMocks}
import org.mockito.Matchers.{eq => eqTo, _}
import play.api.test.FakeRequest
import services.ProxyCacheService
import scala.concurrent.Future
/** Tests the AMP proxy-cache controller endpoints: get, set and accept. */
class AmpControllerSpec extends AmlsSpec with CacheMocks {
  val dateVal = LocalDateTime.now

  // Fully populated AMP section payload used as the cached "data" object.
  val completeData = Json.obj(
    "typeOfParticipant" -> Seq("artGalleryOwner"),
    "soldOverThreshold" -> true,
    "dateTransactionOverThreshold" -> LocalDate.now,
    "identifyLinkedTransactions" -> true,
    "percentageExpectedTurnover" -> "fortyOneToSixty"
  )

  // Full cache document wrapper as stored by the proxy cache.
  val completeJson = Json.obj(
    "_id" -> "someid",
    "data" -> completeData,
    "lastUpdated" -> Json.obj("$date" -> dateVal.atZone(ZoneOffset.UTC).toInstant.toEpochMilli),
    "hasChanged" -> false,
    "hasAccepted" -> false
  )

  /** Per-test fixture: builds a Guice app with auth and cache dependencies mocked. */
  trait Fixture extends AuthorisedFixture {
    self =>
    val request = addToken(authRequest)
    val proxyCacheService = mock[ProxyCacheService]
    val credId = "someId"
    lazy val app = new GuiceApplicationBuilder()
      .disable[com.kenshoo.play.metrics.PlayModule]
      .overrides(bind[AuthAction].to(SuccessfulAuthAction))
      .overrides(bind[ProxyCacheService].to(proxyCacheService))
      .overrides(bind[DataCacheConnector].to(mockCacheConnector))
      .build()
    val controller = app.injector.instanceOf[AmpController]
  }

  "get returns 200" when {
    "no amp section in cache" in new Fixture {
      // Service returns an empty object rather than None when no section exists.
      when(proxyCacheService.getAmp(any())(any())).thenReturn(Future.successful(Some(Json.obj())))
      val result = controller.get(credId)(request)
      status(result) must be(OK)
      val document = Json.parse(contentAsString(result))
      document mustBe(Json.obj())
    }
    "amp section in cache" in new Fixture {
      when(proxyCacheService.getAmp(any())(any())).thenReturn(Future.successful(Some(completeJson)))
      val result = controller.get(credId)(request)
      status(result) must be(OK)
      val document = Json.parse(contentAsString(result))
      document mustBe(completeJson)
    }
  }

  "set" when {
    "passed valid json" in new Fixture {
      val postRequest = FakeRequest("POST", "/")
        .withHeaders(CONTENT_TYPE -> "application/json")
        .withBody[JsValue](completeJson)
      when(proxyCacheService.setAmp(any(), any())(any())).thenReturn(Future.successful(mockCacheMap))
      val result = controller.set(credId)(postRequest)
      status(result) must be(OK)
      // The controller echoes the credential id back as the document id.
      val document = Json.parse(contentAsString(result))
      document mustBe(Json.obj("_id" -> credId))
    }
  }

  "accept" must {
    "set accept flag to true and redirect to RegistrationProgressController" in new Fixture {
      when(mockCacheConnector.fetch[Amp](any(), any())(any(), any()))
        .thenReturn(Future.successful(Some(completeJson.as[Amp])))
      when(mockCacheConnector.save[Amp](any(), any(), any())(any(), any()))
        .thenReturn(Future.successful(mockCacheMap))
      val result = controller.accept.apply(FakeRequest())
      status(result) mustBe SEE_OTHER
      redirectLocation(result).value mustBe controllers.routes.RegistrationProgressController.get.toString
      // Verify the saved model had hasAccepted flipped to true.
      verify(mockCacheConnector).save[Amp](any(), eqTo(Amp.key),
        eqTo(completeJson.as[Amp].copy(hasAccepted = true)))(any(), any())
    }
  }
}
|
hmrc/amls-frontend
|
test/controllers/amp/AmpControllerSpec.scala
|
Scala
|
apache-2.0
| 4,363 |
package chapter07
/** Reads exam marks from stdin and prints the corresponding letter grade. */
object IfExpressions {

  /** Maps a numeric mark to a letter grade.
    *
    * Grades: >=90 "S", >=80 "A", >=70 "B", >=60 "C", >=50 "D", >=40 "E",
    * otherwise "FAIL". Extracted from main so the mapping is unit-testable.
    *
    * @param marks the mark to grade
    * @return the letter grade as a string
    */
  def gradeFor(marks: Int): String =
    if (marks >= 90) "S"
    else if (marks >= 80) "A"
    else if (marks >= 70) "B"
    else if (marks >= 60) "C"
    else if (marks >= 50) "D"
    else if (marks >= 40) "E"
    else "FAIL"

  def main(args: Array[String]): Unit = {
    println("Enter your Marks :")
    // scala.io.StdIn.readInt replaces Predef.readInt, which was deprecated
    // in 2.11 and removed in 2.13.
    val marks = scala.io.StdIn.readInt()
    println("Grade : " + gradeFor(marks))
  }
}
|
aakashmathai/ScalaTutorial
|
src/main/scala/chapter07/IfExpressions.scala
|
Scala
|
apache-2.0
| 404 |
/*
* Copyright 2010 Michael Fortin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.mock
import org.brzy.fab.mod.ViewModProvider
/** Test double for a view-module provider serving `.ssp` templates. */
class MockModProvider(c:MockModConfig) extends ViewModProvider {
  // Template file extension handled by this provider.
  val fileExtension = ".ssp"
  // NOTE(review): `.get` throws NoSuchElementException if c.name is None;
  // tolerable in a test mock, but confirm callers always supply a name.
  override val name = c.name.get
}
|
m410/brzy
|
src/test/scala/org/brzy/mock/MockModProvider.scala
|
Scala
|
apache-2.0
| 803 |
/*
* Copyright 2016 Actian Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actian.spark_vector.datastream
import scala.util.Try
import com.actian.spark_vector.util.Logging
import com.actian.spark_vector.vector.VectorJDBC
import com.actian.spark_vector.vector.ColumnMetadata
/** Information to connect to a VectorEndpoint (DataStream) */
case class VectorEndpoint(host: String, port: Int, username: String, password: String) extends Serializable

/**
 * Contains helpers to obtain VectorEndpoint information from `Vector`'s SQL interface.
 *
 * @note The way this information is obtained, by issuing a select from a system table, will very likely be modified in the future
 */
object VectorEndpoint extends Logging {
  private val hostDbColumn = "host"
  private val qhostDbColumn = "qhost"
  private val portDbColumn = "port"
  private val usernameDbColumn = "username"
  private val passwordDbColumn = "password"
  private val dataStreamsTable = "iivwtable_datastreams"

  /** Parses one result-set row into a [[VectorEndpoint]].
    *
    * An empty host column falls back to `jdbcHost`; a non-numeric port or an
    * unexpected row shape yields `None` (the `Try` absorbs the NumberFormatException).
    */
  def apply(seq: Seq[Any], jdbcHost: String = "localhost"): Option[VectorEndpoint] = seq match {
    case Seq(host: String, port: String, username: String, password: String) =>
      Try {
        // An empty host column means the datastream runs on the JDBC host itself.
        val realHost = if (host.isEmpty) jdbcHost else host
        VectorEndpoint(realHost, port.toInt, username, password)
      }.toOption
    case _ => None
  }

  /** If possible, we try to use the fully qualified hostname (qhost) instead of the
   * simple hostname (host) in order to avoid issues in a Kubernetes setup. However,
   * depending on the VH version, the qhost column might not be available yet as it
   * was first introduced with VH 6.1.0.
   */
  private def extractHostColumnName(colMeta: Seq[ColumnMetadata]): String = {
    // Narrow the metadata down to just the host/qhost columns (at most two entries).
    val hostColumns = colMeta.filter(col => col.name == qhostDbColumn || col.name == hostDbColumn)
    hostColumns match {
      // Prefer qhost whenever it is present, regardless of ordering.
      case Seq(_, ColumnMetadata(`qhostDbColumn`, _, _, _, _, _)) => qhostDbColumn
      case Seq(ColumnMetadata(`qhostDbColumn`, _, _, _, _, _), _*) => qhostDbColumn
      case Seq(ColumnMetadata(`hostDbColumn`, _, _, _, _, _)) => hostDbColumn
      case _ => throw new IllegalStateException(s"Table $dataStreamsTable is missing a host column!")
    }
  }

  /** Issues a query through JDBC to obtain connection information from the `DataStreams` system table */
  def fromDataStreamsTable(cxn: VectorJDBC): IndexedSeq[VectorEndpoint] = {
    val colMeta = cxn.columnMetadata(dataStreamsTable)
    val getVectorEndPointSql: String = s"select ${extractHostColumnName(colMeta)}, $portDbColumn, $usernameDbColumn, $passwordDbColumn from $dataStreamsTable"
    logDebug(s"Running sql query ${getVectorEndPointSql} to get the datastream endpoints' info.")
    // flatMap fuses the former map(...).flatten and silently drops unparseable rows.
    val endpoints = cxn.query(getVectorEndPointSql).flatMap(VectorEndpoint(_, cxn.getIngresHost))
    logDebug(s"Got the following VectorEndPoints from the datastreams table: ${endpoints.map(_.toString).mkString(",")}")
    endpoints.toIndexedSeq
  }
}
|
ActianCorp/spark-vector
|
src/main/scala/com/actian/spark_vector/datastream/VectorEndpoint.scala
|
Scala
|
apache-2.0
| 3,531 |
package at.vizu.s2n.parser
import scala.reflect.runtime.universe._
/**
 * A parsed compilation unit: pairs the originating source file name with the
 * reflection [[Tree]] produced by the parser.
 *
 * Phil on 25.09.15.
 *
 * @param fileName path/name of the source file this AST was parsed from
 * @param internalTree the scala-reflect universe tree for the file's contents
 */
case class AST(fileName: String, internalTree: Tree)
|
viZu/nasca
|
src/main/scala/at/vizu/s2n/parser/AST.scala
|
Scala
|
apache-2.0
| 150 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools {
import org.scalatest.FunSuite
import org.scalatools.testing.{Event, EventHandler, Result, Logger, Runner => TestingRunner}
// testing runner.run:
// def run(testClassName: String, fingerprint: TestFingerprint, args: Array[String]): Array[Event]
/** Exercises ScalaTestFramework's sbt test-interface runner against the
  * sample suites defined in the sibling `test` subpackage, covering results,
  * tag inclusion/exclusion, config maps, and skipped/pending reporting.
  */
class ScalaTestRunnerSuite extends FunSuite {
  test("call with simple class") {
    val results = run("org.scalatest.tools.test.SimpleTest")
    assert(results(0).testName === "hello, world")
    assert(results(0).result === Result.Success)
  }
  test("three different results") {
    val results = run("org.scalatest.tools.test.ThreeTestsTest")
    assert(results(0).testName === "hello, world")
    assert(results(0).result === Result.Success)
    assert(results(1).testName === "throw")
    assert(results(1).result === Result.Failure)
    assert(results(1).error.getMessage === "baah")
    assert(results(2).testName === "assert bad")
    assert(results(2).result === Result.Failure)
    assert(results(2).error.getMessage === "1 did not equal 3")
    assert(results.size === 3)
  }
  test("one tag included") {
    val results = run("org.scalatest.tools.test.TagsTest", "-n hello")
    assert(results(0).testName === "hello, world")
    assert(results(0).result === Result.Success)
    assert(results.size === 1)
  }
  test("two tags included") {
    val results = run("org.scalatest.tools.test.TagsTest", Array("-n", "hello helloAgain"))
    assert(results(0).testName === "hello, world")
    assert(results(0).result === Result.Success)
    assert(results(1).testName === "hello, world again")
    assert(results(1).result === Result.Success)
    assert(results.size === 2)
  }
  test("one tag excluded") {
    val results = run("org.scalatest.tools.test.TagsTest", Array("-l", "hello"))
    assert(results(0).testName === "hello, world again")
    assert(results(0).result === Result.Success)
    assert(results(1).testName === "tag3")
    assert(results(1).result === Result.Success)
    assert(results(2).testName === "throw")
    assert(results(2).result === Result.Failure)
    assert(results(2).error.getMessage === "baah")
    assert(results(3).testName === "assert bad")
    assert(results(3).result === Result.Failure)
    assert(results(3).error.getMessage === "1 did not equal 3")
    assert(results.size === 4)
  }
  test("configs") {
    val results = run("org.scalatest.tools.test.TestWithConfigMap", "-Djosh=cool")
    assert(results(0).testName === "get config")
    assert(results(0).result === Result.Success)
    val resultsF = run("org.scalatest.tools.test.TestWithConfigMap", "-Djosh=bad")
    assert(resultsF(0).testName === "get config")
    assert(resultsF(0).result === Result.Failure)
    assert(resultsF(0).error.getMessage === "\"[bad]\" did not equal \"[cool]\"")
  }
  test("configs 2"){
    val results = run("org.scalatest.tools.test.TestWithConfigMap2", "-Da=z -Db=y -Dc=x")
    assert(results(0).testName === "get config")
    assert(results(0).result === Result.Success)
  }
  test("illegal arg on private constructor"){
    intercept[IllegalArgumentException] {
      run("org.scalatest.tools.test.PrivateConstructor")
    }
  }
  test("skipped test results in Result.Skipped") {
    val results = run("org.scalatest.tools.test.SuiteWithSkippedTest")
    assert(results.size === 2)
    assert(results(0).testName === "dependeeThatFails")
    assert(results(0).result === Result.Failure)
    assert(results(0).error.getMessage === "fail")
    assert(results(1).testName === "depender")
    assert(results(1).result === Result.Skipped)
  }
  test("pending test results in Result.Skipped") {
    val results = run("org.scalatest.tools.test.PendingTest")
    assert(results.size === 1)
    assert(results(0).testName === "i am pending")
    assert(results(0).result === Result.Skipped)
  }

  val framework = new ScalaTestFramework
  val runner: TestingRunner = {
    // Thread.currentThread: the bare `currentThread` Predef alias is deprecated.
    framework.testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
  }
  // First fingerprint published by the framework, cast to the sbt test-interface type.
  val fingerprint = {
    val fingerprints = framework.tests
    fingerprints(0).
      asInstanceOf[org.scalatools.testing.TestFingerprint]
  }

  /** Runs the named suite with no arguments and collects the emitted events. */
  def run(classname: String): Array[Event] = run(classname, Array[String]())
  /** Runs the named suite, splitting a single argument string on spaces. */
  def run(classname: String, args:String): Array[Event] = run(classname, args.split(" "))
  /** Runs the named suite with explicit arguments, buffering all events. */
  def run(classname: String, args:Array[String]): Array[Event] = {
    // val buf = scala.collection.mutable.ArrayBuffer[Event]() // Only worked under 2.8
    val buf = new scala.collection.mutable.ArrayBuffer[Event]
    val listener = new EventHandler {
      def handle(event: Event) {
        buf += event
      }
    }
    runner.run(classname, fingerprint, listener, args)
    buf.toArray
  }

  /** No-op logger satisfying the test-interface Logger contract. */
  class TestLogger extends Logger {
    def trace(t:Throwable) {}
    def error(msg: String) {}
    def warn(msg: String) {}
    def info(msg: String) {}
    def debug(msg: String) {}
    def ansiCodesSupported = false
  }
}
// Fixture suites exercised by the runner tests above. They are private so normal
// test discovery skips them; the runner loads them by fully-qualified name.
package test{
// Single passing test.
private class SimpleTest extends FunSuite {
test("hello, world") {"hello, world"}
}
// One passing test, one exception failure, one assertion failure.
private class ThreeTestsTest extends FunSuite {
test("hello, world") {"hello, world"}
test("throw") {throw new Exception("baah")}
test("assert bad") {assert(1 === 3)}
}
import org.scalatest.fixture
// Fixture suite whose fixture is the value of the "josh" config-map entry.
private class TestWithConfigMap extends fixture.FunSuite {
type FixtureParam = String
override def withFixture(test: OneArgTest) {
test(test.configMap("josh").toString)
}
test("get config"){ conf => assert(conf === "cool") }
}
// Fixture suite whose fixture is the entire config map.
private class TestWithConfigMap2 extends fixture.FunSuite {
type FixtureParam = Map[String,Any]
override def withFixture(test: OneArgTest) {
test(test.configMap)
}
test("get config"){ conf => assert(conf === Map("a" -> "z", "b" -> "y", "c" -> "x")) }
}
// Tagged tests used by include/exclude-group runs.
private class TagsTest extends FunSuite {
test("hello, world", org.scalatest.Tag("hello")) {"hello, world"}
test("hello, world again", org.scalatest.Tag("helloAgain")) {"hello, world again"}
test("tag3", org.scalatest.Tag("tag3")) {"tag3"}
test("throw") {throw new Exception("baah")}
test("assert bad") {assert(1 === 3)}
}
// Not instantiable by the runner: exercises IllegalArgumentException handling.
private class PrivateConstructor private() extends FunSuite
// Suite with a single pending test.
private class PendingTest extends FunSuite {
test("i am pending")(pending)
}
import org.scalatest.testng.TestNGSuite
// TestNG suite where `depender` is skipped because its dependee group fails.
private class SuiteWithSkippedTest extends TestNGSuite {
import org.testng.annotations.Test
@Test(groups = Array("run")) def dependeeThatFails() { throw new Exception("fail") }
@Test(dependsOnGroups = Array("run")) def depender() {}
}
}
}
|
yyuu/scalatest
|
src/test/scala/org/scalatest/tools/ScalaTestRunnerSuite.scala
|
Scala
|
apache-2.0
| 7,734 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor
import org.apache.ignite._
import org.apache.ignite.cluster.{ClusterGroup, ClusterGroupEmptyException, ClusterMetrics, ClusterNode}
import org.apache.ignite.events.EventType._
import org.apache.ignite.events.{DiscoveryEvent, Event}
import org.apache.ignite.internal.IgniteEx
import org.apache.ignite.internal.IgniteNodeAttributes._
import org.apache.ignite.internal.cluster.ClusterGroupEmptyCheckedException
import org.apache.ignite.internal.util.lang.{GridFunc => F}
import org.apache.ignite.internal.util.typedef._
import org.apache.ignite.internal.util.{GridConfigurationFinder, IgniteUtils => U}
import org.apache.ignite.lang._
import org.apache.ignite.thread.IgniteThreadPoolExecutor
import org.apache.ignite.visor.commands.common.VisorTextTable
import jline.console.ConsoleReader
import org.jetbrains.annotations.Nullable
import java.io._
import java.lang.{Boolean => JavaBoolean}
import java.net._
import java.text._
import java.util.concurrent._
import java.util.{Collection => JavaCollection, HashSet => JavaHashSet, _}
import org.apache.ignite.internal.visor.cache._
import org.apache.ignite.internal.visor.node.VisorNodeEventsCollectorTask.VisorNodeEventsCollectorTaskArg
import org.apache.ignite.internal.visor.node._
import org.apache.ignite.internal.visor.util.VisorEventMapper
import org.apache.ignite.internal.visor.util.VisorTaskUtils._
import org.apache.ignite.internal.visor.{VisorMultiNodeTask, VisorTaskArgument}
import scala.collection.JavaConversions._
import scala.collection.immutable
import scala.language.{implicitConversions, reflectiveCalls}
import scala.util.control.Breaks._
/**
* Holder for command help information.
*/
sealed case class VisorCommandHolder(
    name: String,
    shortInfo: String,
    longInfo: Seq[String],
    aliases: Seq[String],
    spec: Seq[String],
    args: Seq[(String, AnyRef)],
    examples: Seq[(String, AnyRef)],
    emptyArgs: () => Unit,
    withArgs: (String) => Unit
) {
    /** Command name followed by its aliases in parentheses, e.g. `help (?)`. */
    lazy val nameWithAliases: String =
        if (aliases == null || aliases.isEmpty)
            name
        else {
            // Join aliases with ", " without emitting a leading separator.
            val joined = aliases.foldLeft("")((acc, a) => if (acc.isEmpty) a else acc + ", " + a)

            name + " (" + joined + ")"
        }
}
/**
* ==Overview==
* This is the '''tagging''' trait existing solely to have type associated with
* with `visor` object so that implicit conversions can be done
* on `visor` object itself. Implicit conversions are essential to extensibility
* of the Visor.
*
* ==Example==
* This is an example on how [[VisorTag]] trait is used to
* extend `visor` natively with custom commands:
*
* <ex>
* class VisorCustomCommand {
* def foo(@Nullable args: String) = {
* if (visor.hasValue("bar", visor.parse(args)))
* println("foobar")
* else
* println("foo")
* }
* def foo(@Nullable args: Symbol*) = foo(visor.flatSymbols(args: _*))
* }
* object VisorCustomCommand {
* implicit def fromVisor(vs: VisorTag) = new VisorCustomCommand
* }
* </ex>
*/
trait VisorTag
/**
* {{{
* ___ _________________________ ________
* __ | / /____ _/__ ___/__ __ \___ __ \
* __ | / / __ / _____ \ _ / / /__ /_/ /
* __ |/ / __/ / ____/ / / /_/ / _ _, _/
* _____/ /___/ /____/ \____/ /_/ |_|
*
* }}}
*
* ==Overview==
* Visor console provides monitoring capabilities for Ignite.
*
* ==Usage==
* Ignite ships with `IGNITE_HOME/bin/ignitevisorcmd.{sh|bat}` script that starts Visor console.
*
* Just type:<ex>help</ex> in Visor console to get help and get started.
*/
@IgniteNotPeerDeployable
object visor extends VisorTag {
/** Argument type. */
type Arg = (String, String)
/** Type alias for command argument list. */
type ArgList = Seq[Arg]
/** Type alias for general node filter. */
type NodeFilter = ClusterNode => Boolean
/** Type alias for general event filter. */
type EventFilter = Event => Boolean
/** `Nil` is for empty list, `Til` is for empty tuple. */
val Til: Arg = (null, null)
/** Node filter that includes any node. */
final val ALL_NODES_FILTER = (_: ClusterNode) => true
/** System line separator. */
final val NL = System getProperty "line.separator"
/** Display value for `null`. */
final val NA = "<n/a>"
/** */
private var cmdLst: Seq[VisorCommandHolder] = Nil
/** Node left listener. */
private var nodeLeftLsnr: IgnitePredicate[Event] = null
/** Node join listener. */
private var nodeJoinLsnr: IgnitePredicate[Event] = null
/** Node segmentation listener. */
private var nodeSegLsnr: IgnitePredicate[Event] = null
/** Node stop listener. */
private var nodeStopLsnr: IgnitionListener = null
/** */
@volatile private var isCon: Boolean = false
/**
* Whether or not Visor is the owner of connection - or it
* reused one already opened.
*/
@volatile private var conOwner: Boolean = false
/** */
@volatile private var conTs: Long = 0
private final val LOC = Locale.US
/** Date time format. */
private final val dtFmt = new SimpleDateFormat("MM/dd/yy, HH:mm:ss", LOC)
/** Date format. */
private final val dFmt = new SimpleDateFormat("dd MMMM yyyy", LOC)
private final val DEC_FMT_SYMS = new DecimalFormatSymbols(LOC)
/** Number format. */
private final val nmFmt = new DecimalFormat("#", DEC_FMT_SYMS)
/** KB format. */
private final val kbFmt = new DecimalFormat("###,###,###,###,###", DEC_FMT_SYMS)
/** */
private val mem = new ConcurrentHashMap[String, String]()
/** List of close callbacks*/
@volatile private var cbs = Seq.empty[() => Unit]
/** List of shutdown callbacks*/
@volatile private var shutdownCbs = Seq.empty[() => Unit]
/** Default log file path. */
/**
* Default log file path. Note that this path is relative to `IGNITE_HOME/work` folder
* if `IGNITE_HOME` system or environment variable specified, otherwise it is relative to
* `work` folder under system `java.io.tmpdir` folder.
*/
private final val DFLT_LOG_PATH = "visor/visor-log"
/** Log file. */
private var logFile: File = null
/** Log timer. */
private var logTimer: Timer = null
/** Topology log timer. */
private var topTimer: Timer = null
/** Log started flag. */
@volatile private var logStarted = false
/** Internal thread pool. */
@volatile var pool: ExecutorService = new IgniteThreadPoolExecutor()
/** Configuration file path, if any. */
@volatile var cfgPath: String = null
/** */
@volatile var ignite: IgniteEx = null
/** */
@volatile var prevIgnite: Option[IgniteEx] = None
private var reader: ConsoleReader = null
def reader(reader: ConsoleReader) {
assert(reader != null)
this.reader = reader
}
/**
* Get grid node for specified ID.
*
* @param nid Node ID.
* @return GridNode instance.
* @throws IgniteException if Visor is disconnected or node not found.
*/
def node(nid: UUID): ClusterNode = {
    // Snapshot the volatile reference once so the null check and the lookup
    // operate on the same instance.
    val g = ignite

    if (g == null)
        throw new IgniteException("Visor disconnected")

    // Cluster lookup returns null for unknown/departed nodes.
    Option(g.cluster.node(nid)).getOrElse(throw new IgniteException("Node is gone: " + nid))
}
/**
* @param node Optional node.
* @param cacheName Cache name to take cluster group for.
* @return Cluster group with data nodes for specified cache or cluster group for specified node.
*/
def groupForDataNode(node: Option[ClusterNode], cacheName: String) = {
// Either the single requested node, or all nodes hosting the given cache
// (node IDs resolved by running VisorCacheNodesTask on a random node).
val grp = node match {
case Some(n) => ignite.cluster.forNode(n)
case None => ignite.cluster.forNodeIds(executeRandom(classOf[VisorCacheNodesTask], cacheName))
}
if (grp.nodes().isEmpty)
throw new ClusterGroupEmptyException("Topology is empty.")
grp
}
/**
* @param nodeOpt Node.
* @param cacheName Cache name.
* @return Message about why node was not found.
*/
def messageNodeNotFound(nodeOpt: Option[ClusterNode], cacheName: String) =
    // Option.fold: first argument is the None branch, second the Some branch.
    nodeOpt.fold("Can't find nodes for cache: " + escapeName(cacheName))(
        node => "Can't find node with specified id: " + node.id())
// JVM shutdown hook: if Visor is still connected, run the registered shutdown
// callbacks and close the connection (stopping the grid if Visor owns it).
// All failures are swallowed — the JVM is going down anyway.
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
try
if (ignite != null && isConnected) {
// Call all shutdown callbacks.
shutdownCbs foreach(_.apply())
close() // This will stop the grid too if Visor is connection owner.
}
catch {
case ignore: Throwable => // ignore
}
}
})
addHelp(
name = "mlist",
shortInfo = "Prints Visor console memory variables.",
spec = Seq(
"mlist {arg}"
),
args = Seq(
"arg" ->
"String that contains start characters of variable names."
),
examples = Seq(
"mlist" ->
"Prints out all Visor console memory variables.",
"mlist ac" ->
"Lists variables that start with 'a' or 'c' from Visor console memory."
),
emptyArgs = mlist,
withArgs = mlist
)
addHelp(
name = "mclear",
shortInfo = "Clears Visor console memory variables.",
spec = Seq(
"mclear",
"mclear <name>|-ev|-al|-ca|-no|-tn|-ex"
),
args = Seq(
"<name>" -> Seq(
"Variable name to clear.",
"Note that name doesn't include '@' symbol used to reference variable."
),
"-ev" ->
"Clears all 'event' variables.",
"-al" ->
"Clears all 'alert' variables.",
"-ca" ->
"Clears all 'cache' variables.",
"-no" ->
"Clears all 'node' variables.",
"-tn" ->
"Clears all 'task name' variables.",
"-ex" ->
"Clears all 'task execution' variables."
),
examples = Seq(
"mclear" ->
"Clears all Visor console variables.",
"mclear -ca" ->
"Clears all Visor console cache variables.",
"mclear n2" ->
"Clears 'n2' Visor console variable."
),
emptyArgs = mclear,
withArgs = mclear
)
addHelp(
name = "mget",
shortInfo = "Gets Visor console memory variable.",
longInfo = Seq(
"Gets Visor console memory variable. Variable can be referenced with '@' prefix."
),
spec = Seq(
"mget <@v>"
),
args = Seq(
"@v" ->
"Variable name."
),
examples = Seq(
"mget <@v>" ->
"Gets Visor console variable whose name is referenced by variable 'v'."
),
emptyArgs = mget,
withArgs = mget
)
addHelp(
name = "help",
shortInfo = "Prints Visor console help.",
aliases = Seq("?"),
spec = Seq(
"help {c1 c2 ... ck}"
),
args = Seq(
"ck" ->
"Command to get help for."
),
examples = Seq(
"help status" ->
"Prints help for 'status' command.",
"help" ->
"Prints help for all command."
),
emptyArgs = help,
withArgs = help
)
addHelp(
name = "status",
shortInfo = "Prints Visor console status.",
aliases = Seq("!"),
spec = Seq(
"status {-q}"
),
args = Seq(
"-q" ->
"Quite output without ASCII logo."
),
examples = Seq(
"status" ->
"Prints Visor console status.",
"status -q" ->
"Prints Visor console status in quiet mode."
),
emptyArgs = status,
withArgs = status
)
/**
* @param name - command name.
*/
private def wrongArgs(name: String) {
warn("Invalid arguments for command without arguments.",
s"Type 'help $name' to see how to use this command.")
}
addHelp(
name = "close",
shortInfo = "Disconnects Visor console from the grid.",
spec = Seq("close"),
examples = Seq(
"close" ->
"Disconnects Visor console from the grid."
),
emptyArgs = close,
withArgs = _ => wrongArgs("close")
)
addHelp(
name = "quit",
shortInfo = "Quit from Visor console.",
spec = Seq("quit"),
examples = Seq(
"quit" ->
"Quit from Visor console."
),
aliases = Seq("exit"),
emptyArgs = quit,
withArgs = _ => wrongArgs("quit")
)
addHelp(
name = "log",
shortInfo = "Starts or stops grid-wide events logging.",
longInfo = Seq(
"Logging of discovery and failure grid-wide events.",
" ",
"Events are logged to a file. If path is not provided,",
"it will log into '<Ignite home folder>/work/visor/visor-log'.",
" ",
"File is always opened in append mode.",
"If file doesn't exist, it will be created.",
" ",
"It is often convenient to 'tail -f' the log file",
"in a separate console window.",
" ",
"Log command prints periodic topology snapshots in the following format:",
"H/N/C |1 |1 |4 |=^========..........|",
"where:",
" H - Hosts",
" N - Nodes",
" C - CPUs",
" = - 5%-based marker of average CPU load across the topology",
" ^ - 5%-based marker of average heap memory used across the topology"
),
spec = Seq(
"log",
"log -l {-f=<path>} {-p=<num>} {-t=<num>} {-dl}",
"log -s"
),
args = Seq(
"-l" -> Seq(
"Starts logging.",
"If logging is already started - it's no-op."
),
"-f=<path>" -> Seq(
"Provides path to the file.",
"Path to the file can be absolute or relative to Ignite home folder."
),
"-p=<num>" -> Seq(
"Provides period of querying events (in seconds).",
"Default is 10."
),
"-t=<num>" -> Seq(
"Provides period of logging topology snapshot (in seconds).",
"Default is 20."
),
"-s" -> Seq(
"Stops logging.",
"If logging is already stopped - it's no-op."
),
"-dl" -> Seq(
"Disables collecting of job and task fail events, cache rebalance events from remote nodes."
)
),
examples = Seq(
"log" ->
"Prints log status.",
"log -l -f=/home/user/visor-log" ->
"Starts logging to file 'visor-log' located at '/home/user'.",
"log -l -f=log/visor-log" ->
"Starts logging to file 'visor-log' located at '<Ignite home folder>/log'.",
("log -l -p=20",
"Starts logging to file '<Ignite home folder>/work/visor/visor-log' " +
"with querying events period of 20 seconds."),
("log -l -t=30",
"Starts logging to file '<Ignite home folder>/work/visor/visor-log' " +
"with topology snapshot logging period of 30 seconds."),
("log -l -dl",
"Starts logging to file '<Ignite home folder>/work/visor/visor-log' " +
"with disabled collection events from remote nodes."),
"log -s" ->
"Stops logging."
),
emptyArgs = log,
withArgs = log
)
logText("Visor started.")
// Print out log explanation at the beginning.
logText("<log>: H - Hosts")
logText("<log>: N - Nodes")
logText("<log>: C - CPUs")
logText("<log>: = - 5%-based marker of average CPU load across the topology")
logText("<log>: ^ - 5%-based marker of average heap memory used across the topology")
/**
* ==Command==
* Lists Visor console memory variables.
*
* ==Examples==
* <ex>mlist ac</ex>
* Lists variables that start with `a` or `c` from Visor console memory.
*
* <ex>mlist</ex>
* Lists all variables from Visor console memory.
*
* @param arg String that contains start characters of listed variables.
* If empty - all variables will be listed.
*/
def mlist(arg: String) {
assert(arg != null)
if (mem.isEmpty)
println("Memory is empty.")
else {
// `arg` acts as a set of first characters: keep variables whose name starts
// with any character of `arg`; a blank `arg` lists every variable.
val r = if (arg.trim == "") mem.toMap else mem.filter { case (k, _) => arg.contains(k.charAt(0)) }
if (r.isEmpty)
println("No matches found.")
else {
val t = new VisorTextTable()
t.maxCellWidth = 70
t #= ("Name", "Value")
// Render name/value pairs sorted by variable name.
r.toSeq.sortBy(_._1).foreach { case (k, v) => t += (k, v) }
t.render()
nl()
println(
"Variable can be referenced in other commands with '@' prefix." + NL +
"Reference can be either a flag or a parameter value." + NL +
"\nEXAMPLE: " + NL +
" 'help @cmd' - where 'cmd' variable contains command name." + NL +
" 'node -id8=@n11' - where 'n11' variable contains node ID8."
)
}
}
}
/**
* Shortcut for `println()`.
*/
def nl() {
println()
}
/**
* ==Command==
* Lists all Visor console memory.
*
* ==Examples==
* <ex>mlist</ex>
* Lists all variables in Visor console memory.
*/
def mlist() {
mlist("")
}
/**
* Clears given Visor console variable or the whole namespace.
*
* @param arg Variable host or namespace mnemonic.
*/
def mclear(arg: String) {
assert(arg != null)
// Mnemonic flags map to single-letter variable namespaces; any other argument
// is treated as a concrete variable name to remove.
arg match {
case "-ev" => clearNamespace("e")
case "-al" => clearNamespace("a")
case "-ca" => clearNamespace("c")
case "-no" => clearNamespace("n")
case "-tn" => clearNamespace("t")
case "-ex" => clearNamespace("s")
case _ => mem.remove(arg)
}
}
/**
* Clears given variable namespace.
*
* @param namespace Namespace.
*/
private def clearNamespace(namespace: String) {
assert(namespace != null)
mem.keySet.foreach(k => {
if (k.startsWith(namespace))
// Only remove auto-generated names: prefix followed by a number (e.g. "e0",
// "e12"). A non-numeric tail throws and the variable is kept.
try {
k.substring(1).toInt
mem.remove(k)
}
catch {
case ignored: Throwable => // No-op.
}
})
}
/**
* Clears all Visor console memory.
*/
def mclear() {
mem.clear()
}
/**
* Finds variable by its value.
*
* @param v Value to find by.
*/
def mfind(@Nullable v: String): Option[(String, String)] =
mem find(t => t._2 == v)
/**
* Sets Visor console memory variable. Note that this method '''does not'''
* perform variable substitution on its parameters.
*
* @param n Name of the variable. Can't be `null`.
* @param v Value of the variable. Can't be `null`.
* @return Previous value.
*/
def mset(n: String, v: String): String = {
msetOpt(n, v).orNull
}
/**
* Sets Visor console memory variable. Note that this method '''does not'''
* perform variable substitution on its parameters.
*
* @param n Name of the variable. Can't be `null`.
* @param v Value of the variable. Can't be `null`.
* @return Previous value as an option.
*/
/**
 * Sets Visor console memory variable. Note that this method '''does not'''
 * perform variable substitution on its parameters.
 *
 * @param n Name of the variable. Can't be `null`.
 * @param v Value of the variable. Can't be `null`.
 * @return Previous value as an option.
 */
def msetOpt(n: String, v: String): Option[String] = {
    assert(n != null)
    assert(v != null)

    // ConcurrentHashMap.put atomically returns the previous mapping (or null),
    // avoiding the separate get-then-put, which is a check-then-act race on a
    // concurrent map.
    Option(mem.put(n, v))
}
/**
* ==Command==
* Gets Visor console memory variable. Note that this method '''does not'''
* perform variable substitution on its parameters.
*
* ==Examples==
* <ex>mget @a</ex>
* Gets the value for Visor console variable '@a'.
*
* @param n Name of the variable.
* @return Variable value or `null` if such variable doesn't exist or its value was set as `null`.
*/
def mget(n: String) {
// Accept both plain names and '@'-prefixed references.
val key = if (n.startsWith("@")) n.substring(1) else n
if (mem.containsKey(key)) {
val t = new VisorTextTable()
t.maxCellWidth = 70
t #= ("Name", "Value")
t += (n, mem.get(key))
t.render()
nl()
}
else {
warn("Missing variable with name: \'" + n + "\'.")
}
}
/**
* Trap for missing arguments.
*/
def mget() {
warn("Missing argument.")
warn("Type 'help mget' to see how to use this command.")
}
/**
* ==Command==
* Gets Visor console memory variable. Note that this method '''does not'''
* perform variable substitution on its parameters.
*
* ==Examples==
* <ex>mgetOpt a</ex>
* Gets the value as an option for Visor console variable 'a'.
*
* @param n Name of the variable.
* @return Variable host as an option.
*/
def mgetOpt(n: String): Option[String] = {
assert(n != null)
Option(mem.get(n))
}
/**
* If variable with given value and prefix doesn't exist - creates
* a new variable with given value and returns its host. Otherwise,
* returns an existing variable host.
*
* @param v Value.
* @param prefix Variable host prefix.
* @return Existing variable host or the new variable host.
*/
/**
 * If a variable with the given value and prefix doesn't exist - creates
 * a new variable with the given value and returns its name. Otherwise,
 * returns the existing variable's name.
 *
 * @param v Value.
 * @param prefix Variable name prefix.
 * @return Existing variable name or the new variable name.
 */
def setVarIfAbsent(v: AnyRef, prefix: String): String = {
    assert(v != null)
    assert(prefix != null && prefix.length > 0)

    val s = v.toString

    // Reuse an existing variable that already holds this value under the prefix.
    mem.find { case (name, value) => name.startsWith(prefix) && value == s } match {
        case Some((name, _)) => name
        case None =>
            // Probe prefix0, prefix1, ... until putIfAbsent claims a free slot
            // atomically. Iterator.find replaces the original non-local `return`
            // from inside a `for` comprehension, which is implemented by throwing
            // NonLocalReturnControl and is discouraged/deprecated in newer Scala.
            Iterator.range(0, Int.MaxValue)
                .map(prefix + _)
                .find(name => mem.putIfAbsent(name, s) == null)
                .getOrElse(throw new IgniteException("No more memory."))
    }
}
/**
* Try get variable value with given name.
*
* @param v variable name.
* @return variable value or `v` if variable with name `v` not exist.
*/
/**
 * Resolves a possible '@'-reference to its stored value.
 *
 * @param v Variable reference ("@name") or plain value.
 * @return Stored value for "@name", or `v` unchanged when it is not a
 *         reference or the referenced variable does not exist.
 */
def getVariable(v: String): String =
    if (v.startsWith("@")) mgetOpt(v.substring(1)).getOrElse(v) else v
/**
* Creates a new variable with given value and returns its host.
*
* @param v Value.
* @param prefix Variable host prefix.
* @return New variable host.
*/
/**
 * Creates a new variable with the given value and returns its name.
 *
 * @param v Value.
 * @param prefix Variable name prefix.
 * @return New variable name.
 */
def setVar(v: AnyRef, prefix: String): String = {
    assert(v != null)
    assert(prefix != null && prefix.length > 0)

    val s = v.toString

    // Probe prefix0, prefix1, ... until putIfAbsent claims a free slot atomically.
    // Iterator.find replaces the original non-local `return` from inside a `for`
    // comprehension (implemented via NonLocalReturnControl — an anti-pattern).
    Iterator.range(0, Int.MaxValue)
        .map(prefix + _)
        .find(name => mem.putIfAbsent(name, s) == null)
        .getOrElse(throw new IgniteException("No more memory."))
}
/**
* Adds command help to the Visor console. This will be printed as part of `help` command.
*
* @param name Command name.
* @param shortInfo Short command description.
* @param longInfo Optional multi-line long command description. If not provided - short description
* will be used instead.
* @param aliases List of aliases. Optional.
* @param spec Command specification.
* @param args List of `(host, description)` tuples for command arguments. Optional.
* @param examples List of `(example, description)` tuples for command examples.
* @param emptyArgs - command implementation with empty arguments.
* @param withArgs - command implementation with arguments.
*/
def addHelp(
    name: String,
    shortInfo: String,
    @Nullable longInfo: Seq[String] = null,
    @Nullable aliases: Seq[String] = Seq.empty,
    spec: Seq[String],
    @Nullable args: Seq[(String, AnyRef)] = null,
    examples: Seq[(String, AnyRef)],
    emptyArgs: () => Unit,
    withArgs: (String) => Unit) {
    assert(name != null)
    assert(shortInfo != null)
    assert(spec != null && spec.nonEmpty)
    assert(examples != null && examples.nonEmpty)
    assert(emptyArgs != null)
    assert(withArgs != null)

    val holder = VisorCommandHolder(name, shortInfo, longInfo, aliases, spec, args, examples, emptyArgs, withArgs)

    // Keep the command list sorted by command name after each registration.
    cmdLst = (cmdLst :+ holder).sortBy(_.name)
}
/**
* Extract node from command arguments.
*
* @param argLst Command arguments.
* @return Error message or node ref.
*/
def parseNode(argLst: ArgList) = {
val id8 = argValue("id8", argLst)
val id = argValue("id", argLst)
// '-id8' and '-id' are mutually exclusive ways of addressing a node.
if (id8.isDefined && id.isDefined)
Left("Only one of '-id8' or '-id' is allowed.")
else if (id8.isDefined) {
// An ID8 is a node-ID prefix and may be ambiguous: require a unique match.
nodeById8(id8.get) match {
case Nil => Left("Unknown 'id8' value: " + id8.get)
case node :: Nil => Right(Option(node))
case _ => Left("'id8' resolves to more than one node (use full 'id' instead): " + id8.get)
}
}
else if (id.isDefined)
try {
val node = Option(ignite.cluster.node(java.util.UUID.fromString(id.get)))
if (node.isDefined)
Right(node)
else
Left("'id' does not match any node: " + id.get)
}
catch {
// UUID.fromString rejects malformed IDs.
case e: IllegalArgumentException => Left("Invalid node 'id': " + id.get)
}
else
// Neither flag given: the command applies to the whole topology.
Right(None)
}
/**
* Parses a single command-line token into a (name, value) tuple.
* Tokens starting with '-' or '/' are flags ("-x" -> ("x", null)) or named
* parameters ("-x=y" -> ("x", "y")); any other token is an unnamed value with
* a `null` name. Values get one surrounding quote pair stripped and
* '@'-references substituted from Visor memory (unresolved references kept as-is).
*/
private[this] def parseArg(arg: String): Arg = {
if (arg(0) == '-' || arg(0) == '/') {
val eq = arg.indexOf('=')
if (eq == -1)
arg.substring(1) -> null
else {
val n = arg.substring(1, eq).trim
// Strip at most one trailing and one leading quote (', " or `).
var v = arg.substring(eq + 1).trim.replaceAll("['\"`]$", "").replaceAll("^['\"`]", "")
if (v.startsWith("@"))
v = mgetOpt(v.substring(1)).getOrElse(v)
n -> v
}
}
else {
val k: String = null
val v = if (arg.startsWith("@"))
mgetOpt(arg.substring(1)).getOrElse(arg)
else
arg
k -> v
}
}
// Matches a token that opens a quoted value: optional "-name=" (or "/name=")
// prefix followed by an opening ', " or ` quote. Capture group 1 is the quote.
private val quotedArg = "(?:[-/].*=)?(['\"`]).*".r
/**
* Utility method that parses command arguments. Arguments represented as a string
* into argument list represented as list of tuples (host, value) performing
* variable substitution:
*
* `-p=@n` - A named parameter where `@n` will be considered as a reference to variable named `n`.
* `@ n` - An unnamed parameter where `@n` will be considered as a reference to variable named `n`.
* `-p` - A flag doesn't support variable substitution.
*
* Note that recursive substitution isn't supported. If specified variable isn't set - the value
* starting with `@` will be used as-is.
*
* @param args Command arguments to parse.
*/
def parseArgs(@Nullable args: String): ArgList = {
val buf = collection.mutable.ArrayBuffer.empty[Arg]
if (args != null && args.trim.nonEmpty) {
val lst = args.trim.split(" ")
val sb = new StringBuilder()
// Re-join tokens that were split inside a quoted value: while the number of
// quote characters seen so far is odd, keep accumulating tokens into `sb`.
for (i <- 0 until lst.size if lst(i).nonEmpty || sb.nonEmpty) {
val arg = sb.toString + lst(i)
arg match {
case quotedArg(quote) if arg.count(_ == quote(0)) % 2 != 0 && i + 1 < lst.size =>
sb.append(lst(i)).append(" ")
case _ =>
sb.clear()
buf += parseArg(arg)
}
}
}
buf
}
/**
* Shortcut method that checks if passed in argument list has an argument with given value.
*
* @param v Argument value to check for existence in this list.
* @param args Command argument list.
*/
def hasArgValue(@Nullable v: String, args: ArgList): Boolean = {
assert(args != null)
args.exists(_._2 == v)
}
/**
* Shortcut method that checks if passed in argument list has an argument with given host.
*
* @param n Argument host to check for existence in this list.
* @param args Command argument list.
*/
def hasArgName(@Nullable n: String, args: ArgList): Boolean = {
assert(args != null)
args.exists(_._1 == n)
}
/**
* Shortcut method that checks if flag (non-`null` host and `null` value) is set
* in the argument list.
*
* @param n Name of the flag.
* @param args Command argument list.
*/
def hasArgFlag(n: String, args: ArgList): Boolean = {
assert(n != null && args != null)
args.exists((a) => a._1 == n && a._2 == null)
}
/**
* Gets the value for a given argument host.
*
* @param n Argument host.
* @param args Argument list.
* @return Argument value.
*/
@Nullable def argValue(n: String, args: ArgList): Option[String] = {
assert(n != null && args != null)
Option((args find(_._1 == n) getOrElse Til)._2)
}
/**
* Gets a non-`null` value for given parameter.
*
* @param a Parameter.
* @param dflt Value to return if `a` is `null`.
*/
/**
 * Renders a possibly-`null` value as a string, falling back to a default.
 *
 * @param a Value to render; may be `null`.
 * @param dflt Value rendered when `a` is `null`. Can't be `null`.
 */
def safe(@Nullable a: Any, dflt: Any = NA) = {
    assert(dflt != null)

    // Option(null) is None, so the fallback kicks in exactly when `a` is null.
    Option(a).getOrElse(dflt).toString
}
/**
* Joins array elements to string.
*
* @param arr Array.
* @param dflt Value to return if `arr` is `null` or empty.
* @return String.
*/
def arr2Str[T](arr: Array[T], dflt: Any = NA) =
if (arr != null && arr.length > 0) U.compact(arr.mkString(", ")) else dflt.toString
/**
* Converts `Boolean` to 'on'/'off' string.
*
* @param bool Boolean value.
* @return String.
*/
def bool2Str(bool: Boolean) = if (bool) "on" else "off"
/**
* Converts `java.lang.Boolean` to 'on'/'off' string.
*
* @param bool Boolean value.
* @param ifNull Default value in case if `bool` is `null`.
* @return String.
*/
/** Renders a boxed `java.lang.Boolean` as 'on'/'off', using `ifNull` when it is `null`. */
def javaBoolToStr(bool: JavaBoolean, ifNull: Boolean = false) =
    if (bool == null) bool2Str(ifNull) else bool2Str(bool.booleanValue())
/**
* Reconstructs string presentation for given argument.
*
* @param arg Argument to reconstruct.
*/
/**
 * Reconstructs the command-line string form of a parsed argument:
 * "-name=value" for named parameters, "-name" for flags, the bare value
 * (possibly `null`) for unnamed parameters.
 *
 * @param arg Argument to reconstruct.
 */
@Nullable def makeArg(arg: Arg): String = {
    assert(arg != null)

    arg match {
        case (n, null) if n != null => "-" + n
        case (n, v) if n != null => "-" + n + '=' + v
        case (_, v) => v
    }
}
/**
* Reconstructs string presentation for given argument list.
*
* @param args Argument list to reconstruct.
*/
/**
 * Reconstructs the space-separated string form of an argument list.
 *
 * @param args Argument list to reconstruct.
 */
def makeArgs(args: ArgList): String = {
    assert(args != null)

    // Deliberately not mkString: an empty rendering for the first argument must
    // not leave a leading separator, matching the original fold semantics.
    args.foldLeft("") { (acc, a) =>
        if (acc.isEmpty) makeArg(a) else acc + ' ' + makeArg(a)
    }
}
/**
* Parses string containing mnemonic predicate and returns Scala predicate.
*
* @param s Mnemonic predicate.
* @return Long to Boolean predicate or null if predicate cannot be created.
*/
def makeExpression(s: String): Option[Long => Boolean] = {
assert(s != null)
// Converts the numeric tail with an optional unit suffix into a Long:
// 's' = seconds, 'm' = minutes, 'h' = hours (all converted to milliseconds);
// no suffix uses the raw number.
def value(v: String): Long =
// Support for seconds, minutes and hours.
// NOTE: all memory sizes are assumed to be in MB.
v.last match {
case 's' => v.substring(0, v.length - 1).toLong * 1000
case 'm' => v.substring(0, v.length - 1).toLong * 1000 * 60
case 'h' => v.substring(0, v.length - 1).toLong * 1000 * 60 * 60
case _ => v.toLong
}
// NOTE: "lte"/"gte"/"neq" must be tested before their "lt"/"gt"/"eq" prefixes,
// otherwise e.g. "lte5" would wrongly parse as "lt" with value "e5".
try
if (s.startsWith("lte")) // <=
Some(_ <= value(s.substring(3)))
else if (s.startsWith("lt")) // <
Some(_ < value(s.substring(2)))
else if (s.startsWith("gte")) // >=
Some(_ >= value(s.substring(3)))
else if (s.startsWith("gt")) // >
Some(_ > value(s.substring(2)))
else if (s.startsWith("eq")) // ==
Some(_ == value(s.substring(2)))
else if (s.startsWith("neq")) // !=
Some(_ != value(s.substring(3)))
else
None
catch {
// Malformed or empty numeric part yields no predicate.
case e: Throwable => None
}
}
// Formatters.
private val dblFmt = new DecimalFormat("#0.00", DEC_FMT_SYMS)
private val intFmt = new DecimalFormat("#0", DEC_FMT_SYMS)
/**
* Formats double value with `#0.00` formatter.
*
* @param d Double value to format.
*/
def formatDouble(d: Double): String = {
dblFmt.format(d)
}
/**
* Formats double value with `#0` formatter.
*
* @param d Double value to format.
*/
def formatInt(d: Double): String = {
intFmt.format(d.round)
}
/**
* Returns string representation of the timestamp provided. Result formatted
* using pattern `MM/dd/yy, HH:mm:ss`.
*
* @param ts Timestamp.
*/
def formatDateTime(ts: Long): String =
dtFmt.format(ts)
/**
* Returns string representation of the date provided. Result formatted using
* pattern `MM/dd/yy, HH:mm:ss`.
*
* @param date Date.
*/
def formatDateTime(date: Date): String =
dtFmt.format(date)
/**
* Returns string representation of the timestamp provided. Result formatted
* using pattern `MM/dd/yy`.
*
* @param ts Timestamp.
*/
def formatDate(ts: Long): String =
dFmt.format(ts)
/**
* Returns string representation of the date provided. Result formatted using
* pattern `MM/dd/yy`.
*
* @param date Date.
*/
def formatDate(date: Date): String =
dFmt.format(date)
/**
* Base class for memory units.
*
* @param name Unit name to display on screen.
* @param base Unit base to convert from bytes.
*/
private[this] sealed abstract class VisorMemoryUnit(name: String, val base: Long) {
/**
* Convert memory in bytes to memory in units.
*
* @param m Memory in bytes.
* @return Memory in units.
*/
def toUnits(m: Long): Double = m.toDouble / base
/**
* Check if memory fits measure units.
*
* @param m Memory in bytes.
* @return `True` if memory is more than `1` after converting bytes to units.
*/
def has(m: Long): Boolean = toUnits(m) >= 1
override def toString = name
}
private[this] case object BYTES extends VisorMemoryUnit("b", 1)
private[this] case object KILOBYTES extends VisorMemoryUnit("kb", 1024L)
private[this] case object MEGABYTES extends VisorMemoryUnit("mb", 1024L * 1024L)
private[this] case object GIGABYTES extends VisorMemoryUnit("gb", 1024L * 1024L * 1024L)
private[this] case object TERABYTES extends VisorMemoryUnit("tb", 1024L * 1024L * 1024L * 1024L)
/**
* Detect memory measure units: from BYTES to TERABYTES.
*
* @param m Memory in bytes.
* @return Memory measure units.
*/
/**
 * Detect memory measure units: from BYTES to TERABYTES.
 *
 * @param m Memory in bytes.
 * @return Memory measure units.
 */
private[this] def memoryUnit(m: Long): VisorMemoryUnit =
    // Largest unit (checked first) that still yields at least 1 after conversion.
    Seq(TERABYTES, GIGABYTES, MEGABYTES, KILOBYTES).find(_.has(m)).getOrElse(BYTES)
/**
* Returns string representation of the memory.
*
* @param n Memory size.
*/
/**
 * Returns string representation of the memory size with an auto-detected unit
 * suffix (b/kb/mb/gb/tb); non-positive sizes render as "0".
 *
 * @param n Memory size in bytes.
 */
def formatMemory(n: Long): String =
    if (n <= 0)
        "0"
    else {
        val u = memoryUnit(n)

        kbFmt.format(u.toUnits(n)) + u.toString
    }
/**
* Returns string representation of the memory limit.
*
* @param n Memory size.
*/
def formatMemoryLimit(n: Long): String = {
n match {
case -1 => "Disabled"
case 0 => "Unlimited"
case m => formatMemory(m)
}
}
/**
* Returns string representation of the number.
*
* @param n Number.
*/
def formatNumber(n: Long): String =
nmFmt.format(n)
/**
* Tests whether or not Visor console is connected.
*
* @return `True` if Visor console is connected.
*/
def isConnected =
isCon
/**
* Gets timestamp of Visor console connection. Returns `0` if Visor console is not connected.
*
* @return Timestamp of Visor console connection.
*/
def connectTimestamp =
conTs
/**
* Prints properly formatted error message like:
* {{{
* (wrn) <visor>: warning message
* }}}
*
* @param warnMsgs Error messages to print. If `null` - this function is no-op.
*/
def warn(warnMsgs: Any*) {
assert(warnMsgs != null)
warnMsgs.foreach(line => println(s"(wrn) <visor>: $line"))
}
/**
* Prints standard 'not connected' error message.
*/
def adviseToConnect() {
warn(
"Visor is disconnected.",
"Type 'open' to connect Visor console or 'help open' to get help."
)
}
/**
* Gets global projection as an option.
*/
def gridOpt =
Option(ignite)
def noop() {}
/**
* ==Command==
* Prints Visor console status.
*
* ==Example==
* <ex>status -q</ex>
* Prints Visor console status without ASCII logo.
*
* @param args Optional "-q" flag to disable ASCII logo printout.
*/
def status(args: String) {
val t = VisorTextTable()
t += ("Status", if (isCon) "Connected" else "Disconnected")
t += ("Grid name",
if (ignite == null)
NA
else {
val n = ignite.name
escapeName(n)
}
)
t += ("Config path", safe(cfgPath))
t += ("Uptime", if (isCon) X.timeSpan2HMS(uptime) else NA)
t.render()
}
/**
* ==Command==
* Prints Visor console status (with ASCII logo).
*
* ==Example==
* <ex>status</ex>
* Prints Visor console status.
*/
def status() {
status("")
}
/**
* ==Command==
* Prints help for specific command(s) or for all commands.
*
* ==Example==
* <ex>help</ex>
* Prints general help.
*
* <ex>help open</ex>
* Prints help for 'open' command.
*
* @param args List of commands to print help for. If empty - prints generic help.
*/
def help(args: String = null) {
val argLst = parseArgs(args)
if (!has(argLst)) {
val t = VisorTextTable()
t.autoBorder = false
t.maxCellWidth = 55
t #= ("Command", "Description")
cmdLst foreach (hlp => t += (hlp.nameWithAliases, hlp.shortInfo))
t.render()
println("\nType 'help \"command name\"' to see how to use this command.")
}
else
for (c <- argLst)
if (c._1 != null)
warn("Invalid command name: " + argName(c))
else if (c._2 == null)
warn("Invalid command name: " + argName(c))
else {
val n = c._2
val opt = cmdLst.find(_.name == n)
if (opt.isEmpty)
warn("Invalid command name: " + n)
else {
val hlp: VisorCommandHolder = opt.get
val t = VisorTextTable()
t += (hlp.nameWithAliases, if (hlp.longInfo == null) hlp.shortInfo else hlp.longInfo)
t.render()
println("\nSPECIFICATION:")
hlp.spec foreach(s => println(blank(4) + s))
if (has(hlp.args)) {
println("\nARGUMENTS:")
hlp.args foreach (a => {
val (arg, desc) = a
println(blank(4) + arg)
desc match {
case (lines: Iterable[_]) => lines foreach (line => println(blank(8) + line))
case s: AnyRef => println(blank(8) + s.toString)
}
})
}
if (has(hlp.examples)) {
println("\nEXAMPLES:")
hlp.examples foreach (a => {
val (ex, desc) = a
println(blank(4) + ex)
desc match {
case (lines: Iterable[_]) => lines foreach (line => println(blank(8) + line))
case s: AnyRef => println(blank(8) + s.toString)
}
})
}
nl()
}
}
}
/**
* Tests whether passed in sequence is not `null` and not empty.
*/
private def has[T](@Nullable s: Seq[T]): Boolean = {
s != null && s.nonEmpty
}
/**
* ==Command==
* Prints generic help.
*
* ==Example==
* <ex>help</ex>
* Prints help.
*/
def help() {
help("")
}
/**
* Helper function that makes up the full argument host from tuple.
*
* @param t Command argument tuple.
*/
def argName(t: (String, String)): String =
if (F.isEmpty(t._1) && F.isEmpty(t._2))
"<empty>"
else if (F.isEmpty(t._1))
t._2
else
t._1
/**
* Helper method that produces blank string of given length.
*
* @param len Length of the blank string.
*/
private def blank(len: Int) = new String().padTo(len, ' ')
/**
* Connects Visor console to configuration with path.
*
* @param gridName Name of grid instance.
* @param cfgPath Configuration path.
*/
def open(gridName: String, cfgPath: String) {
this.cfgPath = cfgPath
ignite =
try
Ignition.ignite(gridName).asInstanceOf[IgniteEx]
catch {
case _: IllegalStateException =>
this.cfgPath = null
throw new IgniteException("Named grid unavailable: " + gridName)
}
assert(cfgPath != null)
isCon = true
conOwner = true
conTs = System.currentTimeMillis
ignite.cluster.nodes().foreach(n => {
setVarIfAbsent(nid8(n), "n")
val ip = sortAddresses(n.addresses()).headOption
if (ip.isDefined)
setVarIfAbsent(ip.get, "h")
})
nodeJoinLsnr = new IgnitePredicate[Event]() {
override def apply(e: Event): Boolean = {
e match {
case de: DiscoveryEvent =>
setVarIfAbsent(nid8(de.eventNode()), "n")
val node = ignite.cluster.node(de.eventNode().id())
if (node != null) {
val ip = sortAddresses(node.addresses).headOption
if (ip.isDefined)
setVarIfAbsent(ip.get, "h")
}
else {
warn(
"New node not found: " + de.eventNode().id(),
"Visor must have discovery configuration and local " +
"host bindings identical with grid nodes."
)
}
}
true
}
}
ignite.events().localListen(nodeJoinLsnr, EVT_NODE_JOINED)
nodeLeftLsnr = new IgnitePredicate[Event]() {
override def apply(e: Event): Boolean = {
e match {
case (de: DiscoveryEvent) =>
val nv = mfind(nid8(de.eventNode()))
if (nv.isDefined)
mem.remove(nv.get._1)
val ip = sortAddresses(de.eventNode().addresses).headOption
if (ip.isDefined) {
val last = !ignite.cluster.nodes().exists(n =>
n.addresses.size > 0 && sortAddresses(n.addresses).head == ip.get
)
if (last) {
val hv = mfind(ip.get)
if (hv.isDefined)
mem.remove(hv.get._1)
}
}
}
true
}
}
ignite.events().localListen(nodeLeftLsnr, EVT_NODE_LEFT, EVT_NODE_FAILED)
nodeSegLsnr = new IgnitePredicate[Event] {
override def apply(e: Event): Boolean = {
e match {
case de: DiscoveryEvent =>
if (de.eventNode().id() == ignite.localNode.id) {
warn("Closing Visor console due to topology segmentation.")
warn("Contact your system administrator.")
nl()
close()
}
}
true
}
}
ignite.events().localListen(nodeSegLsnr, EVT_NODE_SEGMENTED)
nodeStopLsnr = new IgnitionListener {
def onStateChange(name: String, state: IgniteState) {
if (name == ignite.name && state == IgniteState.STOPPED) {
warn("Closing Visor console due to stopping of host grid instance.")
nl()
close()
}
}
}
Ignition.addListener(nodeStopLsnr)
logText("Visor joined topology: " + cfgPath)
logText("All live nodes, if any, will re-join.")
nl()
val t = VisorTextTable()
// Print advise.
println("Some useful commands:")
t += ("Type 'top'", "to see full topology.")
t += ("Type 'node'", "to see node statistics.")
t += ("Type 'cache'", "to see cache statistics.")
t += ("Type 'tasks'", "to see tasks statistics.")
t += ("Type 'config'", "to see node configuration.")
t.render()
println("\nType 'help' to get help.\n")
status()
}
/**
* Returns string with node id8, its memory variable, if available, and its
* IP address (first internal address), if node is alive.
*
* @param id Node ID.
* @return String.
*/
def nodeId8Addr(id: UUID): String = {
assert(id != null)
assert(isCon)
val g = ignite
if (g != null && g.localNode.id == id)
"<visor>"
else {
val n = ignite.cluster.node(id)
val id8 = nid8(id)
val v = mfind(id8)
id8 +
(if (v.isDefined) "(@" + v.get._1 + ")" else "") +
", " +
(if (n == null) NA else sortAddresses(n.addresses).headOption.getOrElse(NA))
}
}
/**
* Returns string with node id8 and its memory variable, if available.
*
* @param id Node ID.
* @return String.
*/
def nodeId8(id: UUID): String = {
assert(id != null)
assert(isCon)
val id8 = nid8(id)
val v = mfind(id8)
id8 + (if (v.isDefined) "(@" + v.get._1 + ")" else "")
}
/**
* Guards against invalid percent readings.
*
* @param v Value in '%' to guard.
* @return Percent as string. Any value below `0` and greater than `100` will return `<n/a>` string.
*/
def safePercent(v: Double): String = if (v < 0 || v > 100) NA else formatDouble(v) + " %"
/** Convert to task argument. */
def emptyTaskArgument[A](nid: UUID): VisorTaskArgument[Void] = new VisorTaskArgument(nid, false)
def emptyTaskArgument[A](nids: Iterable[UUID]): VisorTaskArgument[Void] =
new VisorTaskArgument(new JavaHashSet(nids), false)
/** Convert to task argument. */
def toTaskArgument[A](nid: UUID, arg: A): VisorTaskArgument[A] = new VisorTaskArgument(nid, arg, false)
/** Convert to task argument. */
def toTaskArgument[A](nids: Iterable[UUID], arg: A): VisorTaskArgument[A] =
new VisorTaskArgument(new JavaHashSet(nids), arg, false)
@throws[ClusterGroupEmptyException]("In case of empty topology.")
private def execute[A, R, J](grp: ClusterGroup, task: Class[_ <: VisorMultiNodeTask[A, R, J]], arg: A): R = {
if (grp.nodes().isEmpty)
throw new ClusterGroupEmptyException("Topology is empty.")
ignite.compute(grp).withNoFailover().execute(task, toTaskArgument(grp.nodes().map(_.id()), arg))
}
/**
* Execute task on node.
*
* @param nid Node id.
* @param task Task class
* @param arg Task argument.
* @tparam A Task argument type.
* @tparam R Task result type
* @tparam J Job class.
* @return Task result.
*/
@throws[ClusterGroupEmptyException]("In case of empty topology.")
def executeOne[A, R, J](nid: UUID, task: Class[_ <: VisorMultiNodeTask[A, R, J]], arg: A): R =
execute(ignite.cluster.forNodeId(nid), task, arg)
/**
* Execute task on random node from specified cluster group.
*
* @param grp Cluster group to take rundom node from
* @param task Task class
* @param arg Task argument.
* @tparam A Task argument type.
* @tparam R Task result type
* @tparam J Job class.
* @return Task result.
*/
@throws[ClusterGroupEmptyException]("In case of empty topology.")
def executeRandom[A, R, J](grp: ClusterGroup, task: Class[_ <: VisorMultiNodeTask[A, R, J]], arg: A): R =
execute(grp.forRandom(), task, arg)
/**
* Execute task on random node.
*
* @param task Task class
* @param arg Task argument.
* @tparam A Task argument type.
* @tparam R Task result type
* @tparam J Job class.
* @return Task result.
*/
@throws[ClusterGroupEmptyException]("In case of empty topology.")
def executeRandom[A, R, J](task: Class[_ <: VisorMultiNodeTask[A, R, J]], arg: A): R =
execute(ignite.cluster.forRandom(), task, arg)
/**
* Execute task on specified nodes.
*
* @param nids Node ids.
* @param task Task class
* @param arg Task argument.
* @tparam A Task argument type.
* @tparam R Task result type
* @tparam J Job class.
* @return Task result.
*/
@throws[ClusterGroupEmptyException]("In case of empty topology.")
def executeMulti[A, R, J](nids: Iterable[UUID], task: Class[_ <: VisorMultiNodeTask[A, R, J]], arg: A): R =
execute(ignite.cluster.forNodeIds(nids), task, arg)
/**
* Execute task on all nodes.
*
* @param task Task class
* @param arg Task argument.
* @tparam A Task argument type.
* @tparam R Task result type
* @tparam J Job class.
* @return Task result.
*/
@throws[ClusterGroupEmptyException]("In case of empty topology.")
def executeMulti[A, R, J](task: Class[_ <: VisorMultiNodeTask[A, R, J]], arg: A): R =
execute(ignite.cluster.forRemotes(), task, arg)
/**
* Gets caches configurations from specified node.
*
* @param nid Node ID to collect configuration from.
* @return Collection of cache configurations.
*/
@throws[ClusterGroupEmptyException]("In case of empty topology.")
def cacheConfigurations(nid: UUID): JavaCollection[VisorCacheConfiguration] =
executeOne(nid, classOf[VisorCacheConfigurationCollectorTask],
null.asInstanceOf[JavaCollection[IgniteUuid]]).values()
/**
* Asks user to select a node from the list.
*
* @param title Title displayed before the list of nodes.
* @return `Option` for ID of selected node.
*/
def askForNode(title: String): Option[UUID] = {
assert(title != null)
assert(isCon)
val t = VisorTextTable()
t #= ("#", "Node ID8(@), IP","Node Type", "Up Time", "CPUs", "CPU Load", "Free Heap")
val nodes = ignite.cluster.nodes().toList
if (nodes.isEmpty) {
warn("Topology is empty.")
None
}
else if (nodes.size == 1)
Some(nodes.head.id)
else {
nodes.indices foreach (i => {
val n = nodes(i)
val m = n.metrics
val usdMem = m.getHeapMemoryUsed
val maxMem = m.getHeapMemoryMaximum
val freeHeapPct = (maxMem - usdMem) * 100 / maxMem
val cpuLoadPct = m.getCurrentCpuLoad * 100
t += (
i,
nodeId8Addr(n.id),
if (n.isClient) "Client" else "Server",
X.timeSpan2HMS(m.getUpTime),
n.metrics.getTotalCpus,
safePercent(cpuLoadPct),
formatDouble(freeHeapPct) + " %"
)
})
println(title)
t.render()
val a = ask("\nChoose node number ('c' to cancel) [c]: ", "c")
if (a.toLowerCase == "c")
None
else {
try
Some(nodes(a.toInt).id)
catch {
case e: Throwable =>
warn("Invalid selection: " + a)
None
}
}
}
}
/**
* Asks user to select a host from the list.
*
* @param title Title displayed before the list of hosts.
* @return `Option` for projection of nodes located on selected host.
*/
def askForHost(title: String): Option[ClusterGroup] = {
assert(title != null)
assert(isCon)
val t = VisorTextTable()
t #= ("#", "Int./Ext. IPs", "Node ID8(@)", "OS", "CPUs", "MACs", "CPU Load")
val neighborhood = U.neighborhood(ignite.cluster.nodes()).values().toIndexedSeq
if (neighborhood.isEmpty) {
warn("Topology is empty.")
None
}
else {
neighborhood.indices foreach (i => {
val neighbors = neighborhood(i)
var ips = immutable.Set.empty[String]
var id8s = Seq.empty[String]
var macs = immutable.Set.empty[String]
var cpuLoadSum = 0.0
val n1 = neighbors.head
assert(n1 != null)
val cpus = n1.metrics.getTotalCpus
val os = "" +
n1.attribute("os.name") + " " +
n1.attribute("os.arch") + " " +
n1.attribute("os.version")
neighbors.foreach(n => {
id8s = id8s :+ nodeId8(n.id)
ips = ips ++ n.addresses
cpuLoadSum += n.metrics().getCurrentCpuLoad
macs = macs ++ n.attribute[String](ATTR_MACS).split(", ").map(_.grouped(2).mkString(":"))
})
t += (
i,
ips.toSeq,
id8s,
os,
cpus,
macs.toSeq,
safePercent(cpuLoadSum / neighbors.size() * 100)
)
})
println(title)
t.render()
val a = ask("\nChoose host number ('c' to cancel) [c]: ", "c")
if (a.toLowerCase == "c")
None
else {
try
Some(ignite.cluster.forNodes(neighborhood(a.toInt)))
catch {
case e: Throwable =>
warn("Invalid selection: " + a)
None
}
}
}
}
/**
* Asks user to choose configuration file.
*
* @return `Option` for file path.
*/
def askConfigFile(): Option[String] = {
val files = GridConfigurationFinder.getConfigFiles
if (files.isEmpty) {
warn("No configuration files found.")
None
}
else {
val t = VisorTextTable()
t #= ("#", "Configuration File")
(0 until files.size).foreach(i => t += (i, files(i).get1()))
println("Local configuration files:")
t.render()
val a = ask("\nChoose configuration file number ('c' to cancel) [0]: ", "0")
if (a.toLowerCase == "c")
None
else {
try
Some(files(a.toInt).get3.getPath)
catch {
case e: Throwable =>
nl()
warn("Invalid selection: " + a)
None
}
}
}
}
/**
* Asks user input.
*
* @param prompt Prompt string.
* @param dflt Default value for user input.
* @param passwd If `true`, input will be masked with '*' character. `false` by default.
*/
def ask(prompt: String, dflt: String, passwd: Boolean = false): String = {
assert(prompt != null)
assert(dflt != null)
readLineOpt(prompt, if (passwd) Some('*') else None) match {
case None => dflt
case Some(s) if s.length == 0 => dflt
case Some(s) => s
}
}
/**
* Safe `readLine` version.
*
* @param prompt User prompt.
* @param mask Mask character (if `None`, no masking will be applied).
*/
private def readLineOpt(prompt: String, mask: Option[Char] = None): Option[String] = {
assert(reader != null)
try {
Option(mask.fold(reader.readLine(prompt))(reader.readLine(prompt, _)))
}
catch {
case _: Throwable => None
}
}
/**
* Asks user to choose node id8.
*
* @return `Option` for node id8.
*/
def askNodeId(): Option[String] = {
assert(isConnected)
val ids = ignite.cluster.forRemotes().nodes().map(nid8).toList
ids.indices.foreach(i => println((i + 1) + ": " + ids(i)))
nl()
println("C: Cancel")
nl()
readLineOpt("Choose node: ") match {
case Some("c") | Some("C") | None => None
case Some(idx) =>
try
Some(ids(idx.toInt - 1))
catch {
case e: Throwable =>
if (idx.isEmpty)
warn("Index can't be empty.")
else
warn("Invalid index: " + idx + ".")
None
}
}
}
/**
* Adds close callback. Added function will be called every time
* command `close` is called.
*
* @param f Close callback to add.
*/
def addShutdownCallback(f: () => Unit) {
assert(f != null)
shutdownCbs = shutdownCbs :+ f
}
/**
* Adds close callback. Added function will be called every time
* command `close` is called.
*
* @param f Close callback to add.
*/
def addCloseCallback(f: () => Unit) {
assert(f != null)
cbs = cbs :+ f
}
/**
* Removes close callback.
*
* @param f Close callback to remove.
*/
def removeCloseCallback(f: () => Unit) {
assert(f != null)
cbs = cbs.filter(_ != f)
}
/**
* Removes all close callbacks.
*/
def removeCloseCallbacks() {
cbs = Seq.empty[() => Unit]
}
/**
* Gets visor uptime.
*/
def uptime = if (isCon) System.currentTimeMillis() - conTs else -1L
/**
* ==Command==
* Disconnects visor.
*
* ==Examples==
* <ex>close</ex>
* Disconnects from the grid.
*/
def close() {
if (!isConnected)
adviseToConnect()
else {
if (pool != null) {
pool.shutdown()
try
if (!pool.awaitTermination(5, TimeUnit.SECONDS))
pool.shutdownNow
catch {
case e: InterruptedException =>
pool.shutdownNow
Thread.currentThread.interrupt()
}
pool = new IgniteThreadPoolExecutor()
}
// Call all close callbacks.
cbs foreach(_.apply())
if (ignite != null && Ignition.state(ignite.name) == IgniteState.STARTED) {
if (nodeJoinLsnr != null)
ignite.events().stopLocalListen(nodeJoinLsnr)
if (nodeLeftLsnr != null)
ignite.events().stopLocalListen(nodeLeftLsnr)
if (nodeSegLsnr != null)
ignite.events().stopLocalListen(nodeSegLsnr)
}
if (nodeStopLsnr != null)
Ignition.removeListener(nodeStopLsnr)
if (ignite != null && conOwner)
try
Ignition.stop(ignite.name, true)
catch {
case e: Exception => warn(e.getMessage)
}
// Fall through and treat Visor console as closed
// even in case when grid didn't stop properly.
logText("Visor left topology.")
if (logStarted) {
stopLog()
nl()
}
isCon = false
conOwner = false
conTs = 0
ignite = null
nodeJoinLsnr = null
nodeLeftLsnr = null
nodeSegLsnr = null
nodeStopLsnr = null
cfgPath = null
// Clear the memory.
mclear()
nl()
status()
}
}
/**
* ==Command==
* quit from Visor console.
*
* ==Examples==
* <ex>quit</ex>
* Quit from Visor console.
*/
def quit() {
System.exit(0)
}
/**
* ==Command==
* Prints log status.
*
* ==Examples==
* <ex>log</ex>
* Prints log status.
*/
def log() {
val t = VisorTextTable()
t += ("Status", if (logStarted) "Started" else "Stopped")
if (logStarted) {
t += ("File path", logFile.getAbsolutePath)
t += ("File size", if (logFile.exists) formatMemory(logFile.length()))
}
t.render()
}
/**
* ==Command==
* Starts or stops logging.
*
* ==Examples==
* <ex>log -l -f=/home/user/visor-log</ex>
* Starts logging to file `visor-log` located at `/home/user`.
* <br>
* <ex>log -l -f=log/visor-log</ex>
* Starts logging to file `visor-log` located at <`Ignite home folder`>`/log`.
* <br>
* <ex>log -l -p=20</ex>
* Starts logging with querying events period of 20 seconds.
* <br>
* <ex>log -l -t=30</ex>
* Starts logging with topology snapshot logging period of 30 seconds.
* <br>
* <ex>log -s</ex>
* Stops logging.
*
* @param args Command arguments.
*/
def log(args: String) {
assert(args != null)
if (!isConnected)
adviseToConnect()
else {
def scold(errMsgs: Any*) {
assert(errMsgs != null)
warn(errMsgs: _*)
warn("Type 'help log' to see how to use this command.")
}
val argLst = parseArgs(args)
if (hasArgFlag("s", argLst))
if (!logStarted)
scold("Logging was not started.")
else
stopLog()
else if (hasArgFlag("l", argLst))
if (logStarted)
scold("Logging is already started.")
else
try
startLog(argValue("f", argLst), argValue("p", argLst), argValue("t", argLst),
hasArgFlag("dl", argLst))
catch {
case e: Exception => scold(e)
}
else
scold("Invalid arguments.")
}
}
/**
* Stops logging.
*/
private def stopLog() {
assert(logStarted)
logText("Log stopped.")
if (logTimer != null) {
logTimer.cancel()
logTimer.purge()
logTimer = null
}
if (topTimer != null) {
topTimer.cancel()
topTimer.purge()
topTimer = null
}
logStarted = false
println("<visor>: Log stopped: " + logFile.getAbsolutePath)
}
/** Unique Visor key to get events last order. */
final val EVT_LAST_ORDER_KEY = UUID.randomUUID().toString
/** Unique Visor key to get events throttle counter. */
final val EVT_THROTTLE_CNTR_KEY = UUID.randomUUID().toString
/**
* Starts logging. If logging is already started - no-op.
*
* @param pathOpt `Option` for log file path. If `None` - default is used.
* @param freqOpt `Option` for events fetching frequency If `None` - default is used.
* @param topFreqOpt `Option` for topology refresh frequency.
* @param rmtLogDisabled `True` if no events collected from remote nodes.
*/
private def startLog(pathOpt: Option[String], freqOpt: Option[String], topFreqOpt: Option[String],
rmtLogDisabled: Boolean) {
assert(pathOpt != null)
assert(freqOpt != null)
assert(!logStarted)
val path = pathOpt.getOrElse(DFLT_LOG_PATH)
val f = new File(path)
if (f.exists() && f.isDirectory)
throw new IllegalArgumentException("Specified path is a folder. Please input valid file path.")
val folder = Option(f.getParent).getOrElse("")
val fileName = f.getName
logFile = new File(U.resolveWorkDirectory(folder, false), fileName)
logFile.createNewFile()
if (!logFile.canWrite)
throw new IllegalArgumentException("Not enough permissions to write a log file.")
var freq = 0L
try
freq = freqOpt.getOrElse("10").toLong * 1000L
catch {
case e: NumberFormatException =>
throw new IllegalArgumentException("Invalid frequency: " + freqOpt.get)
}
if (freq <= 0)
throw new IllegalArgumentException("Frequency must be positive: " + freq)
if (freq > 60000)
warn("Frequency greater than a minute is too low (ignoring).")
var topFreq = 0L
try
topFreq = topFreqOpt.getOrElse("20").toLong * 1000L
catch {
case e: NumberFormatException =>
throw new IllegalArgumentException("Invalid topology frequency: " + topFreqOpt.get)
}
if (topFreq <= 0)
throw new IllegalArgumentException("Topology frequency must be positive: " + topFreq)
// Unique key for this JVM.
val key = UUID.randomUUID().toString + System.identityHashCode(classOf[java.lang.Object]).toString
logTimer = new Timer(true)
logTimer.schedule(new TimerTask() {
/** Events to be logged by Visor console (additionally to discovery events). */
private final val LOG_EVTS = Array(
EVT_JOB_TIMEDOUT,
EVT_JOB_FAILED,
EVT_JOB_FAILED_OVER,
EVT_JOB_REJECTED,
EVT_JOB_CANCELLED,
EVT_TASK_TIMEDOUT,
EVT_TASK_FAILED,
EVT_TASK_DEPLOY_FAILED,
EVT_TASK_DEPLOYED,
EVT_TASK_UNDEPLOYED,
EVT_CACHE_REBALANCE_STARTED,
EVT_CACHE_REBALANCE_STOPPED,
EVT_CLASS_DEPLOY_FAILED
)
override def run() {
if (ignite != null) {
try {
// Discovery events collected only locally.
val loc = collectEvents(ignite, EVT_LAST_ORDER_KEY, EVT_THROTTLE_CNTR_KEY,
LOG_EVTS ++ EVTS_DISCOVERY, new VisorEventMapper).toSeq
val evts = if (!rmtLogDisabled)
loc ++ executeMulti(classOf[VisorNodeEventsCollectorTask],
VisorNodeEventsCollectorTaskArg.createLogArg(key, LOG_EVTS)).toSeq
else
loc
if (evts.nonEmpty) {
var out: FileWriter = null
try {
out = new FileWriter(logFile, true)
evts.toList.sortBy(_.timestamp).foreach(e => {
logImpl(
out,
formatDateTime(e.timestamp),
nodeId8Addr(e.nid()),
U.compact(e.shortDisplay())
)
if (EVTS_DISCOVERY.contains(e.typeId()))
snapshot()
})
}
finally {
U.close(out, null)
}
}
}
catch {
case _: ClusterGroupEmptyCheckedException => // Ignore.
case e: Exception => logText("Failed to collect log.")
}
}
}
}, freq, freq)
topTimer = new Timer(true)
topTimer.schedule(new TimerTask() {
override def run() {
snapshot()
}
}, topFreq, topFreq)
logStarted = true
logText("Log started.")
println("<visor>: Log started: " + logFile.getAbsolutePath)
}
/**
* Does topology snapshot.
*/
private def snapshot() {
val g = ignite
if (g != null)
try
drawBar(g.cluster.metrics())
catch {
case e: ClusterGroupEmptyCheckedException => logText("Topology is empty.")
case e: Exception => ()
}
}
/**
*
* @param m Projection metrics.
*/
private def drawBar(m: ClusterMetrics) {
assert(m != null)
val pipe = "|"
def bar(cpuLoad: Double, memUsed: Double): String = {
val nCpu = if (cpuLoad < 0 || cpuLoad > 1) 0 else (cpuLoad * 20).toInt
val nMem = if (memUsed < 0 || memUsed > 1) 0 else (memUsed * 20).toInt
("" /: (0 until 20))((s: String, i: Int) => {
s + (i match {
case a if a == nMem => "^"
case a if a <= nCpu => "="
case _ => '.'
})
})
}
logText("H/N/C" + pipe +
U.neighborhood(ignite.cluster.nodes()).size.toString.padTo(4, ' ') + pipe +
m.getTotalNodes.toString.padTo(4, ' ') + pipe +
m.getTotalCpus.toString.padTo(4, ' ') + pipe +
bar(m.getAverageCpuLoad, m.getHeapMemoryUsed / m.getHeapMemoryTotal) + pipe
)
}
/**
* Logs text message.
*
* @param msg Message to log.
*/
def logText(msg: String) {
assert(msg != null)
if (logStarted) {
var out: FileWriter = null
try {
out = new FileWriter(logFile, true)
logImpl(
out,
formatDateTime(System.currentTimeMillis),
null,
msg
)
}
catch {
case e: IOException => ()
}
finally {
U.close(out, null)
}
}
}
/**
* @param out Writer.
* @param tstamp Timestamp of the log.
* @param node Node associated with the event.
* @param msg Message associated with the event.
*/
private def logImpl(
out: java.io.Writer,
tstamp: String,
node: String = null,
msg: String
) {
assert(out != null)
assert(tstamp != null)
assert(msg != null)
assert(logStarted)
if (node != null)
out.write(tstamp.padTo(18, ' ') + " | " + node + " => " + msg + "\n")
else
out.write(tstamp.padTo(18, ' ') + " | " + msg + "\n")
}
/**
* Prints out status and help in case someone calls `visor()`.
*
*/
def apply() {
status()
nl()
help()
}
lazy val commands = cmdLst.map(_.name) ++ cmdLst.flatMap(_.aliases)
def searchCmd(cmd: String) = cmdLst.find(c => c.name.equals(cmd) || (c.aliases != null && c.aliases.contains(cmd)))
/**
* Transform node ID to ID8 string.
*
* @param node Node to take ID from.
* @return Node ID in ID8 format.
*/
def nid8(node: ClusterNode): String = {
nid8(node.id())
}
/**
* Transform node ID to ID8 string.
*
* @param nid Node ID.
* @return Node ID in ID8 format.
*/
def nid8(nid: UUID): String = {
nid.toString.take(8).toUpperCase
}
/**
* Get node by ID8 string.
*
* @param id8 Node ID in ID8 format.
* @return Collection of nodes that has specified ID8.
*/
def nodeById8(id8: String) = {
ignite.cluster.nodes().filter(n => id8.equalsIgnoreCase(nid8(n)))
}
/**
* Introduction of `^^` operator for `Any` type that will call `break`.
*
* @param v `Any` value.
*/
implicit def toReturnable(v: Any) = new {
// Ignore the warning below.
def ^^ {
break()
}
}
/**
* Decode time frame from string.
*
* @param timeArg Optional time frame: <num>s|m|h|d
* @return Time in milliseconds.
*/
def timeFilter(timeArg: Option[String]): Long = {
if (timeArg.nonEmpty) {
val s = timeArg.get
val n = try
s.substring(0, s.length - 1).toLong
catch {
case _: NumberFormatException =>
throw new IllegalArgumentException("Time frame size is not numeric in: " + s)
}
if (n <= 0)
throw new IllegalArgumentException("Time frame size is not positive in: " + s)
val timeUnit = s.last match {
case 's' => 1000L
case 'm' => 1000L * 60L
case 'h' => 1000L * 60L * 60L
case 'd' => 1000L * 60L * 60L * 24L
case _ => throw new IllegalArgumentException("Invalid time frame suffix in: " + s)
}
n * timeUnit
}
else
Long.MaxValue
}
    /**
     * Sort addresses to properly display in Visor.
     *
     * Order: public IPv4, then private IPv4, then localhost, then IPv6, then
     * unresolvable hosts; ties within a class are broken by numeric address value.
     *
     * @param addrs Addresses to sort.
     * @return Sorted list.
     */
    def sortAddresses(addrs: Iterable[String]) = {
        // Secondary sort key: treats each '.'- or ':'-separated group as a base-256
        // digit. NOTE(review): IPv6 groups are hex, so `toLong` throws and the catch
        // collapses the key to 0 - IPv6 addresses therefore keep their relative input
        // order. Presumably acceptable since IPv6 sorts last anyway; confirm if
        // precise IPv6 ordering ever matters.
        def ipToLong(ip: String) = {
            try {
                val octets = if (ip.contains(".")) ip.split('.') else ip.split(':')
                var dec = BigDecimal.valueOf(0L)
                for (i <- octets.indices) dec += octets(i).toLong * math.pow(256, octets.length - 1 - i).toLong
                dec
            }
            catch {
                case _: Exception => BigDecimal.valueOf(0L)
            }
        }
        /**
         * Primary sort key for an address.
         *
         * @param addr Address to detect type for.
         * @return IP class type for sorting in order: public addresses IPv4 + private IPv4 + localhost + IPv6.
         */
        def addrType(addr: String) = {
            if (addr.contains(':'))
                4 // IPv6
            else {
                // NOTE(review): getByName may hit DNS for host names - potential
                // latency when sorting non-numeric addresses.
                try {
                    InetAddress.getByName(addr) match {
                        case ip if ip.isLoopbackAddress => 3 // localhost
                        case ip if ip.isSiteLocalAddress => 2 // private IPv4
                        case _ => 1 // other IPv4
                    }
                }
                catch {
                    case ignore: UnknownHostException => 5 // Unresolvable - sort last.
                }
            }
        }
        // Decorate with (class, numeric value), sort lexicographically on the pair,
        // then strip the keys.
        addrs.map(addr => (addrType(addr), ipToLong(addr), addr)).toSeq.
            sortWith((l, r) => if (l._1 == r._1) l._2.compare(r._2) < 0 else l._1 < r._1).map(_._3)
    }
}
|
VladimirErshov/ignite
|
modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala
|
Scala
|
apache-2.0
| 79,582 |
package net.scalaleafs
import java.io.InputStream
import java.nio.charset.Charset
import scala.xml._
import scala.io.Source
import scala.xml.Source._
import scala.xml.parsing.NoBindingFactoryAdapter
import scala.xml.NamespaceBinding
object XHTML5Parser {

  /** HTML5 void elements: always emitted as minimized (self-closed) tags. */
  private val voidElements = Set("area", "base", "br", "col", "command", "embed", "hr", "img", "input", "keygen", "link", "meta", "param", "source", "track", "wbr")

  /** The five entities predefined by XML itself - the parser resolves these natively. */
  private val predefined = Set("&amp;", "&lt;", "&gt;", "&quot;", "&apos;")

  /**
   * Latin-1 (ISO-8859-1) named entities, "&nbsp;" (U+00A0) through "&yuml;" (U+00FF).
   * The names are listed in code-point order, so the character for index `i`
   * is simply `0xA0 + i` - equivalent to the previous hand-written literal map.
   */
  private val entities: Map[String, String] = {
    val names = Seq(
      "nbsp", "iexcl", "cent", "pound", "curren", "yen", "brvbar", "sect",
      "uml", "copy", "ordf", "laquo", "not", "shy", "reg", "macr",
      "deg", "plusmn", "sup2", "sup3", "acute", "micro", "para", "middot",
      "cedil", "sup1", "ordm", "raquo", "frac14", "frac12", "frac34", "iquest",
      "Agrave", "Aacute", "Acirc", "Atilde", "Auml", "Aring", "AElig", "Ccedil",
      "Egrave", "Eacute", "Ecirc", "Euml", "Igrave", "Iacute", "Icirc", "Iuml",
      "ETH", "Ntilde", "Ograve", "Oacute", "Ocirc", "Otilde", "Ouml", "times",
      "Oslash", "Ugrave", "Uacute", "Ucirc", "Uuml", "Yacute", "THORN", "szlig",
      "agrave", "aacute", "acirc", "atilde", "auml", "aring", "aelig", "ccedil",
      "egrave", "eacute", "ecirc", "euml", "igrave", "iacute", "icirc", "iuml",
      "eth", "ntilde", "ograve", "oacute", "ocirc", "otilde", "ouml", "divide",
      "oslash", "ugrave", "uacute", "ucirc", "uuml", "yacute", "thorn", "yuml")

    names.zipWithIndex.map { case (name, i) =>
      s"&$name;" -> (0xA0 + i).toChar.toString
    }.toMap
  }

  /**
   * Parses an XHTML5 document from the given input stream.
   *
   * The stream must support mark/reset (encoding sniffing rewinds it).
   *
   * @param in Input stream positioned at the start of the document.
   * @return Root node of the parsed document.
   */
  def parse(in: InputStream): Node = {
    val encoding = detectEncoding(in)
    val contents = Source.fromInputStream(in, encoding).mkString
    val contents2 = replaceEntities(contents)
    adapter.loadXML(fromString(contents2), adapter.parser)
  }

  // Factory adapter that marks HTML5 void elements as minimized empty elements.
  private val adapter = new NoBindingFactoryAdapter {
    override def adapter = this

    override def createNode(pre: String, label: String, attrs: MetaData, scope: NamespaceBinding, children: List[Node]): Elem =
      Elem(pre, label, attrs, scope, voidElements.contains(label), children: _*)
  }

  /**
   * Replaces known Latin-1 named entities with their literal characters.
   *
   * XML-predefined entities ("&amp;" etc.) and numeric character references
   * ("&#nnn;") are passed through untouched so the XML parser can resolve
   * them itself - the previous version replaced them with "?", corrupting
   * ordinary documents. Other unknown named entities still become "?".
   *
   * Also fixes two scanning defects of the previous version: a '&' without a
   * closing ';' was emitted twice, and back-to-back entities were skipped
   * because the next search started one character too far.
   */
  private def replaceEntities(s: String): String = {
    val out = new StringBuilder
    var lastIndex = 0
    var index = s.indexOf('&')
    while (index != -1) {
      out.append(s.substring(lastIndex, index))
      val end = s.indexOf(';', index + 1)
      if (end != -1) {
        val entity = s.substring(index, end + 1)
        if (predefined.contains(entity) || entity.startsWith("&#"))
          out.append(entity) // Let the XML parser resolve it.
        else
          out.append(entities.getOrElse(entity, "?"))
        lastIndex = end + 1
      }
      else {
        // No terminating ';': keep the bare '&' literally and move past it.
        out.append('&')
        lastIndex = index + 1
      }
      index = s.indexOf('&', lastIndex)
    }
    out.append(s.substring(lastIndex))
    out.toString
  }

  /**
   * Detects the character encoding of an XML/XHTML stream by inspecting its
   * first bytes (byte-order marks and the raw byte pattern of "<?xml"), then,
   * when applicable, the encoding pseudo-attribute of the XML declaration.
   * Falls back to the platform default encoding.
   *
   * The stream must support mark/reset; on return it is rewound to the start
   * of the document with any byte-order mark consumed.
   *
   * Fixes of the previous version: bytes are compared as unsigned values
   * (Scala's `Byte` is signed, so literals like 0xFF could never match and
   * all BOM branches for 0xFF/0xFE/0xEF were dead), a default case prevents
   * a `MatchError` on ordinary first bytes, and a short read no longer feeds
   * a negative length into `new String`.
   */
  private def detectEncoding(in: InputStream): String = {
    var encoding: String = System.getProperty("file.encoding")
    in.mark(400)
    var ignoreBytes = 0
    var readEncoding = false
    val buffer = new Array[Byte](400)
    var read = in.read(buffer, 0, 4)

    // Unsigned view of a sniffed byte.
    def b(i: Int): Int = buffer(i) & 0xFF

    if (read == 4) b(0) match {
      case 0x00 =>
        if (b(1) == 0x00 && b(2) == 0xFE && b(3) == 0xFF) {
          // UTF-32BE BOM.
          ignoreBytes = 4
          encoding = "UTF_32BE"
        } else if (b(1) == 0x00 && b(2) == 0x00 && b(3) == 0x3C) {
          // "<" in UTF-32BE without BOM.
          encoding = "UTF_32BE"
          readEncoding = true
        } else if (b(1) == 0x3C && b(2) == 0x00 && b(3) == 0x3F) {
          // "<?" in UTF-16BE without BOM.
          encoding = "UnicodeBigUnmarked"
          readEncoding = true
        }
      case 0xFF =>
        if (b(1) == 0xFE && b(2) == 0x00 && b(3) == 0x00) {
          // UTF-32LE BOM.
          ignoreBytes = 4
          encoding = "UTF_32LE"
        } else if (b(1) == 0xFE) {
          // UTF-16LE BOM.
          ignoreBytes = 2
          encoding = "UnicodeLittleUnmarked"
        }
      case 0x3C =>
        readEncoding = true
        if (b(1) == 0x00 && b(2) == 0x00 && b(3) == 0x00) {
          // "<" in UTF-32LE without BOM.
          encoding = "UTF_32LE"
        } else if (b(1) == 0x00 && b(2) == 0x3F && b(3) == 0x00) {
          // "<?" in UTF-16LE without BOM.
          encoding = "UnicodeLittleUnmarked"
        } else if (b(1) == 0x3F && b(2) == 0x78 && b(3) == 0x6D) {
          // "<?xm" in an ASCII-compatible encoding.
          encoding = "ASCII"
        }
      case 0xFE =>
        if (b(1) == 0xFF) {
          // UTF-16BE BOM.
          encoding = "UnicodeBigUnmarked"
          ignoreBytes = 2
        }
      case 0xEF =>
        if (b(1) == 0xBB && b(2) == 0xBF) {
          // UTF-8 BOM.
          encoding = "UTF8"
          ignoreBytes = 3
        }
      case 0x4C =>
        if (b(1) == 0x6F && b(2) == 0xA7 && b(3) == 0x94) {
          // "<?xm" in EBCDIC.
          encoding = "CP037"
        }
      case _ =>
        // Unrecognized prefix: keep the platform default encoding.
    }

    if (readEncoding) {
      read = in.read(buffer, 4, buffer.length - 4)

      if (read > 0) {
        val cs = Charset.forName(encoding)
        val s = new String(buffer, 4, read, cs)
        val pos = s.indexOf("encoding")

        if (pos != -1) {
          // The encoding value may be quoted with either ' or ".
          var delim = '\''
          var start = s.indexOf(delim, pos)

          if (start == -1) {
            delim = '"'
            start = s.indexOf(delim, pos)
          }

          if (start != -1) {
            val end = s.indexOf(delim, start + 1)

            if (end != -1)
              encoding = s.substring(start + 1, end)
          }
        }
      }
    }

    in.reset()

    // Consume BOM bytes so the caller reads pure document content.
    while (ignoreBytes > 0) {
      ignoreBytes -= 1
      in.read()
    }

    encoding
  }
}
|
scalastuff/scalaleafs
|
src/main/scala/net/scalaleafs/XHTML5Parser.scala
|
Scala
|
apache-2.0
| 6,643 |
package example
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.util.Random
/**
* This class implements a ScalaTest test suite for the methods in object
* `Lists` that need to be implemented as part of this assignment. A test
* suite is simply a collection of individual tests for some specific
* component of a program.
*
* A test suite is created by defining a class which extends the type
* `org.scalatest.FunSuite`. When running ScalaTest, it will automatically
* find this class and execute all of its tests.
*
* Adding the `@RunWith` annotation enables the test suite to be executed
* inside eclipse using the built-in JUnit test runner.
*
* You have two options for running this test suite:
*
* - Start the sbt console and run the "test" command
* - Right-click this file in eclipse and chose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class ListsSuite extends FunSuite {

  // Random generator seeded once per run; used to build randomized fixtures.
  private[this] val random = new Random(System.currentTimeMillis())

  // A fresh random Int.
  private[this] def r = random.nextInt

  // A fresh random Int in the range [0, p).
  private[this] def r(p: Int) = random.nextInt(p)

  // The simplest possible passing test.
  test("one plus one is two") {
    assert(1 + 1 == 2)
  }

  // Phrased as a question on purpose; `3 - 1` keeps the assertion true.
  test("one plus one is three?") {
    assert(1 + 1 === 3 - 1)
  }

  // ScalaTest's `===` reports both operand values on failure, unlike `==`,
  // which is why it is preferred in tests.
  test("details why one plus one is not three") {
    assert(1 + 1 === 6 / 3)
  }

  // `intercept` asserts that evaluating the block throws the given exception.
  test("intNotZero throws an exception if its argument is 0") {
    intercept[IllegalArgumentException] {
      intNotZero(0)
    }
  }

  def intNotZero(x: Int): Int =
    if (x == 0) throw new IllegalArgumentException("zero is not allowed") else x

  // Bring the assignment's `sum` and `max` into scope for the tests below.
  import Lists._

  test("sum of a few numbers") {
    assert(sum(List(1, 2, 0)) === 3)
  }

  test("max of a few numbers") {
    assert(max(List(3, 7, 2)) === 7)
  }

  test("max throws an exception if list is Nil") {
    intercept[java.util.NoSuchElementException] {
      max(Nil)
    }
  }

  test("sum of Nil is 0") {
    assert(sum(Nil) === 0)
  }

  // Both operations must be order-independent, so a list and its reverse
  // have to agree.
  test("max of a three element list is equals to max of its reverse") {
    val xs = List.fill(3)(r)
    assert(max(xs) === max(xs.reverse))
  }

  test("sum of a three element list is equals to sum of its reverse") {
    val xs = List.fill(3)(r)
    assert(sum(xs) === sum(xs.reverse))
  }

  test("max of a two element list is equals to max of its reverse") {
    val xs = List.fill(2)(r)
    assert(max(xs) === max(xs.reverse))
  }

  test("sum of a two element list is equals to sum of its reverse") {
    val xs = List.fill(2)(r)
    assert(sum(xs) === sum(xs.reverse))
  }

  test("sum of single element list is equal to this element") {
    val x = r
    assert(sum(List(x)) === x)
  }

  test("max of single element list is equal to this element") {
    val x = r
    assert(max(List(x)) === x)
  }

  test("sum of n-element list with element=x is x*n") {
    val n = r(1000)
    val x = r(1000)
    assert(sum(List.fill(n)(x)) === n * x)
  }
}
|
edvorg/scala-progfun
|
example/src/test/scala/example/ListsSuite.scala
|
Scala
|
gpl-3.0
| 5,330 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, Properties}
import scala.collection.JavaConverters._
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, NoSuchTableException, UnresolvedRelation}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.catalyst.plans.logical.{AppendData, CreateTableAsSelect, CreateTableAsSelectStatement, InsertIntoStatement, LogicalPlan, OverwriteByExpression, OverwritePartitionsDynamic, ReplaceTableAsSelectStatement}
import org.apache.spark.sql.connector.catalog.{CatalogPlugin, CatalogV2Implicits, CatalogV2Util, Identifier, SupportsCatalogOptions, Table, TableCatalog, TableProvider, V1Table}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{CreateTable, DataSource, DataSourceUtils, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2._
import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
* Interface used to write a [[Dataset]] to external storage systems (e.g. file systems,
* key-value stores, etc). Use `Dataset.write` to access this.
*
* @since 1.4.0
*/
@Stable
final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
private val df = ds.toDF()
/**
* Specifies the behavior when data or table already exists. Options include:
* <ul>
* <li>`SaveMode.Overwrite`: overwrite the existing data.</li>
* <li>`SaveMode.Append`: append the data.</li>
* <li>`SaveMode.Ignore`: ignore the operation (i.e. no-op).</li>
* <li>`SaveMode.ErrorIfExists`: throw an exception at runtime.</li>
* </ul>
* <p>
* When writing to data source v1, the default option is `ErrorIfExists`. When writing to data
* source v2, the default option is `Append`.
*
* @since 1.4.0
*/
  def mode(saveMode: SaveMode): DataFrameWriter[T] = {
    // Remember the requested mode; it is applied later by
    // save()/saveAsTable()/insertInto(). Returns `this` for chaining.
    this.mode = saveMode
    this
  }
/**
* Specifies the behavior when data or table already exists. Options include:
* <ul>
* <li>`overwrite`: overwrite the existing data.</li>
* <li>`append`: append the data.</li>
* <li>`ignore`: ignore the operation (i.e. no-op).</li>
* <li>`error` or `errorifexists`: default option, throw an exception at runtime.</li>
* </ul>
*
* @since 1.4.0
*/
def mode(saveMode: String): DataFrameWriter[T] = {
saveMode.toLowerCase(Locale.ROOT) match {
case "overwrite" => mode(SaveMode.Overwrite)
case "append" => mode(SaveMode.Append)
case "ignore" => mode(SaveMode.Ignore)
case "error" | "errorifexists" | "default" => mode(SaveMode.ErrorIfExists)
case _ => throw new IllegalArgumentException(s"Unknown save mode: $saveMode. Accepted " +
"save modes are 'overwrite', 'append', 'ignore', 'error', 'errorifexists', 'default'.")
}
}
/**
* Specifies the underlying output data source. Built-in options include "parquet", "json", etc.
*
* @since 1.4.0
*/
  def format(source: String): DataFrameWriter[T] = {
    // Short name (e.g. "parquet") or fully-qualified class of the sink.
    this.source = source
    this
  }
/**
* Adds an output option for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a time zone ID
* to be used to format timestamps in the JSON/CSV datasources or partition values. The following
* formats of `timeZone` are supported:
* <ul>
* <li> Region-based zone ID: It should have the form 'area/city', such as
* 'America/Los_Angeles'.</li>
* <li> Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00'
* or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</li>
* </ul>
* Other short names like 'CST' are not recommended to use because they can be ambiguous.
* If it isn't set, the current value of the SQL config `spark.sql.session.timeZone` is
* used by default.
* </li>
* </ul>
*
* @since 1.4.0
*/
  def option(key: String, value: String): DataFrameWriter[T] = {
    // A later write of the same key overwrites the earlier value.
    this.extraOptions += (key -> value)
    this
  }
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
  // Boolean variant: stringified and delegated to the String overload.
  def option(key: String, value: Boolean): DataFrameWriter[T] = option(key, value.toString)
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
  // Long variant: stringified and delegated to the String overload.
  def option(key: String, value: Long): DataFrameWriter[T] = option(key, value.toString)
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
  // Double variant: stringified and delegated to the String overload.
  def option(key: String, value: Double): DataFrameWriter[T] = option(key, value.toString)
/**
* (Scala-specific) Adds output options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a time zone ID
* to be used to format timestamps in the JSON/CSV datasources or partition values. The following
* formats of `timeZone` are supported:
* <ul>
* <li> Region-based zone ID: It should have the form 'area/city', such as
* 'America/Los_Angeles'.</li>
* <li> Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00'
* or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</li>
* </ul>
* Other short names like 'CST' are not recommended to use because they can be ambiguous.
* If it isn't set, the current value of the SQL config `spark.sql.session.timeZone` is
* used by default.
* </li>
* </ul>
*
* @since 1.4.0
*/
  def options(options: scala.collection.Map[String, String]): DataFrameWriter[T] = {
    // Bulk merge; keys already present are overwritten.
    this.extraOptions ++= options
    this
  }
/**
* Adds output options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a time zone ID
* to be used to format timestamps in the JSON/CSV datasources or partition values. The following
* formats of `timeZone` are supported:
* <ul>
* <li> Region-based zone ID: It should have the form 'area/city', such as
* 'America/Los_Angeles'.</li>
* <li> Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00'
* or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</li>
* </ul>
* Other short names like 'CST' are not recommended to use because they can be ambiguous.
* If it isn't set, the current value of the SQL config `spark.sql.session.timeZone` is
* used by default.
* </li>
* </ul>
*
* @since 1.4.0
*/
  def options(options: java.util.Map[String, String]): DataFrameWriter[T] = {
    // Java-friendly overload: convert and delegate to the Scala variant.
    this.options(options.asScala)
    this
  }
/**
* Partitions the output by the given columns on the file system. If specified, the output is
* laid out on the file system similar to Hive's partitioning scheme. As an example, when we
* partition a dataset by year and then month, the directory layout would look like:
* <ul>
* <li>year=2016/month=01/</li>
* <li>year=2016/month=02/</li>
* </ul>
*
* Partitioning is one of the most widely used techniques to optimize physical data layout.
* It provides a coarse-grained index for skipping unnecessary data reads when queries have
* predicates on the partitioned columns. In order for partitioning to work well, the number
* of distinct values in each column should typically be less than tens of thousands.
*
* This is applicable for all file-based data sources (e.g. Parquet, JSON) starting with Spark
* 2.1.0.
*
* @since 1.4.0
*/
  @scala.annotation.varargs
  def partitionBy(colNames: String*): DataFrameWriter[T] = {
    // Replaces (does not append to) any previously configured partition columns.
    this.partitioningColumns = Option(colNames)
    this
  }
/**
* Buckets the output by the given columns. If specified, the output is laid out on the file
* system similar to Hive's bucketing scheme, but with a different bucket hash function
* and is not compatible with Hive's bucketing.
*
* This is applicable for all file-based data sources (e.g. Parquet, JSON) starting with Spark
* 2.1.0.
*
* @since 2.0
*/
  @scala.annotation.varargs
  def bucketBy(numBuckets: Int, colName: String, colNames: String*): DataFrameWriter[T] = {
    // At least one bucket column is required, hence the separate first column arg.
    this.numBuckets = Option(numBuckets)
    this.bucketColumnNames = Option(colName +: colNames)
    this
  }
/**
* Sorts the output in each bucket by the given columns.
*
* This is applicable for all file-based data sources (e.g. Parquet, JSON) starting with Spark
* 2.1.0.
*
* @since 2.0
*/
  @scala.annotation.varargs
  def sortBy(colName: String, colNames: String*): DataFrameWriter[T] = {
    // Only valid together with bucketBy; enforced later by getBucketSpec.
    this.sortColumnNames = Option(colName +: colNames)
    this
  }
/**
* Saves the content of the `DataFrame` at the specified path.
*
* @since 1.4.0
*/
  def save(path: String): Unit = {
    // The destination is handed to the data source via the reserved "path" option.
    this.extraOptions += ("path" -> path)
    save()
  }
/**
* Saves the content of the `DataFrame` as the specified table.
*
* @since 1.4.0
*/
  def save(): Unit = {
    // Path-based writes cannot target the Hive provider; Hive needs a table.
    if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
      throw new AnalysisException("Hive data source can only be used with tables, you can not " +
        "write files of Hive data source directly.")
    }
    assertNotBucketed("save")
    // Route through the DataSource V2 write path when the configured source
    // resolves to a V2 provider; otherwise use the V1 path at the bottom.
    val maybeV2Provider = lookupV2Provider()
    if (maybeV2Provider.isDefined) {
      val provider = maybeV2Provider.get
      val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
        provider, df.sparkSession.sessionState.conf)
      // User-supplied options take precedence over session-derived ones.
      val options = sessionOptions ++ extraOptions
      val dsOptions = new CaseInsensitiveStringMap(options.asJava)
      def getTable: Table = {
        // For file source, it's expensive to infer schema/partition at each write. Here we pass
        // the schema of input query and the user-specified partitioning to `getTable`. If the
        // query schema is not compatible with the existing data, the write can still success but
        // following reads would fail.
        if (provider.isInstanceOf[FileDataSourceV2]) {
          provider.getTable(
            df.schema.asNullable,
            partitioningAsV2.toArray,
            dsOptions.asCaseSensitiveMap())
        } else {
          DataSourceV2Utils.getTableFromProvider(provider, dsOptions, userSpecifiedSchema = None)
        }
      }
      import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._
      val catalogManager = df.sparkSession.sessionState.catalogManager
      mode match {
        // Append/Overwrite operate on an already-existing table.
        case SaveMode.Append | SaveMode.Overwrite =>
          val (table, catalog, ident) = provider match {
            case supportsExtract: SupportsCatalogOptions =>
              // The provider tells us which catalog/identifier the options refer to.
              val ident = supportsExtract.extractIdentifier(dsOptions)
              val catalog = CatalogV2Util.getTableProviderCatalog(
                supportsExtract, catalogManager, dsOptions)
              (catalog.loadTable(ident), Some(catalog), Some(ident))
            case _: TableProvider =>
              val t = getTable
              if (t.supports(BATCH_WRITE)) {
                (t, None, None)
              } else {
                // Streaming also uses the data source V2 API. So it may be that the data source
                // implements v2, but has no v2 implementation for batch writes. In that case, we
                // fall back to saving as though it's a V1 source.
                return saveToV1Source()
              }
          }
          val relation = DataSourceV2Relation.create(table, catalog, ident, dsOptions)
          checkPartitioningMatchesV2Table(table)
          if (mode == SaveMode.Append) {
            runCommand(df.sparkSession, "save") {
              AppendData.byName(relation, df.logicalPlan, extraOptions.toMap)
            }
          } else {
            // Truncate the table. TableCapabilityCheck will throw a nice exception if this
            // isn't supported
            runCommand(df.sparkSession, "save") {
              OverwriteByExpression.byName(
                relation, df.logicalPlan, Literal(true), extraOptions.toMap)
            }
          }
        // All other modes (ErrorIfExists / Ignore) create the table.
        case createMode =>
          provider match {
            case supportsExtract: SupportsCatalogOptions =>
              val ident = supportsExtract.extractIdentifier(dsOptions)
              val catalog = CatalogV2Util.getTableProviderCatalog(
                supportsExtract, catalogManager, dsOptions)
              val location = Option(dsOptions.get("path")).map(TableCatalog.PROP_LOCATION -> _)
              runCommand(df.sparkSession, "save") {
                CreateTableAsSelect(
                  catalog,
                  ident,
                  partitioningAsV2,
                  df.queryExecution.analyzed,
                  Map(TableCatalog.PROP_PROVIDER -> source) ++ location,
                  extraOptions.toMap,
                  ignoreIfExists = createMode == SaveMode.Ignore)
              }
            case _: TableProvider =>
              if (getTable.supports(BATCH_WRITE)) {
                // A bare TableProvider cannot create tables, so create-style
                // modes are unsupported for it.
                throw new AnalysisException(s"TableProvider implementation $source cannot be " +
                  s"written with $createMode mode, please use Append or Overwrite " +
                  "modes instead.")
              } else {
                // Streaming also uses the data source V2 API. So it may be that the data source
                // implements v2, but has no v2 implementation for batch writes. In that case, we
                // fallback to saving as though it's a V1 source.
                saveToV1Source()
              }
          }
      }
    } else {
      saveToV1Source()
    }
  }
  private def saveToV1Source(): Unit = {
    // Propagate the partitioning columns to the V1 path via a reserved option key.
    partitioningColumns.foreach { columns =>
      extraOptions += (DataSourceUtils.PARTITIONING_COLUMNS_KEY ->
        DataSourceUtils.encodePartitioningColumns(columns))
    }
    // Code path for data source v1.
    runCommand(df.sparkSession, "save") {
      DataSource(
        sparkSession = df.sparkSession,
        className = source,
        partitionColumns = partitioningColumns.getOrElse(Nil),
        options = extraOptions.toMap).planForWriting(mode, df.logicalPlan)
    }
  }
/**
* Inserts the content of the `DataFrame` to the specified table. It requires that
* the schema of the `DataFrame` is the same as the schema of the table.
*
* @note Unlike `saveAsTable`, `insertInto` ignores the column names and just uses position-based
* resolution. For example:
*
* @note SaveMode.ErrorIfExists and SaveMode.Ignore behave as SaveMode.Append in `insertInto` as
* `insertInto` is not a table creating operation.
*
* {{{
* scala> Seq((1, 2)).toDF("i", "j").write.mode("overwrite").saveAsTable("t1")
* scala> Seq((3, 4)).toDF("j", "i").write.insertInto("t1")
* scala> Seq((5, 6)).toDF("a", "b").write.insertInto("t1")
* scala> sql("select * from t1").show
* +---+---+
* | i| j|
* +---+---+
* | 5| 6|
* | 3| 4|
* | 1| 2|
* +---+---+
* }}}
*
* Because it inserts data to an existing table, format or options will be ignored.
*
* @since 1.4.0
*/
  def insertInto(tableName: String): Unit = {
    import df.sparkSession.sessionState.analyzer.{AsTableIdentifier, NonSessionCatalogAndIdentifier, SessionCatalogAndIdentifier}
    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
    import org.apache.spark.sql.connector.catalog.CatalogV2Util._
    assertNotBucketed("insertInto")
    // partitionBy() is redundant here: the target table already fixes the partitioning.
    if (partitioningColumns.isDefined) {
      throw new AnalysisException(
        "insertInto() can't be used together with partitionBy(). " +
        "Partition columns have already been defined for the table. " +
        "It is not necessary to use partitionBy()."
      )
    }
    val session = df.sparkSession
    val canUseV2 = lookupV2Provider().isDefined
    // Resolve the (possibly multi-part) name to a catalog + identifier and
    // dispatch to the matching V2 or V1 insert path.
    session.sessionState.sqlParser.parseMultipartIdentifier(tableName) match {
      case NonSessionCatalogAndIdentifier(catalog, ident) =>
        insertInto(catalog, ident)
      case SessionCatalogAndIdentifier(catalog, ident)
          if canUseV2 && ident.namespace().length <= 1 =>
        insertInto(catalog, ident)
      case AsTableIdentifier(tableIdentifier) =>
        insertInto(tableIdentifier)
      case other =>
        throw new AnalysisException(
          s"Couldn't find a catalog to handle the identifier ${other.quoted}.")
    }
  }
  private def insertInto(catalog: CatalogPlugin, ident: Identifier): Unit = {
    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
    // V1 tables are redirected to the legacy TableIdentifier path; everything
    // else is written through a DataSourceV2 relation.
    val table = catalog.asTableCatalog.loadTable(ident) match {
      case _: V1Table =>
        return insertInto(TableIdentifier(ident.name(), ident.namespace().headOption))
      case t =>
        DataSourceV2Relation.create(t, Some(catalog), Some(ident))
    }
    val command = mode match {
      case SaveMode.Append | SaveMode.ErrorIfExists | SaveMode.Ignore =>
        // insertInto is not a table-creating operation, so the "create"-style
        // modes behave like Append (see the scaladoc on the public overload).
        AppendData.byPosition(table, df.logicalPlan, extraOptions.toMap)
      case SaveMode.Overwrite =>
        val conf = df.sparkSession.sessionState.conf
        // Dynamic overwrite replaces only the partitions present in the input.
        val dynamicPartitionOverwrite = table.table.partitioning.size > 0 &&
          conf.partitionOverwriteMode == PartitionOverwriteMode.DYNAMIC
        if (dynamicPartitionOverwrite) {
          OverwritePartitionsDynamic.byPosition(table, df.logicalPlan, extraOptions.toMap)
        } else {
          OverwriteByExpression.byPosition(table, df.logicalPlan, Literal(true), extraOptions.toMap)
        }
    }
    runCommand(df.sparkSession, "insertInto") {
      command
    }
  }
  private def insertInto(tableIdent: TableIdentifier): Unit = {
    // V1 path: emit an InsertIntoStatement for the analyzer to resolve.
    runCommand(df.sparkSession, "insertInto") {
      InsertIntoStatement(
        table = UnresolvedRelation(tableIdent),
        partitionSpec = Map.empty[String, Option[String]],
        query = df.logicalPlan,
        overwrite = mode == SaveMode.Overwrite,
        ifPartitionNotExists = false)
    }
  }
  private def getBucketSpec: Option[BucketSpec] = {
    // sortBy only makes sense within buckets, so it requires bucketBy.
    if (sortColumnNames.isDefined && numBuckets.isEmpty) {
      throw new AnalysisException("sortBy must be used together with bucketBy")
    }
    // numBuckets implies bucketColumnNames is set (both come from bucketBy).
    numBuckets.map { n =>
      BucketSpec(n, bucketColumnNames.get, sortColumnNames.getOrElse(Nil))
    }
  }
private def assertNotBucketed(operation: String): Unit = {
if (getBucketSpec.isDefined) {
if (sortColumnNames.isEmpty) {
throw new AnalysisException(s"'$operation' does not support bucketBy right now")
} else {
throw new AnalysisException(s"'$operation' does not support bucketBy and sortBy right now")
}
}
}
  private def assertNotPartitioned(operation: String): Unit = {
    // Guards operations (e.g. jdbc) that cannot write partitioned output.
    if (partitioningColumns.isDefined) {
      throw new AnalysisException(s"'$operation' does not support partitioning")
    }
  }
/**
* Saves the content of the `DataFrame` as the specified table.
*
* In the case the table already exists, behavior of this function depends on the
* save mode, specified by the `mode` function (default to throwing an exception).
* When `mode` is `Overwrite`, the schema of the `DataFrame` does not need to be
* the same as that of the existing table.
*
* When `mode` is `Append`, if there is an existing table, we will use the format and options of
* the existing table. The column order in the schema of the `DataFrame` doesn't need to be same
* as that of the existing table. Unlike `insertInto`, `saveAsTable` will use the column names to
* find the correct column positions. For example:
*
* {{{
* scala> Seq((1, 2)).toDF("i", "j").write.mode("overwrite").saveAsTable("t1")
* scala> Seq((3, 4)).toDF("j", "i").write.mode("append").saveAsTable("t1")
* scala> sql("select * from t1").show
* +---+---+
* | i| j|
* +---+---+
* | 1| 2|
* | 4| 3|
* +---+---+
* }}}
*
* In this method, save mode is used to determine the behavior if the data source table exists in
* Spark catalog. We will always overwrite the underlying data of data source (e.g. a table in
* JDBC data source) if the table doesn't exist in Spark catalog, and will always append to the
* underlying data of data source if the table already exists.
*
* When the DataFrame is created from a non-partitioned `HadoopFsRelation` with a single input
* path, and the data source provider can be mapped to an existing Hive builtin SerDe (i.e. ORC
* and Parquet), the table is persisted in a Hive compatible format, which means other systems
* like Hive will be able to read this table. Otherwise, the table is persisted in a Spark SQL
* specific format.
*
* @since 1.4.0
*/
  def saveAsTable(tableName: String): Unit = {
    import df.sparkSession.sessionState.analyzer.{AsTableIdentifier, NonSessionCatalogAndIdentifier, SessionCatalogAndIdentifier}
    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
    val session = df.sparkSession
    val canUseV2 = lookupV2Provider().isDefined
    // Route to the V2 catalog path when a (non-session) catalog owns the name;
    // otherwise fall back to the legacy V1 session-catalog path.
    session.sessionState.sqlParser.parseMultipartIdentifier(tableName) match {
      case nameParts @ NonSessionCatalogAndIdentifier(catalog, ident) =>
        saveAsTable(catalog.asTableCatalog, ident, nameParts)
      case nameParts @ SessionCatalogAndIdentifier(catalog, ident)
          if canUseV2 && ident.namespace().length <= 1 =>
        saveAsTable(catalog.asTableCatalog, ident, nameParts)
      case AsTableIdentifier(tableIdentifier) =>
        saveAsTable(tableIdentifier)
      case other =>
        throw new AnalysisException(
          s"Couldn't find a catalog to handle the identifier ${other.quoted}.")
    }
  }
  private def saveAsTable(
      catalog: TableCatalog, ident: Identifier, nameParts: Seq[String]): Unit = {
    // A missing table surfaces as None rather than an exception.
    val tableOpt = try Option(catalog.loadTable(ident)) catch {
      case _: NoSuchTableException => None
    }
    val command = (mode, tableOpt) match {
      // Existing V1 tables are handled by the legacy TableIdentifier path.
      case (_, Some(_: V1Table)) =>
        return saveAsTable(TableIdentifier(ident.name(), ident.namespace().headOption))
      case (SaveMode.Append, Some(table)) =>
        checkPartitioningMatchesV2Table(table)
        val v2Relation = DataSourceV2Relation.create(table, Some(catalog), Some(ident))
        AppendData.byName(v2Relation, df.logicalPlan, extraOptions.toMap)
      case (SaveMode.Overwrite, _) =>
        ReplaceTableAsSelectStatement(
          nameParts,
          df.queryExecution.analyzed,
          partitioningAsV2,
          None,
          Map.empty,
          Some(source),
          Map.empty,
          extraOptions.get("path"),
          extraOptions.get(TableCatalog.PROP_COMMENT),
          extraOptions.toMap,
          orCreate = true) // Create the table if it doesn't exist
      case (other, _) =>
        // We have a potential race condition here in AppendMode, if the table suddenly gets
        // created between our existence check and physical execution, but this can't be helped
        // in any case.
        CreateTableAsSelectStatement(
          nameParts,
          df.queryExecution.analyzed,
          partitioningAsV2,
          None,
          Map.empty,
          Some(source),
          Map.empty,
          extraOptions.get("path"),
          extraOptions.get(TableCatalog.PROP_COMMENT),
          extraOptions.toMap,
          ifNotExists = other == SaveMode.Ignore)
    }
    runCommand(df.sparkSession, "saveAsTable") {
      command
    }
  }
  private def saveAsTable(tableIdent: TableIdentifier): Unit = {
    // V1 path: decide between no-op, error, drop-and-recreate, or plain create
    // based on table existence and the configured save mode.
    val catalog = df.sparkSession.sessionState.catalog
    val tableExists = catalog.tableExists(tableIdent)
    val db = tableIdent.database.getOrElse(catalog.getCurrentDatabase)
    val tableIdentWithDB = tableIdent.copy(database = Some(db))
    val tableName = tableIdentWithDB.unquotedString
    (tableExists, mode) match {
      case (true, SaveMode.Ignore) =>
        // Do nothing
      case (true, SaveMode.ErrorIfExists) =>
        throw new AnalysisException(s"Table $tableIdent already exists.")
      case (true, SaveMode.Overwrite) =>
        // Get all input data source or hive relations of the query.
        val srcRelations = df.logicalPlan.collect {
          case LogicalRelation(src: BaseRelation, _, _, _) => src
          case relation: HiveTableRelation => relation.tableMeta.identifier
        }
        val tableRelation = df.sparkSession.table(tableIdentWithDB).queryExecution.analyzed
        // Overwriting a table that is also an input of the query would corrupt
        // the data being read, so reject that case explicitly.
        EliminateSubqueryAliases(tableRelation) match {
          // check if the table is a data source table (the relation is a BaseRelation).
          case LogicalRelation(dest: BaseRelation, _, _, _) if srcRelations.contains(dest) =>
            throw new AnalysisException(
              s"Cannot overwrite table $tableName that is also being read from")
          // check hive table relation when overwrite mode
          case relation: HiveTableRelation
            if srcRelations.contains(relation.tableMeta.identifier) =>
            throw new AnalysisException(
              s"Cannot overwrite table $tableName that is also being read from")
          case _ => // OK
        }
        // Drop the existing table
        catalog.dropTable(tableIdentWithDB, ignoreIfNotExists = true, purge = false)
        createTable(tableIdentWithDB)
        // Refresh the cache of the table in the catalog.
        catalog.refreshTable(tableIdentWithDB)
      case _ => createTable(tableIdent)
    }
  }
  private def createTable(tableIdent: TableIdentifier): Unit = {
    val storage = DataSource.buildStorageFormatFromOptions(extraOptions.toMap)
    // A user-supplied location makes the table EXTERNAL; otherwise Spark manages it.
    val tableType = if (storage.locationUri.isDefined) {
      CatalogTableType.EXTERNAL
    } else {
      CatalogTableType.MANAGED
    }
    val tableDesc = CatalogTable(
      identifier = tableIdent,
      tableType = tableType,
      storage = storage,
      // NOTE(review): schema is left empty here — presumably derived from the
      // query plan during execution of CreateTable; confirm before relying on it.
      schema = new StructType,
      provider = Some(source),
      partitionColumnNames = partitioningColumns.getOrElse(Nil),
      bucketSpec = getBucketSpec)
    runCommand(df.sparkSession, "saveAsTable")(
      CreateTable(tableDesc, mode, Some(df.logicalPlan)))
  }
/** Converts the provided partitioning and bucketing information to DataSourceV2 Transforms. */
private def partitioningAsV2: Seq[Transform] = {
val partitioning = partitioningColumns.map { colNames =>
colNames.map(name => IdentityTransform(FieldReference(name)))
}.getOrElse(Seq.empty[Transform])
val bucketing =
getBucketSpec.map(spec => CatalogV2Implicits.BucketSpecHelper(spec).asTransform).toSeq
partitioning ++ bucketing
}
/**
* For V2 DataSources, performs if the provided partitioning matches that of the table.
* Partitioning information is not required when appending data to V2 tables.
*/
private def checkPartitioningMatchesV2Table(existingTable: Table): Unit = {
val v2Partitions = partitioningAsV2
if (v2Partitions.isEmpty) return
require(v2Partitions.sameElements(existingTable.partitioning()),
"The provided partitioning does not match of the table.\\n" +
s" - provided: ${v2Partitions.mkString(", ")}\\n" +
s" - table: ${existingTable.partitioning().mkString(", ")}")
}
  /**
   * Saves the content of the `DataFrame` to an external database table via JDBC. In the case the
   * table already exists in the external database, behavior of this function depends on the
   * save mode, specified by the `mode` function (default to throwing an exception).
   *
   * Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
   * your external database systems.
   *
   * You can set the following JDBC-specific option(s) for storing JDBC:
   * <ul>
   * <li>`truncate` (default `false`): use `TRUNCATE TABLE` instead of `DROP TABLE`.</li>
   * </ul>
   *
   * In case of failures, users should turn off `truncate` option to use `DROP TABLE` again. Also,
   * due to the different behavior of `TRUNCATE TABLE` among DBMS, it's not always safe to use this.
   * MySQLDialect, DB2Dialect, MsSqlServerDialect, DerbyDialect, and OracleDialect supports this
   * while PostgresDialect and default JDBCDialect doesn't. For unknown and unsupported JDBCDialect,
   * the user option `truncate` is ignored.
   *
   * @param url JDBC database url of the form `jdbc:subprotocol:subname`
   * @param table Name of the table in the external database.
   * @param connectionProperties JDBC database connection arguments, a list of arbitrary string
   *                             tag/value. Normally at least a "user" and "password" property
   *                             should be included. "batchsize" can be used to control the
   *                             number of rows per insert. "isolationLevel" can be one of
   *                             "NONE", "READ_COMMITTED", "READ_UNCOMMITTED", "REPEATABLE_READ",
   *                             or "SERIALIZABLE", corresponding to standard transaction
   *                             isolation levels defined by JDBC's Connection object, with default
   *                             of "READ_UNCOMMITTED".
   * @since 1.4.0
   */
  def jdbc(url: String, table: String, connectionProperties: Properties): Unit = {
    assertNotPartitioned("jdbc")
    assertNotBucketed("jdbc")
    // connectionProperties should override settings in extraOptions.
    this.extraOptions ++= connectionProperties.asScala
    // explicit url and dbtable should override all
    this.extraOptions += ("url" -> url, "dbtable" -> table)
    format("jdbc").save()
  }
/**
* Saves the content of the `DataFrame` in JSON format (<a href="http://jsonlines.org/">
* JSON Lines text format or newline-delimited JSON</a>) at the specified path.
* This is equivalent to:
* {{{
* format("json").save(path)
* }}}
*
* You can set the following JSON-specific option(s) for writing JSON files:
* <ul>
* <li>`compression` (default `null`): compression codec to use when saving to file. This can be
* one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
* `snappy` and `deflate`). </li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at
* <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>.
* This applies to date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
* Datetime Patterns</a>.
* This applies to timestamp type.</li>
* <li>`encoding` (by default it is not set): specifies encoding (charset) of saved json
* files. If it is not set, the UTF-8 charset will be used. </li>
* <li>`lineSep` (default `\\n`): defines the line separator that should be used for writing.</li>
* <li>`ignoreNullFields` (default `true`): Whether to ignore null fields
* when generating JSON objects. </li>
* </ul>
*
* @since 1.4.0
*/
def json(path: String): Unit = {
format("json").save(path)
}
/**
* Saves the content of the `DataFrame` in Parquet format at the specified path.
* This is equivalent to:
* {{{
* format("parquet").save(path)
* }}}
*
* You can set the following Parquet-specific option(s) for writing Parquet files:
* <ul>
* <li>`compression` (default is the value specified in `spark.sql.parquet.compression.codec`):
* compression codec to use when saving to file. This can be one of the known case-insensitive
* shorten names(`none`, `uncompressed`, `snappy`, `gzip`, `lzo`, `brotli`, `lz4`, and `zstd`).
* This will override `spark.sql.parquet.compression.codec`.</li>
* </ul>
*
* @since 1.4.0
*/
def parquet(path: String): Unit = {
format("parquet").save(path)
}
/**
* Saves the content of the `DataFrame` in ORC format at the specified path.
* This is equivalent to:
* {{{
* format("orc").save(path)
* }}}
*
* You can set the following ORC-specific option(s) for writing ORC files:
* <ul>
* <li>`compression` (default is the value specified in `spark.sql.orc.compression.codec`):
* compression codec to use when saving to file. This can be one of the known case-insensitive
* shorten names(`none`, `snappy`, `zlib`, and `lzo`). This will override
* `orc.compress` and `spark.sql.orc.compression.codec`. If `orc.compress` is given,
* it overrides `spark.sql.orc.compression.codec`.</li>
* </ul>
*
* @since 1.5.0
*/
def orc(path: String): Unit = {
format("orc").save(path)
}
/**
* Saves the content of the `DataFrame` in a text file at the specified path.
* The DataFrame must have only one column that is of string type.
* Each row becomes a new line in the output file. For example:
* {{{
* // Scala:
* df.write.text("/path/to/output")
*
* // Java:
* df.write().text("/path/to/output")
* }}}
* The text files will be encoded as UTF-8.
*
* You can set the following option(s) for writing text files:
* <ul>
* <li>`compression` (default `null`): compression codec to use when saving to file. This can be
* one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
* `snappy` and `deflate`). </li>
* <li>`lineSep` (default `\\n`): defines the line separator that should be used for writing.</li>
* </ul>
*
* @since 1.6.0
*/
def text(path: String): Unit = {
format("text").save(path)
}
  /**
   * Saves the content of the `DataFrame` in CSV format at the specified path.
   * This is equivalent to:
   * {{{
   *   format("csv").save(path)
   * }}}
   *
   * You can set the following CSV-specific option(s) for writing CSV files:
   * <ul>
   * <li>`sep` (default `,`): sets a single character as a separator for each
   * field and value.</li>
   * <li>`quote` (default `"`): sets a single character used for escaping quoted values where
   * the separator can be part of the value. If an empty string is set, it uses `u0000`
   * (null character).</li>
   * <li>`escape` (default `\\`): sets a single character used for escaping quotes inside
   * an already quoted value.</li>
   * <li>`charToEscapeQuoteEscaping` (default `escape` or `\\0`): sets a single character used for
   * escaping the escape for the quote character. The default value is escape character when escape
   * and quote characters are different, `\\0` otherwise.</li>
   * <li>`escapeQuotes` (default `true`): a flag indicating whether values containing
   * quotes should always be enclosed in quotes. Default is to escape all values containing
   * a quote character.</li>
   * <li>`quoteAll` (default `false`): a flag indicating whether all values should always be
   * enclosed in quotes. Default is to only escape values containing a quote character.</li>
   * <li>`header` (default `false`): writes the names of columns as the first line.</li>
   * <li>`nullValue` (default empty string): sets the string representation of a null value.</li>
   * <li>`emptyValue` (default `""`): sets the string representation of an empty value.</li>
   * <li>`encoding` (by default it is not set): specifies encoding (charset) of saved csv
   * files. If it is not set, the UTF-8 charset will be used.</li>
   * <li>`compression` (default `null`): compression codec to use when saving to file. This can be
   * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
   * `snappy` and `deflate`). </li>
   * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
   * Custom date formats follow the formats at
   * <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
   * Datetime Patterns</a>.
   * This applies to date type.</li>
   * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]`): sets the string that
   * indicates a timestamp format. Custom date formats follow the formats at
   * <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">
   * Datetime Patterns</a>.
   * This applies to timestamp type.</li>
   * <li>`ignoreLeadingWhiteSpace` (default `true`): a flag indicating whether or not leading
   * whitespaces from values being written should be skipped.</li>
   * <li>`ignoreTrailingWhiteSpace` (default `true`): a flag indicating whether or not
   * trailing whitespaces from values being written should be skipped.</li>
   * <li>`lineSep` (default `\\n`): defines the line separator that should be used for writing.
   * Maximum length is 1 character.</li>
   * </ul>
   *
   * @since 2.0.0
   */
  def csv(path: String): Unit = {
    format("csv").save(path)
  }
/**
* Wrap a DataFrameWriter action to track the QueryExecution and time cost, then report to the
* user-registered callback functions.
*/
private def runCommand(session: SparkSession, name: String)(command: LogicalPlan): Unit = {
val qe = session.sessionState.executePlan(command)
// call `QueryExecution.toRDD` to trigger the execution of commands.
SQLExecution.withNewExecutionId(qe, Some(name))(qe.toRdd)
}
private def lookupV2Provider(): Option[TableProvider] = {
DataSource.lookupDataSourceV2(source, df.sparkSession.sessionState.conf) match {
// TODO(SPARK-28396): File source v2 write path is currently broken.
case Some(_: FileDataSourceV2) => None
case other => other
}
}
  ///////////////////////////////////////////////////////////////////////////////////////
  // Builder pattern config options
  ///////////////////////////////////////////////////////////////////////////////////////

  // Short name or class name of the output data source; initialized from the session's
  // default data source setting.
  private var source: String = df.sparkSession.sessionState.conf.defaultDataSourceName
  // Save mode applied when writing; defaults to failing if the target already exists.
  private var mode: SaveMode = SaveMode.ErrorIfExists
  // Free-form options forwarded to the underlying data source (e.g. "url", "dbtable" for JDBC).
  private val extraOptions = new scala.collection.mutable.HashMap[String, String]
  // Directory-partitioning columns, when configured; None otherwise.
  private var partitioningColumns: Option[Seq[String]] = None
  // Bucketing spec: bucket columns, bucket count, and optional sort columns; None until set.
  private var bucketColumnNames: Option[Seq[String]] = None
  private var numBuckets: Option[Int] = None
  private var sortColumnNames: Option[Seq[String]] = None
}
|
spark-test/spark
|
sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
|
Scala
|
apache-2.0
| 39,746 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import kafka.api.LeaderAndIsr
import kafka.common.StateChangeFailedException
import kafka.server.KafkaConfig
import kafka.utils.Logging
import kafka.zk.{KafkaZkClient, TopicPartitionStateZNode}
import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.KeeperException.Code
import scala.collection.mutable
/**
* This class represents the state machine for replicas. It defines the states that a replica can be in, and
* transitions to move the replica to another legal state. The different states that a replica can be in are -
* 1. NewReplica : The controller can create new replicas during partition reassignment. In this state, a
* replica can only get become follower state change request. Valid previous
* state is NonExistentReplica
* 2. OnlineReplica : Once a replica is started and part of the assigned replicas for its partition, it is in this
* state. In this state, it can get either become leader or become follower state change requests.
 *                      Valid previous states are NewReplica, OnlineReplica, OfflineReplica or ReplicaDeletionIneligible
* 3. OfflineReplica : If a replica dies, it moves to this state. This happens when the broker hosting the replica
 *                      is down. Valid previous states are NewReplica, OnlineReplica, OfflineReplica or ReplicaDeletionIneligible
* 4. ReplicaDeletionStarted: If replica deletion starts, it is moved to this state. Valid previous state is OfflineReplica
* 5. ReplicaDeletionSuccessful: If replica responds with no error code in response to a delete replica request, it is
* moved to this state. Valid previous state is ReplicaDeletionStarted
* 6. ReplicaDeletionIneligible: If replica deletion fails, it is moved to this state. Valid previous state is ReplicaDeletionStarted
* 7. NonExistentReplica: If a replica is deleted successfully, it is moved to this state. Valid previous state is
* ReplicaDeletionSuccessful
*/
class ReplicaStateMachine(config: KafkaConfig,
                          stateChangeLogger: StateChangeLogger,
                          controllerContext: ControllerContext,
                          topicDeletionManager: TopicDeletionManager,
                          zkClient: KafkaZkClient,
                          replicaState: mutable.Map[PartitionAndReplica, ReplicaState],
                          controllerBrokerRequestBatch: ControllerBrokerRequestBatch) extends Logging {
  private val controllerId = config.brokerId
  this.logIdent = s"[ReplicaStateMachine controllerId=$controllerId] "

  /**
   * Invoked on successful controller election.
   */
  def startup(): Unit = {
    info("Initializing replica state")
    initializeReplicaState()
    info("Triggering online replica state changes")
    handleStateChanges(controllerContext.allLiveReplicas().toSeq, OnlineReplica)
    info(s"Started replica state machine with initial state -> $replicaState")
  }

  /**
   * Invoked on controller shutdown.
   */
  def shutdown(): Unit = {
    replicaState.clear()
    info("Stopped replica state machine")
  }

  /**
   * Invoked on startup of the replica's state machine to set the initial state for replicas of all existing partitions
   * in zookeeper
   */
  private def initializeReplicaState(): Unit = {
    controllerContext.allPartitions.foreach { partition =>
      val replicas = controllerContext.partitionReplicaAssignment(partition)
      replicas.foreach { replicaId =>
        val partitionAndReplica = PartitionAndReplica(partition, replicaId)
        if (controllerContext.isReplicaOnline(replicaId, partition))
          replicaState.put(partitionAndReplica, OnlineReplica)
        else
          // mark replicas on dead brokers as failed for topic deletion, if they belong to a topic to be deleted.
          // This is required during controller failover since during controller failover a broker can go down,
          // so the replicas on that broker should be moved to ReplicaDeletionIneligible to be on the safer side.
          replicaState.put(partitionAndReplica, ReplicaDeletionIneligible)
      }
    }
  }

  /**
   * Moves the given replicas towards `targetState`, batching all resulting broker requests into a
   * single send per invocation. Errors are logged rather than propagated, so this method never
   * throws.
   */
  def handleStateChanges(replicas: Seq[PartitionAndReplica], targetState: ReplicaState,
                         callbacks: Callbacks = new Callbacks()): Unit = {
    if (replicas.nonEmpty) {
      try {
        controllerBrokerRequestBatch.newBatch()
        // Iterated purely for side effects: use foreach (the previous `map` built and discarded
        // a Map[Int, Unit]). The inner group is renamed to avoid shadowing `replicas`.
        replicas.groupBy(_.replica).foreach { case (replicaId, replicasForBroker) =>
          val partitions = replicasForBroker.map(_.topicPartition)
          doHandleStateChanges(replicaId, partitions, targetState, callbacks)
        }
        controllerBrokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch)
      } catch {
        // Intentionally broad: a failed state change must not kill the controller event thread.
        case e: Throwable => error(s"Error while moving some replicas to $targetState state", e)
      }
    }
  }

  /**
   * This API exercises the replica's state machine. It ensures that every state transition happens from a legal
   * previous state to the target state. Valid state transitions are:
   * NonExistentReplica --> NewReplica
   * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
   *   partition to every live broker
   *
   * NewReplica -> OnlineReplica
   * --add the new replica to the assigned replica list if needed
   *
   * OnlineReplica,OfflineReplica -> OnlineReplica
   * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
   *   partition to every live broker
   *
   * NewReplica,OnlineReplica,OfflineReplica,ReplicaDeletionIneligible -> OfflineReplica
   * --send StopReplicaRequest to the replica (w/o deletion)
   * --remove this replica from the isr and send LeaderAndIsr request (with new isr) to the leader replica and
   *   UpdateMetadata request for the partition to every live broker.
   *
   * OfflineReplica -> ReplicaDeletionStarted
   * --send StopReplicaRequest to the replica (with deletion)
   *
   * ReplicaDeletionStarted -> ReplicaDeletionSuccessful
   * -- mark the state of the replica in the state machine
   *
   * ReplicaDeletionStarted -> ReplicaDeletionIneligible
   * -- mark the state of the replica in the state machine
   *
   * ReplicaDeletionSuccessful -> NonExistentReplica
   * -- remove the replica from the in memory partition replica assignment cache
   *
   * @param replicaId The replica for which the state transition is invoked
   * @param partitions The partitions on this replica for which the state transition is invoked
   * @param targetState The end state that the replica should be moved to
   */
  private def doHandleStateChanges(replicaId: Int, partitions: Seq[TopicPartition], targetState: ReplicaState,
                                   callbacks: Callbacks): Unit = {
    val replicas = partitions.map(partition => PartitionAndReplica(partition, replicaId))
    // Replicas never seen before start in NonExistentReplica.
    replicas.foreach(replica => replicaState.getOrElseUpdate(replica, NonExistentReplica))
    val (validReplicas, invalidReplicas) = replicas.partition(replica => isValidTransition(replica, targetState))
    invalidReplicas.foreach(replica => logInvalidTransition(replica, targetState))
    targetState match {
      case NewReplica =>
        validReplicas.foreach { replica =>
          val partition = replica.topicPartition
          controllerContext.partitionLeadershipInfo.get(partition) match {
            case Some(leaderIsrAndControllerEpoch) =>
              if (leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId) {
                val exception = new StateChangeFailedException(s"Replica $replicaId for partition $partition cannot be moved to NewReplica state as it is being requested to become leader")
                // Log the transition actually being attempted (NewReplica); previously this
                // incorrectly reported OfflineReplica as the target state.
                logFailedStateChange(replica, replicaState(replica), NewReplica, exception)
              } else {
                controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId),
                  replica.topicPartition,
                  leaderIsrAndControllerEpoch,
                  controllerContext.partitionReplicaAssignment(replica.topicPartition),
                  isNew = true)
                logSuccessfulTransition(replicaId, partition, replicaState(replica), NewReplica)
                replicaState.put(replica, NewReplica)
              }
            case None =>
              logSuccessfulTransition(replicaId, partition, replicaState(replica), NewReplica)
              replicaState.put(replica, NewReplica)
          }
        }
      case OnlineReplica =>
        validReplicas.foreach { replica =>
          val partition = replica.topicPartition
          replicaState(replica) match {
            case NewReplica =>
              // Fold the new replica into the assignment if it is not already there.
              val assignment = controllerContext.partitionReplicaAssignment(partition)
              if (!assignment.contains(replicaId)) {
                controllerContext.updatePartitionReplicaAssignment(partition, assignment :+ replicaId)
              }
            case _ =>
              controllerContext.partitionLeadershipInfo.get(partition) match {
                case Some(leaderIsrAndControllerEpoch) =>
                  controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId),
                    replica.topicPartition,
                    leaderIsrAndControllerEpoch,
                    controllerContext.partitionReplicaAssignment(partition), isNew = false)
                case None =>
              }
          }
          logSuccessfulTransition(replicaId, partition, replicaState(replica), OnlineReplica)
          replicaState.put(replica, OnlineReplica)
        }
      case OfflineReplica =>
        validReplicas.foreach { replica =>
          controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId), replica.topicPartition,
            deletePartition = false, (_, _) => ())
        }
        val (replicasWithLeadershipInfo, replicasWithoutLeadershipInfo) = validReplicas.partition { replica =>
          controllerContext.partitionLeadershipInfo.contains(replica.topicPartition)
        }
        val updatedLeaderIsrAndControllerEpochs = removeReplicasFromIsr(replicaId, replicasWithLeadershipInfo.map(_.topicPartition))
        updatedLeaderIsrAndControllerEpochs.foreach { case (partition, leaderIsrAndControllerEpoch) =>
          if (!topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic)) {
            val recipients = controllerContext.partitionReplicaAssignment(partition).filterNot(_ == replicaId)
            controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(recipients,
              partition,
              leaderIsrAndControllerEpoch,
              controllerContext.partitionReplicaAssignment(partition), isNew = false)
          }
          val replica = PartitionAndReplica(partition, replicaId)
          logSuccessfulTransition(replicaId, partition, replicaState(replica), OfflineReplica)
          replicaState.put(replica, OfflineReplica)
        }
        replicasWithoutLeadershipInfo.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), OfflineReplica)
          replicaState.put(replica, OfflineReplica)
        }
      case ReplicaDeletionStarted =>
        validReplicas.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), ReplicaDeletionStarted)
          replicaState.put(replica, ReplicaDeletionStarted)
          controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId),
            replica.topicPartition,
            deletePartition = true,
            callbacks.stopReplicaResponseCallback)
        }
      case ReplicaDeletionIneligible =>
        validReplicas.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), ReplicaDeletionIneligible)
          replicaState.put(replica, ReplicaDeletionIneligible)
        }
      case ReplicaDeletionSuccessful =>
        validReplicas.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), ReplicaDeletionSuccessful)
          replicaState.put(replica, ReplicaDeletionSuccessful)
        }
      case NonExistentReplica =>
        validReplicas.foreach { replica =>
          val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(replica.topicPartition)
          controllerContext.updatePartitionReplicaAssignment(replica.topicPartition, currentAssignedReplicas.filterNot(_ == replica.replica))
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), NonExistentReplica)
          replicaState.remove(replica)
        }
    }
  }

  /**
   * Repeatedly attempt to remove a replica from the isr of multiple partitions until there are no more remaining partitions
   * to retry.
   * @param replicaId The replica being removed from isr of multiple partitions
   * @param partitions The partitions from which we're trying to remove the replica from isr
   * @return The updated LeaderIsrAndControllerEpochs of all partitions for which we successfully removed the replica from isr.
   */
  private def removeReplicasFromIsr(replicaId: Int, partitions: Seq[TopicPartition]):
  Map[TopicPartition, LeaderIsrAndControllerEpoch] = {
    var results = Map.empty[TopicPartition, LeaderIsrAndControllerEpoch]
    var remaining = partitions
    while (remaining.nonEmpty) {
      val (successfulRemovals, removalsToRetry, failedRemovals) = doRemoveReplicasFromIsr(replicaId, remaining)
      results ++= successfulRemovals
      remaining = removalsToRetry
      failedRemovals.foreach { case (partition, e) =>
        val replica = PartitionAndReplica(partition, replicaId)
        logFailedStateChange(replica, replicaState(replica), OfflineReplica, e)
      }
    }
    results
  }

  /**
   * Try to remove a replica from the isr of multiple partitions.
   * Removing a replica from isr updates partition state in zookeeper.
   *
   * @param replicaId The replica being removed from isr of multiple partitions
   * @param partitions The partitions from which we're trying to remove the replica from isr
   * @return A tuple of three values:
   *         1. The updated LeaderIsrAndControllerEpochs of all partitions for which we successfully removed the replica from isr.
   *         2. The partitions that we should retry due to a zookeeper BADVERSION conflict. Version conflicts can occur if
   *         the partition leader updated partition state while the controller attempted to update partition state.
   *         3. Exceptions corresponding to failed removals that should not be retried.
   */
  private def doRemoveReplicasFromIsr(replicaId: Int, partitions: Seq[TopicPartition]):
  (Map[TopicPartition, LeaderIsrAndControllerEpoch],
    Seq[TopicPartition],
    Map[TopicPartition, Exception]) = {
    val (leaderAndIsrs, partitionsWithNoLeaderAndIsrInZk, failedStateReads) = getTopicPartitionStatesFromZk(partitions)
    val (leaderAndIsrsWithReplica, leaderAndIsrsWithoutReplica) = leaderAndIsrs.partition { case (_, leaderAndIsr) => leaderAndIsr.isr.contains(replicaId) }
    // Use a strict map rather than the lazy `mapValues` view so the adjusted leader/isr values
    // are computed exactly once instead of on every traversal.
    val adjustedLeaderAndIsrs = leaderAndIsrsWithReplica.map { case (partition, leaderAndIsr) =>
      val newLeader = if (replicaId == leaderAndIsr.leader) LeaderAndIsr.NoLeader else leaderAndIsr.leader
      // Never shrink the isr to empty: keep the last member even when it is the departing replica.
      val adjustedIsr = if (leaderAndIsr.isr.size == 1) leaderAndIsr.isr else leaderAndIsr.isr.filter(_ != replicaId)
      partition -> leaderAndIsr.newLeaderAndIsr(newLeader, adjustedIsr)
    }
    val UpdateLeaderAndIsrResult(successfulUpdates, updatesToRetry, failedUpdates) = zkClient.updateLeaderAndIsr(
      adjustedLeaderAndIsrs, controllerContext.epoch)
    val exceptionsForPartitionsWithNoLeaderAndIsrInZk = partitionsWithNoLeaderAndIsrInZk.flatMap { partition =>
      if (!topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic)) {
        val exception = new StateChangeFailedException(s"Failed to change state of replica $replicaId for partition $partition since the leader and isr path in zookeeper is empty")
        Option(partition -> exception)
      } else None
    }.toMap
    val leaderIsrAndControllerEpochs = (leaderAndIsrsWithoutReplica ++ successfulUpdates).map { case (partition, leaderAndIsr) =>
      val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerContext.epoch)
      controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
      partition -> leaderIsrAndControllerEpoch
    }
    (leaderIsrAndControllerEpochs, updatesToRetry, failedStateReads ++ exceptionsForPartitionsWithNoLeaderAndIsrInZk ++ failedUpdates)
  }

  /**
   * Gets the partition state from zookeeper
   * @param partitions the partitions whose state we want from zookeeper
   * @return A tuple of three values:
   *         1. The LeaderAndIsrs of partitions whose state we successfully read from zookeeper
   *         2. The partitions that had no leader and isr state in zookeeper. This happens if the controller
   *         didn't finish partition initialization.
   *         3. Exceptions corresponding to failed zookeeper lookups or states whose controller epoch exceeds our current epoch.
   */
  private def getTopicPartitionStatesFromZk(partitions: Seq[TopicPartition]):
  (Map[TopicPartition, LeaderAndIsr],
    Seq[TopicPartition],
    Map[TopicPartition, Exception]) = {
    val leaderAndIsrs = mutable.Map.empty[TopicPartition, LeaderAndIsr]
    val partitionsWithNoLeaderAndIsrInZk = mutable.Buffer.empty[TopicPartition]
    val failed = mutable.Map.empty[TopicPartition, Exception]
    val getDataResponses = try {
      zkClient.getTopicPartitionStatesRaw(partitions)
    } catch {
      case e: Exception =>
        // A wholesale read failure fails every requested partition.
        partitions.foreach(partition => failed.put(partition, e))
        return (leaderAndIsrs.toMap, partitionsWithNoLeaderAndIsrInZk, failed.toMap)
    }
    getDataResponses.foreach { getDataResponse =>
      val partition = getDataResponse.ctx.get.asInstanceOf[TopicPartition]
      if (getDataResponse.resultCode == Code.OK) {
        val leaderIsrAndControllerEpochOpt = TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat)
        if (leaderIsrAndControllerEpochOpt.isEmpty) {
          partitionsWithNoLeaderAndIsrInZk += partition
        } else {
          val leaderIsrAndControllerEpoch = leaderIsrAndControllerEpochOpt.get
          if (leaderIsrAndControllerEpoch.controllerEpoch > controllerContext.epoch) {
            // Fixed missing space between "probably" and "means" in the original message.
            val exception = new StateChangeFailedException("Leader and isr path written by another controller. This probably " +
              s"means the current controller with epoch ${controllerContext.epoch} went through a soft failure and another " +
              s"controller was elected with epoch ${leaderIsrAndControllerEpoch.controllerEpoch}. Aborting state change by this controller")
            failed.put(partition, exception)
          } else {
            leaderAndIsrs.put(partition, leaderIsrAndControllerEpoch.leaderAndIsr)
          }
        }
      } else if (getDataResponse.resultCode == Code.NONODE) {
        partitionsWithNoLeaderAndIsrInZk += partition
      } else {
        failed.put(partition, getDataResponse.resultException.get)
      }
    }
    (leaderAndIsrs.toMap, partitionsWithNoLeaderAndIsrInZk, failed.toMap)
  }

  /** True when any replica of `topic` is currently in ReplicaDeletionStarted. */
  def isAtLeastOneReplicaInDeletionStartedState(topic: String): Boolean = {
    controllerContext.replicasForTopic(topic).exists(replica => replicaState(replica) == ReplicaDeletionStarted)
  }

  /** Returns all replicas of `topic` currently in the given state. */
  def replicasInState(topic: String, state: ReplicaState): Set[PartitionAndReplica] = {
    replicaState.filter { case (replica, s) => replica.topic.equals(topic) && s == state }.keySet.toSet
  }

  /** True when every replica of `topic` has reached ReplicaDeletionSuccessful. */
  def areAllReplicasForTopicDeleted(topic: String): Boolean = {
    controllerContext.replicasForTopic(topic).forall(replica => replicaState(replica) == ReplicaDeletionSuccessful)
  }

  /** True when at least one replica of `topic` is in the given state. */
  def isAnyReplicaInState(topic: String, state: ReplicaState): Boolean = {
    replicaState.exists { case (replica, s) => replica.topic.equals(topic) && s == state}
  }

  /** A transition is legal when the replica's current state is a valid predecessor of `targetState`. */
  private def isValidTransition(replica: PartitionAndReplica, targetState: ReplicaState) =
    targetState.validPreviousStates.contains(replicaState(replica))

  private def logSuccessfulTransition(replicaId: Int, partition: TopicPartition, currState: ReplicaState, targetState: ReplicaState): Unit = {
    stateChangeLogger.withControllerEpoch(controllerContext.epoch)
      .trace(s"Changed state of replica $replicaId for partition $partition from $currState to $targetState")
  }

  private def logInvalidTransition(replica: PartitionAndReplica, targetState: ReplicaState): Unit = {
    val currState = replicaState(replica)
    val e = new IllegalStateException(s"Replica $replica should be in the ${targetState.validPreviousStates.mkString(",")} " +
      s"states before moving to $targetState state. Instead it is in $currState state")
    logFailedStateChange(replica, currState, targetState, e)
  }

  private def logFailedStateChange(replica: PartitionAndReplica, currState: ReplicaState, targetState: ReplicaState, t: Throwable): Unit = {
    stateChangeLogger.withControllerEpoch(controllerContext.epoch)
      .error(s"Controller $controllerId epoch ${controllerContext.epoch} initiated state change of replica ${replica.replica} " +
        s"for partition ${replica.topicPartition} from $currState to $targetState failed", t)
  }
}
/** A state in the replica lifecycle; see the ReplicaStateMachine scaladoc for transition rules. */
sealed trait ReplicaState {
  /** Numeric identifier of this state. */
  def state: Byte
  /** States from which a replica may legally transition into this state. */
  def validPreviousStates: Set[ReplicaState]
}

/** Newly created replica (e.g. during partition reassignment); may only become a follower. */
case object NewReplica extends ReplicaState {
  val state: Byte = 1
  val validPreviousStates: Set[ReplicaState] = Set(NonExistentReplica)
}

/** Started replica that is part of its partition's assigned replicas. */
case object OnlineReplica extends ReplicaState {
  val state: Byte = 2
  val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible)
}

/** Replica whose hosting broker is down. */
case object OfflineReplica extends ReplicaState {
  val state: Byte = 3
  val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible)
}

/** Deletion has been requested for this replica. */
case object ReplicaDeletionStarted extends ReplicaState {
  val state: Byte = 4
  val validPreviousStates: Set[ReplicaState] = Set(OfflineReplica)
}

/** The replica acknowledged deletion without error. */
case object ReplicaDeletionSuccessful extends ReplicaState {
  val state: Byte = 5
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionStarted)
}

/** Deletion failed for this replica; it may be retried via OnlineReplica/OfflineReplica. */
case object ReplicaDeletionIneligible extends ReplicaState {
  val state: Byte = 6
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionStarted)
}

/** The replica has been fully deleted and removed from the assignment cache. */
case object NonExistentReplica extends ReplicaState {
  val state: Byte = 7
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionSuccessful)
}
|
Esquive/kafka
|
core/src/main/scala/kafka/controller/ReplicaStateMachine.scala
|
Scala
|
apache-2.0
| 23,547 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.server
import cats.effect.IO
import cats.effect.Resource
import fs2.io.net.tls.TLSParameters
import org.http4s.Http4sSuite
import org.http4s.HttpApp
import org.http4s.dsl.io._
import org.http4s.server.Server
import org.http4s.server.ServerRequestKeys
import org.http4s.testing.ErrorReporting
import java.net.URL
import java.nio.charset.StandardCharsets
import java.security.KeyStore
import javax.net.ssl._
import scala.concurrent.duration._
import scala.io.Source
import scala.util.Try
/** Test cases for mTLS (mutual TLS) support in the blaze server backend.
  *
  * A blaze server is started with an SSLContext backed by the bundled
  * self-signed keystore (/keystore.jks), once in "required" client-auth mode
  * (`needClientAuth`) and once in "requested" mode (`wantClientAuth`).
  * Requests are then issued via HttpsURLConnection both with and without a
  * client certificate, and the routes below report what the server observed
  * in the SecureSession request attribute.
  */
class BlazeServerMtlsSpec extends Http4sSuite {

  // Construction-time side effect: install a permissive hostname verifier
  // JVM-wide for HttpsURLConnection, since the test certificates are
  // self-signed and their CN will not match the test host.
  {
    val hostnameVerifier: HostnameVerifier = new HostnameVerifier {
      override def verify(s: String, sslSession: SSLSession): Boolean = true
    }

    // For test cases, don't do any host name verification. Certificates are self-signed and not available to all hosts
    HttpsURLConnection.setDefaultHostnameVerifier(hostnameVerifier)
  }

  /** Base server builder shared by every fixture. The short response-header
    * timeout keeps a stalled handshake from hanging the suite.
    */
  def builder: BlazeServerBuilder[IO] =
    BlazeServerBuilder[IO]
      .withResponseHeaderTimeout(1.second)

  /** Test routes.
    *
    *  - GET /dummy: expects a client certificate. Asserts the secure session
    *    carries a non-empty session id, cipher suite and key size, then
    *    responds with the subject DN of the first X509 certificate; responds
    *    "Invalid" when no secure session (or no certificate chain) is present.
    *  - GET /noauth: asserts that any secure session present carries NO client
    *    certificates, then responds "success".
    *  - anything else: 404.
    */
  val service: HttpApp[IO] = HttpApp {
    case req @ GET -> Root / "dummy" =>
      val output = req.attributes
        .lookup(ServerRequestKeys.SecureSession)
        .flatten
        .map { session =>
          assertNotEquals(session.sslSessionId, "")
          assertNotEquals(session.cipherSuite, "")
          assertNotEquals(session.keySize, 0)

          // Subject DN of the client's leaf certificate becomes the body,
          // so tests can assert on exactly which identity was presented.
          session.X509Certificate.head.getSubjectX500Principal.getName
        }
        .getOrElse("Invalid")

      Ok(output)

    case req @ GET -> Root / "noauth" =>
      req.attributes
        .lookup(ServerRequestKeys.SecureSession)
        .flatten
        .foreach { session =>
          assertNotEquals(session.sslSessionId, "")
          assertNotEquals(session.cipherSuite, "")
          assertNotEquals(session.keySize, 0)
          // No client auth was performed, so no certificate chain is expected.
          assertEquals(session.X509Certificate, Nil)
        }

      Ok("success")

    case _ => NotFound()
  }

  /** Server resource bound to a random free port, serving [[service]] over TLS
    * with the supplied SSL parameters (which carry the client-auth mode).
    */
  def serverR(sslParameters: SSLParameters): Resource[IO, Server] =
    builder
      .bindAny()
      .withSslContextAndParameters(sslContext, sslParameters)
      .withHttpApp(service)
      .resource

  /** SSLContext used by the server and by the mTLS-capable client: key
    * material AND trust anchors both come from the same self-signed keystore,
    * so the server trusts the client certificate and vice versa.
    */
  lazy val sslContext: SSLContext = {
    val ks = KeyStore.getInstance("JKS")
    ks.load(getClass.getResourceAsStream("/keystore.jks"), "password".toCharArray)

    val kmf = KeyManagerFactory.getInstance("SunX509")
    kmf.init(ks, "password".toCharArray)

    val js = KeyStore.getInstance("JKS")
    js.load(getClass.getResourceAsStream("/keystore.jks"), "password".toCharArray)

    val tmf = TrustManagerFactory.getInstance("SunX509")
    tmf.init(js)

    val sc = SSLContext.getInstance("TLSv1.2")
    sc.init(kmf.getKeyManagers, tmf.getTrustManagers, null)

    sc
  }

  /** Used for no mTLS client. Required to trust self-signed certificate.
    * Key managers are null, so this client can never present a certificate.
    */
  lazy val noAuthClientContext: SSLContext = {
    val js = KeyStore.getInstance("JKS")
    js.load(getClass.getResourceAsStream("/keystore.jks"), "password".toCharArray)

    val tmf = TrustManagerFactory.getInstance("SunX509")
    tmf.init(js)

    val sc = SSLContext.getInstance("TLSv1.2")
    sc.init(null, tmf.getTrustManagers, null)

    sc
  }

  /** Issues a blocking GET against the running server and returns the body.
    *
    * @param clientAuth when true, uses [[sslContext]] (presents a client
    *                   certificate); when false, uses [[noAuthClientContext]].
    * @return the response body, the exception message on failure, or "" when
    *         neither is available — callers assert on the returned string.
    */
  def get(server: Server, path: String, clientAuth: Boolean = true): String =
    ErrorReporting.silenceOutputStreams {
      val url = new URL(s"https://${server.address}$path")
      val conn = url.openConnection().asInstanceOf[HttpsURLConnection]
      conn.setRequestMethod("GET")

      if (clientAuth)
        conn.setSSLSocketFactory(sslContext.getSocketFactory)
      else
        conn.setSSLSocketFactory(noAuthClientContext.getSocketFactory)

      Try {
        Source.fromInputStream(conn.getInputStream, StandardCharsets.UTF_8.name).getLines().mkString
      }.recover { case ex: Throwable =>
        ex.getMessage
      }.toOption
        .getOrElse("")
    }

  // Fixture factory: one server per test, configured with the given client-auth mode.
  def blazeServer(sslParameters: SSLParameters) =
    ResourceFixture(serverR(sslParameters))

  /** Test "required" auth mode
    */
  blazeServer(TLSParameters(needClientAuth = true).toSSLParameters)
    .test("Server should send mTLS request correctly") { server =>
      assertEquals(get(server, "/dummy", true), "CN=Test,OU=Test,O=Test,L=CA,ST=CA,C=US")
    }

  blazeServer(TLSParameters(needClientAuth = true).toSSLParameters)
    .test("Server should fail for invalid client auth") { server =>
      // Without a client certificate the handshake/request cannot yield the DN.
      assertNotEquals(get(server, "/dummy", false), "CN=Test,OU=Test,O=Test,L=CA,ST=CA,C=US")
    }

  /** Test "requested" auth mode
    */
  blazeServer(TLSParameters(wantClientAuth = true).toSSLParameters)
    .test("Server should send mTLS request correctly with optional auth") { server =>
      assertEquals(get(server, "/dummy", true), "CN=Test,OU=Test,O=Test,L=CA,ST=CA,C=US")
    }

  blazeServer(TLSParameters(wantClientAuth = true).toSSLParameters)
    .test("Server should send mTLS request correctly without clientAuth") { server =>
      // Auth is only "wanted", so the connection succeeds with no certificate.
      assertEquals(get(server, "/noauth", false), "success")
    }
}
|
http4s/http4s
|
blaze-server/src/test/scala/org/http4s/blaze/server/BlazeServerMtlsSpec.scala
|
Scala
|
apache-2.0
| 5,587 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import junit.framework.Assert._
import kafka.api.{ApiVersion, KAFKA_082}
import kafka.utils.{TestUtils, CoreUtils}
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.Test
import org.scalatest.junit.JUnit3Suite
/** Unit tests for KafkaConfig parsing and validation.
  *
  * Covers: log-retention time resolution (ms takes precedence over minutes,
  * minutes over hours), unlimited (-1) and invalid (0) retention values,
  * advertised listener defaulting from host/port properties, listener
  * validation (duplicates, unknown protocols), inter-broker protocol version
  * parsing, unclean leader election flags, log-roll time resolution and
  * compression type validation.
  *
  * Fixes applied in review: `testLogRetentionValid` now uses an explicit empty
  * parameter list like every other test method (procedure syntax without `()`
  * is deprecated and would not be discovered consistently), and unused local
  * bindings inside `intercept` blocks were removed.
  */
class KafkaConfigTest extends JUnit3Suite {

  @Test
  def testLogRetentionTimeHoursProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeHoursProp, "1")

    val cfg = KafkaConfig.fromProps(props)
    // 1 hour expressed in milliseconds
    assertEquals(60L * 60L * 1000L, cfg.logRetentionTimeMillis)
  }

  @Test
  def testLogRetentionTimeMinutesProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMinutesProp, "30")

    val cfg = KafkaConfig.fromProps(props)
    // 30 minutes expressed in milliseconds
    assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }

  @Test
  def testLogRetentionTimeMsProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMillisProp, "1800000")

    val cfg = KafkaConfig.fromProps(props)
    assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }

  @Test
  def testLogRetentionTimeNoConfigProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)

    val cfg = KafkaConfig.fromProps(props)
    // Default retention is 7 days
    assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRetentionTimeMillis)
  }

  @Test
  def testLogRetentionTimeBothMinutesAndHoursProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMinutesProp, "30")
    props.put(KafkaConfig.LogRetentionTimeHoursProp, "1")

    val cfg = KafkaConfig.fromProps(props)
    // Minutes take precedence over hours
    assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }

  @Test
  def testLogRetentionTimeBothMinutesAndMsProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMillisProp, "1800000")
    props.put(KafkaConfig.LogRetentionTimeMinutesProp, "10")

    val cfg = KafkaConfig.fromProps(props)
    // Milliseconds take precedence over minutes
    assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }

  @Test
  def testLogRetentionUnlimited() {
    val props1 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val props2 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val props3 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val props4 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val props5 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)

    // -1 means unlimited retention, whichever unit it is expressed in
    props1.put("log.retention.ms", "-1")
    props2.put("log.retention.minutes", "-1")
    props3.put("log.retention.hours", "-1")

    val cfg1 = KafkaConfig.fromProps(props1)
    val cfg2 = KafkaConfig.fromProps(props2)
    val cfg3 = KafkaConfig.fromProps(props3)
    assertEquals("Should be -1", -1, cfg1.logRetentionTimeMillis)
    assertEquals("Should be -1", -1, cfg2.logRetentionTimeMillis)
    assertEquals("Should be -1", -1, cfg3.logRetentionTimeMillis)

    // -1 in ms wins even when a finite value is set in minutes
    props4.put("log.retention.ms", "-1")
    props4.put("log.retention.minutes", "30")

    val cfg4 = KafkaConfig.fromProps(props4)
    assertEquals("Should be -1", -1, cfg4.logRetentionTimeMillis)

    // 0 is not a valid retention value
    props5.put("log.retention.ms", "0")

    intercept[IllegalArgumentException] {
      KafkaConfig.fromProps(props5)
    }
  }

  @Test
  def testLogRetentionValid() {
    val props1 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val props2 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val props3 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)

    // 0 retention is rejected in every unit
    props1.put("log.retention.ms", "0")
    props2.put("log.retention.minutes", "0")
    props3.put("log.retention.hours", "0")

    intercept[IllegalArgumentException] {
      KafkaConfig.fromProps(props1)
    }
    intercept[IllegalArgumentException] {
      KafkaConfig.fromProps(props2)
    }
    intercept[IllegalArgumentException] {
      KafkaConfig.fromProps(props3)
    }
  }

  @Test
  def testAdvertiseDefaults() {
    val port = "9999"
    val hostName = "fake-host"

    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
    props.remove(KafkaConfig.ListenersProp)
    props.put(KafkaConfig.HostNameProp, hostName)
    props.put(KafkaConfig.PortProp, port)

    val serverConfig = KafkaConfig.fromProps(props)
    // With no advertised values configured, host.name/port are advertised
    val endpoints = serverConfig.advertisedListeners
    val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
    assertEquals(endpoint.host, hostName)
    assertEquals(endpoint.port, port.toInt)
  }

  @Test
  def testAdvertiseConfigured() {
    val advertisedHostName = "routable-host"
    val advertisedPort = "1234"

    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
    props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName)
    props.put(KafkaConfig.AdvertisedPortProp, advertisedPort)

    val serverConfig = KafkaConfig.fromProps(props)
    val endpoints = serverConfig.advertisedListeners
    val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get

    assertEquals(endpoint.host, advertisedHostName)
    assertEquals(endpoint.port, advertisedPort.toInt)
  }

  @Test
  def testAdvertisePortDefault() {
    val advertisedHostName = "routable-host"
    val port = "9999"

    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
    props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName)
    props.put(KafkaConfig.PortProp, port)

    val serverConfig = KafkaConfig.fromProps(props)
    val endpoints = serverConfig.advertisedListeners
    val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get

    // Advertised port falls back to the regular port when unset
    assertEquals(endpoint.host, advertisedHostName)
    assertEquals(endpoint.port, port.toInt)
  }

  @Test
  def testAdvertiseHostNameDefault() {
    val hostName = "routable-host"
    val advertisedPort = "9999"

    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
    props.put(KafkaConfig.HostNameProp, hostName)
    props.put(KafkaConfig.AdvertisedPortProp, advertisedPort)

    val serverConfig = KafkaConfig.fromProps(props)
    val endpoints = serverConfig.advertisedListeners
    val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get

    // Advertised host falls back to host.name when unset
    assertEquals(endpoint.host, hostName)
    assertEquals(endpoint.port, advertisedPort.toInt)
  }

  @Test
  def testDuplicateListeners() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")

    // listeners with duplicate port
    props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9091,TRACE://localhost:9091")
    assert(!isValidKafkaConfig(props))

    // listeners with duplicate protocol
    props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9091,PLAINTEXT://localhost:9092")
    assert(!isValidKafkaConfig(props))

    // advertised listeners with duplicate port
    props.put(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://localhost:9091,TRACE://localhost:9091")
    assert(!isValidKafkaConfig(props))
  }

  @Test
  def testBadListenerProtocol() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
    props.put(KafkaConfig.ListenersProp, "BAD://localhost:9091")

    assert(!isValidKafkaConfig(props))
  }

  @Test
  def testListenerDefaults() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")

    // configuration with host and port, but no listeners
    props.put(KafkaConfig.HostNameProp, "myhost")
    props.put(KafkaConfig.PortProp, "1111")

    val conf = KafkaConfig.fromProps(props)
    assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://myhost:1111"), conf.listeners)

    // configuration with null host
    props.remove(KafkaConfig.HostNameProp)

    val conf2 = KafkaConfig.fromProps(props)
    assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://:1111"), conf2.listeners)
    assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://:1111"), conf2.advertisedListeners)
    assertEquals(null, conf2.listeners(SecurityProtocol.PLAINTEXT).host)

    // configuration with advertised host and port, and no advertised listeners
    props.put(KafkaConfig.AdvertisedHostNameProp, "otherhost")
    props.put(KafkaConfig.AdvertisedPortProp, "2222")

    val conf3 = KafkaConfig.fromProps(props)
    assertEquals(conf3.advertisedListeners, CoreUtils.listenerListToEndPoints("PLAINTEXT://otherhost:2222"))
  }

  @Test
  def testVersionConfiguration() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")

    // unset -> latest version
    val conf = KafkaConfig.fromProps(props)
    assertEquals(ApiVersion.latestVersion, conf.interBrokerProtocolVersion)

    props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.8.2.0")
    val conf2 = KafkaConfig.fromProps(props)
    assertEquals(KAFKA_082, conf2.interBrokerProtocolVersion)

    // check that 0.8.2.0 is the same as 0.8.2.1
    props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.8.2.1")
    val conf3 = KafkaConfig.fromProps(props)
    assertEquals(KAFKA_082, conf3.interBrokerProtocolVersion)

    // check that latest is newer than 0.8.2
    assert(ApiVersion.latestVersion.onOrAfter(conf3.interBrokerProtocolVersion))
  }

  /** Returns true when `props` parses into a valid KafkaConfig, false when
    * validation rejects it with an IllegalArgumentException.
    */
  private def isValidKafkaConfig(props: Properties): Boolean = {
    try {
      KafkaConfig.fromProps(props)
      true
    } catch {
      case _: IllegalArgumentException => false
    }
  }

  @Test
  def testUncleanLeaderElectionDefault() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val serverConfig = KafkaConfig.fromProps(props)

    // Unclean leader election is enabled by default
    assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
  }

  @Test
  def testUncleanElectionDisabled() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.UncleanLeaderElectionEnableProp, String.valueOf(false))
    val serverConfig = KafkaConfig.fromProps(props)

    assertEquals(serverConfig.uncleanLeaderElectionEnable, false)
  }

  @Test
  def testUncleanElectionEnabled() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.UncleanLeaderElectionEnableProp, String.valueOf(true))
    val serverConfig = KafkaConfig.fromProps(props)

    assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
  }

  @Test
  def testUncleanElectionInvalid() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.UncleanLeaderElectionEnableProp, "invalid")

    // Non-boolean values are rejected at parse time
    intercept[ConfigException] {
      KafkaConfig.fromProps(props)
    }
  }

  @Test
  def testLogRollTimeMsProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRollTimeMillisProp, "1800000")

    val cfg = KafkaConfig.fromProps(props)
    assertEquals(30 * 60L * 1000L, cfg.logRollTimeMillis)
  }

  @Test
  def testLogRollTimeBothMsAndHoursProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRollTimeMillisProp, "1800000")
    props.put(KafkaConfig.LogRollTimeHoursProp, "1")

    val cfg = KafkaConfig.fromProps(props)
    // Milliseconds take precedence over hours
    assertEquals(30 * 60L * 1000L, cfg.logRollTimeMillis)
  }

  @Test
  def testLogRollTimeNoConfigProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)

    val cfg = KafkaConfig.fromProps(props)
    // Default roll time is 7 days
    assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRollTimeMillis)
  }

  @Test
  def testDefaultCompressionType() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val serverConfig = KafkaConfig.fromProps(props)

    // "producer" means: retain whatever compression the producer used
    assertEquals(serverConfig.compressionType, "producer")
  }

  @Test
  def testValidCompressionType() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put("compression.type", "gzip")
    val serverConfig = KafkaConfig.fromProps(props)

    assertEquals(serverConfig.compressionType, "gzip")
  }

  @Test
  def testInvalidCompressionType() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.CompressionTypeProp, "abc")
    intercept[IllegalArgumentException] {
      KafkaConfig.fromProps(props)
    }
  }
}
|
jhspaybar/kafka
|
core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala
|
Scala
|
apache-2.0
| 13,721 |
package test_data
import scala.xml.Elem
object ClaimBuilder {
def goodClaim: Elem = {
<DWPBody xmlns:ds="http://www.w3.org/2000/09/xmldsig#" xmlns="http://www.govtalk.gov.uk/dwp/carers-allowance"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.govtalk.gov.uk/dwp/carers-allowance file:/Users/jmi/Temp/CarersAllowance_Schema.xsd">
<Version>{XMLData.LATEST_VERSION}</Version>
<DWPCATransaction>
<TransactionId>NFM33DB</TransactionId>
<DateTimeGenerated>02-10-2010 14:36</DateTimeGenerated>
<DWPCAClaim>
<DateOfClaim>
<QuestionLabel>When do you want your Carer's Allowance claim to start?</QuestionLabel>
<Answer>01-01-2010</Answer>
</DateOfClaim>
<Claimant>
<Surname>CaseThree</Surname>
<OtherNames>Test Middle</OtherNames>
<OtherSurnames>Smithson</OtherSurnames>
<Title>Mr</Title>
<DateOfBirth>01-01-1931</DateOfBirth>
<NationalInsuranceNumber>JB486278C</NationalInsuranceNumber>
<Address>
<Line>3 Preston Road</Line>
<Line>Preston</Line>
<Line>Lancashire</Line>
<PostCode>PR1 2TH</PostCode>
</Address>
<DayTimePhoneNumber>01772 888901</DayTimePhoneNumber>
<MobileNumber>0771 5419808</MobileNumber>
<MaritalStatus>Single</MaritalStatus>
<TextPhoneContact>
<QuestionLabel>text.phone</QuestionLabel>
<Answer>No</Answer>
</TextPhoneContact>
</Claimant>
<Caree>
<Surname>CaseThree</Surname>
<OtherNames>Cloe Scott</OtherNames>
<OtherSurnames>Watson</OtherSurnames>
<Title>Dame</Title>
<DateOfBirth>03-07-1953</DateOfBirth>
<NationalInsuranceNumber>BA234567A</NationalInsuranceNumber>
<Address>
<Line>3a Preston Road</Line>
<Line>Preston</Line>
<Line>Lancashire</Line>
<PostCode>PR1 2TH</PostCode>
</Address>
<DayTimePhoneNumber>01234 567890</DayTimePhoneNumber>
<RelationToClaimant>
<QuestionLabel>What's their relationshipt to you?</QuestionLabel>
<Answer>Mother</Answer>
</RelationToClaimant>
<Cared35Hours>
<QuestionLabel>Do you spend 35 hours or more each week caring for this person?</QuestionLabel>
<Answer>Yes</Answer>
</Cared35Hours>
<BreaksSinceClaim>
<QuestionLabel>Have you had any breaks in caring for this person since claim date?</QuestionLabel>
<Answer>Yes</Answer>
</BreaksSinceClaim>
<CareBreak>
<StartDateTime>10-07-2010 10:00</StartDateTime>
<EndDateTime>17-07-2010 17:45</EndDateTime>
<ReasonClaimant>
<QuestionLabel>Where were you during the break?</QuestionLabel>
<Other>Friend's place</Other>
<Answer>Other</Answer>
</ReasonClaimant>
<ReasonCaree>
<QuestionLabel>Where was the person you care for during the break?</QuestionLabel>
<Answer>At Home</Answer>
</ReasonCaree>
<MedicalCare>
<QuestionLabel>Did you or the person you care for receive any medical treatment or professional care during the break?</QuestionLabel>
<Answer>No</Answer>
</MedicalCare>
</CareBreak>
<CareBreak>
<StartDateTime>10-09-2010 12:00</StartDateTime>
<EndDateTime>17-09-2010 15:15</EndDateTime>
<ReasonClaimant>
<QuestionLabel>Where were you during the break?</QuestionLabel>
<Answer>Hospital</Answer>
</ReasonClaimant>
<ReasonCaree>
<QuestionLabel>Where was the person you care for during the break?</QuestionLabel>
<Answer>At Home</Answer>
</ReasonCaree>
<MedicalCare>
<QuestionLabel>Did you or the person you care for receive any medical treatment or professional care during the break?</QuestionLabel>
<Answer>Yes</Answer>
</MedicalCare>
</CareBreak>
<CareBreak>
<StartDateTime>10-10-2010 10:30</StartDateTime>
</CareBreak>
<Cared35HoursBefore>
<QuestionLabel>care.35.before</QuestionLabel>
<Answer>No</Answer>
</Cared35HoursBefore>
<DateStartCaring>
<QuestionLabel>care.started</QuestionLabel>
<Answer>05-01-2010</Answer>
</DateStartCaring>
<LiveSameAddress>
<QuestionLabel>caree.sameadrees</QuestionLabel>
<Answer>Yes</Answer>
</LiveSameAddress>
<ArmedForcesIndependencePayment>
<QuestionLabel>Does this person get Armed Forces Independence Payment?</QuestionLabel>
<Answer>No</Answer>
</ArmedForcesIndependencePayment>
</Caree>
<Residency>
<NormallyLiveInGB>
<QuestionLabel>live.normally.GB</QuestionLabel>
<Answer>No</Answer>
</NormallyLiveInGB>
<CountryNormallyLive>
<QuestionLabel>live.normally.country</QuestionLabel>
<Answer>France</Answer>
</CountryNormallyLive>
<Nationality>British</Nationality>
<TimeOutsideGBLast3Years>
<QuestionLabel>Time.out.GB</QuestionLabel>
<Answer>Yes</Answer>
</TimeOutsideGBLast3Years>
<PeriodAbroad>
<Period>
<DateFrom>
<QuestionLabel>abroad.date.from</QuestionLabel>
<Answer>08-09-2010</Answer>
</DateFrom>
<DateTo>
<QuestionLabel>abroad.date.to</QuestionLabel>
<Answer>08-12-2010</Answer>
</DateTo>
</Period>
<Reason>
<QuestionLabel>abroad.reason</QuestionLabel>
<Other>Funeral</Other>
<Answer>Other</Answer>
</Reason>
<Country>
<QuestionLabel>country.went</QuestionLabel>
<Answer>France</Answer>
</Country>
<CareePresent>
<QuestionLabel>caree.present</QuestionLabel>
<Answer>No</Answer>
</CareePresent>
</PeriodAbroad>
<PeriodAbroad>
<Period>
<DateFrom>
<QuestionLabel>abroad.date.from</QuestionLabel>
<Answer>06-09-2011</Answer>
</DateFrom>
<DateTo>
<QuestionLabel>abroad.date.to</QuestionLabel>
<Answer>06-12-2011</Answer>
</DateTo>
</Period>
<Reason>
<QuestionLabel>abroad.reason</QuestionLabel>
<Answer>Holiday</Answer>
</Reason>
<Country>
<QuestionLabel>country.went</QuestionLabel>
<Answer>Spain</Answer>
</Country>
<CareePresent>
<QuestionLabel>caree.present</QuestionLabel>
<Answer>Yes</Answer>
</CareePresent>
</PeriodAbroad>
</Residency>
<CourseOfEducation>
<QuestionLabel>Have you been on a course of education since your claim date?</QuestionLabel>
<Answer>Yes</Answer>
</CourseOfEducation>
<FullTimeEducation>
<CourseDetails>
<Type>BA honours in Business</Type>
<Title>Bussines Studies</Title>
<DateStarted>
<QuestionLabel>education.started</QuestionLabel>
<Answer>01-01-2013</Answer>
</DateStarted>
<!-- <DateStopped>
<QuestionLabel>education.ended</QuestionLabel>
<Answer>2013-05-04</Answer>
</DateStopped>
-->
<ExpectedEndDate>
<QuestionLabel>education.end.expected</QuestionLabel>
<Answer>05-01-2014</Answer>
</ExpectedEndDate>
</CourseDetails>
<LocationDetails>
<Name>Oxford College</Name>
<Address>
<Line>1 Oxford Road</Line>
<Line>Oxford</Line>
<Line>Oxfordshire</Line>
<PostCode>OX12 3RT</PostCode>
</Address>
<PhoneNumber>01776 829920</PhoneNumber>
<FaxNumber>01776 829920</FaxNumber>
<StudentReferenceNumber>91982</StudentReferenceNumber>
<Tutor>My Tutor</Tutor>
</LocationDetails>
</FullTimeEducation>
<SelfEmployed>
<QuestionLabel>Have you been self-employed at any time since (this is one week before your claim date)?</QuestionLabel>
<Answer>Yes</Answer>
</SelfEmployed>
<SelfEmployment>
<SelfEmployedNow>
<QuestionLabel>selfepmloyed.now</QuestionLabel>
<Answer>No</Answer>
</SelfEmployedNow>
<RecentJobDetails>
<DateStarted>
<QuestionLabel>selfemployed.started</QuestionLabel>
<Answer>05-01-2010</Answer>
</DateStarted>
<NatureBusiness>
<QuestionLabel>selfemployed.business</QuestionLabel>
<Answer>Fruit and veg, delivery service</Answer>
</NatureBusiness>
<TradingYear>
<DateFrom>
<QuestionLabel>trading.from</QuestionLabel>
<Answer>05-01-2013</Answer>
</DateFrom>
<DateTo>
<QuestionLabel>trading.to</QuestionLabel>
<Answer>30-04-2014</Answer>
</DateTo>
</TradingYear>
<SameIncomeOutgoingLevels>
<QuestionLabel>selfemployed.level</QuestionLabel>
<Answer>No</Answer>
</SameIncomeOutgoingLevels>
<WhyWhenChange>
<QuestionLabel>selfemployed.level.change</QuestionLabel>
<Answer>Depends on the seasons, and productivity of the fruit.</Answer>
</WhyWhenChange>
<DateEnded>
<QuestionLabel>selfemployed.ended</QuestionLabel>
<Answer>01-09-2013</Answer>
</DateEnded>
<TradingCeased>
<QuestionLabel>selfemployed.ceased</QuestionLabel>
<Answer>Yes</Answer>
</TradingCeased>
</RecentJobDetails>
<!-- <CurrentJobDetails>
<DateStarted>
<QuestionLabel>QuestionLabel34</QuestionLabel>
<Answer>2006-05-04</Answer>
</DateStarted>
<NatureBusiness>
<QuestionLabel>QuestionLabel35</QuestionLabel>
<Answer>Answer23</Answer>
</NatureBusiness>
<TradingYear>
<DateFrom>
<QuestionLabel>QuestionLabel36</QuestionLabel>
<Answer>2006-05-04</Answer>
</DateFrom>
<DateTo>
<QuestionLabel>QuestionLabel37</QuestionLabel>
<Answer>2006-05-04</Answer>
</DateTo>
</TradingYear>
<SameIncomeOutgoingLevels>
<QuestionLabel>QuestionLabel38</QuestionLabel>
<Answer>Yes</Answer>
</SameIncomeOutgoingLevels>
<WhyWhenChange>
<QuestionLabel>QuestionLabel39</QuestionLabel>
<Answer>Answer25</Answer>
</WhyWhenChange>
</CurrentJobDetails>-->
<CareExpensesChildren>
<QuestionLabel>self.child.expenses</QuestionLabel>
<Answer>Yes</Answer>
</CareExpensesChildren>
<ChildCareExpenses>
<CarerName>
<QuestionLabel>self.child.carer</QuestionLabel>
<Answer>Mr John Smith</Answer>
</CarerName>
<Expense>
<Payment>
<QuestionLabel>self.child.care.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>120.00</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>self.child.care.frequency</QuestionLabel>
<Answer>Weekly</Answer>
</Frequency>
</Expense>
<RelationshipCarerToClaimant>
<QuestionLabel>self.child.rel.claimant</QuestionLabel>
<Answer>Uncle</Answer>
</RelationshipCarerToClaimant>
<RelationshipCarerToPartner>
<QuestionLabel>self.child.rel.partner</QuestionLabel>
<Other>None</Other>
<Answer>Other</Answer>
</RelationshipCarerToPartner>
<RelationToChild>
<QuestionLabel>self.child.rel.child</QuestionLabel>
<Answer>Uncle</Answer>
</RelationToChild>
</ChildCareExpenses>
<CareExpensesCaree>
<QuestionLabel>self.care.expenses</QuestionLabel>
<Answer>Yes</Answer>
</CareExpensesCaree>
<CareExpenses>
<CarerName>
<QuestionLabel>self.care.carer</QuestionLabel>
<Answer>Mrs Terry Smith</Answer>
</CarerName>
<Expense>
<Payment>
<QuestionLabel>self.care.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>400.00</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>self.care.frequency</QuestionLabel>
<Other>Other expenses frequency</Other>
<Answer>Other</Answer>
</Frequency>
</Expense>
<RelationshipCarerToClaimant>
<QuestionLabel>self.care.rel.claimant</QuestionLabel>
<Other>None</Other>
<Answer>Other</Answer>
</RelationshipCarerToClaimant>
<RelationshipCarerToPartner>
<QuestionLabel>self.care.rel.partner</QuestionLabel>
<Answer>Aunt</Answer>
</RelationshipCarerToPartner>
<RelationshipCarerToCaree>
<QuestionLabel>self.care.rel.caree</QuestionLabel>
<Answer>Aunt</Answer>
</RelationshipCarerToCaree>
</CareExpenses>
<PaidForPension>
<QuestionLabel>self.pension</QuestionLabel>
<Answer>Yes</Answer>
</PaidForPension>
<PensionScheme>
<Payment>
<QuestionLabel>self.pension.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>15.23</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>self.pension.frequency</QuestionLabel>
<Answer>Weekly</Answer>
</Frequency>
</PensionScheme>
</SelfEmployment>
<Employed>
<QuestionLabel>Have you been employed at any time since (this is six months before your claim date)?</QuestionLabel>
<Answer>Yes</Answer>
</Employed>
<Employment>
<CurrentlyEmployed>
<QuestionLabel>employed.currently</QuestionLabel>
<Answer>Yes</Answer>
</CurrentlyEmployed>
<JobDetails>
<Employer>
<DateJobStarted>
<QuestionLabel>job.started</QuestionLabel>
<Answer>01-01-2013</Answer>
</DateJobStarted>
<!-- <DateJobEnded>
<QuestionLabel>job.ended</QuestionLabel>
<Answer>2013-07-01</Answer>
</DateJobEnded> -->
<JobType>
<QuestionLabel>job.title</QuestionLabel>
<Answer>Hacker</Answer>
</JobType>
<ClockPayrollNumber>12345678</ClockPayrollNumber>
<Name>Tesco's</Name>
<Address>
<Line>23 Yeadon Way</Line>
<Line>Blackpool</Line>
<Line>Lancashire</Line>
<PostCode>FY4 5TH</PostCode>
</Address>
<EmployersPhoneNumber>01253 667889</EmployersPhoneNumber>
<HaveFinishedJob>
<QuestionLabel>job.finished</QuestionLabel>
<Answer>No</Answer>
</HaveFinishedJob>
<!-- <P45LeavingDate>
<QuestionLabel>job.p45</QuestionLabel>
<Answer>2013-07-01</Answer>
</P45LeavingDate> -->
</Employer>
<Pay>
<WeeklyHoursWorked>
<QuestionLabel>job.hours</QuestionLabel>
<Answer>25</Answer>
</WeeklyHoursWorked>
<DateLastPaid>
<QuestionLabel>job.lastpaid</QuestionLabel>
<Answer>02-07-2013</Answer>
</DateLastPaid>
<GrossPayment>
<QuestionLabel>job.pay</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>600.00</Amount>
</Answer>
</GrossPayment>
<IncludedInWage>
<QuestionLabel>job.pay.include</QuestionLabel>
<Answer>All amounts due</Answer>
</IncludedInWage>
<PayFrequency>
<QuestionLabel>job.pay.frequency</QuestionLabel>
<Answer>Four-Weekly</Answer>
</PayFrequency>
<UsualPayDay>
<QuestionLabel>job.day</QuestionLabel>
<Answer>Wednesday usually</Answer>
</UsualPayDay>
<ConstantEarnings>
<QuestionLabel>job.same.amount</QuestionLabel>
<Answer>Yes</Answer>
</ConstantEarnings>
</Pay>
<OweMoney>
<QuestionLabel>job.owe</QuestionLabel>
<Answer>No</Answer>
</OweMoney>
<CareExpensesChildren>
<QuestionLabel>chld.expenses</QuestionLabel>
<Answer>Yes</Answer>
</CareExpensesChildren>
<ChildCareExpenses>
<CarerName>
<QuestionLabel>child.carer</QuestionLabel>
<Answer>Mr Grandfather Senior</Answer>
</CarerName>
<Expense>
<Payment>
<QuestionLabel>child.care.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>120.12</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>child.care.frequency</QuestionLabel>
<Answer>Weekly</Answer>
</Frequency>
</Expense>
<RelationshipCarerToClaimant>
<QuestionLabel>child.care.rel.claimant</QuestionLabel>
<Answer>Father</Answer>
</RelationshipCarerToClaimant>
<RelationshipCarerToPartner>
<QuestionLabel>cild.care.rel.partner</QuestionLabel>
<Answer>Stepfather</Answer>
</RelationshipCarerToPartner>
<RelationToChild>
<QuestionLabel>child.care.rel.child</QuestionLabel>
<Answer>Grandfather</Answer>
</RelationToChild>
</ChildCareExpenses>
<CareExpensesCaree>
<QuestionLabel>care.expenses</QuestionLabel>
<Answer>Yes</Answer>
</CareExpensesCaree>
<CareExpenses>
<CarerName>
<QuestionLabel>care.carer</QuestionLabel>
<Answer>Carers UK Ltd</Answer>
</CarerName>
<Expense>
<Payment>
<QuestionLabel>care.carer.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>150.55</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>care.carer.frequency</QuestionLabel>
<Other>Annually</Other>
<Answer>Other</Answer>
</Frequency>
</Expense>
<RelationshipCarerToClaimant>
<QuestionLabel>care.carer.rel.claimant</QuestionLabel>
<Other>None</Other>
<Answer>Other</Answer>
</RelationshipCarerToClaimant>
<RelationshipCarerToPartner>
<QuestionLabel>care.carer.rel.partner</QuestionLabel>
<Other>None</Other>
<Answer>Other</Answer>
</RelationshipCarerToPartner>
<RelationshipCarerToCaree>
<QuestionLabel>care.carer.rel.caree</QuestionLabel>
<Other>None</Other>
<Answer>Other</Answer>
</RelationshipCarerToCaree>
</CareExpenses>
<PaidForOccupationalPension>
<QuestionLabel>pension.occupational</QuestionLabel>
<Answer>Yes</Answer>
</PaidForOccupationalPension>
<OccupationalPension>
<Payment>
<QuestionLabel>pension.occ.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>350.10</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>pension.occ.frequency</QuestionLabel>
<Other>Other frequency fo occupational scheme</Other>
<Answer>Other</Answer>
</Frequency>
</OccupationalPension>
<PaidForPersonalPension>
<QuestionLabel>pension.personal</QuestionLabel>
<Answer>Yes</Answer>
</PaidForPersonalPension>
<PersonalPension>
<Payment>
<QuestionLabel>pension.per.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>200.1</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>pension.per.frequency</QuestionLabel>
<Answer>Monthly</Answer>
</Frequency>
</PersonalPension>
<PaidForJobExpenses>
<QuestionLabel>job.expenses</QuestionLabel>
<Answer>Yes</Answer>
</PaidForJobExpenses>
<JobExpenses>
<Expense>
<QuestionLabel>job.expense</QuestionLabel>
<Answer>Petrol money for driving</Answer>
</Expense>
</JobExpenses>
<OtherEmployment>
<QuestionLabel>Other.eployment</QuestionLabel>
<Answer>No</Answer>
</OtherEmployment>
</JobDetails>
</Employment>
<HavePartner>
<QuestionLabel>Have you had a partner/spouse living with you since your claim date?</QuestionLabel>
<Answer>Yes</Answer>
</HavePartner>
<Partner>
<Surname>CaseThree</Surname>
<OtherNames>Test Middle clΓ©mence</OtherNames>
<OtherSurnames>Dixon JΓ£o SΓren GΓ³rnictwo</OtherSurnames>
<Title>Mrs</Title>
<DateOfBirth>28-09-1937</DateOfBirth>
<NationalInsuranceNumber>BA234567A</NationalInsuranceNumber>
<!-- <Address>
<Line>Line4</Line>
<PostCode>GIR 0AA</PostCode>
</Address>
<MobileNumber> </MobileNumber> -->
<DayTimePhoneNumber>0987654321</DayTimePhoneNumber>
<NationalityPartner>British</NationalityPartner>
<RelationshipStatus>
<SeparatedFromPartner>
<QuestionLabel>partner.separated</QuestionLabel>
<Answer>No</Answer>
</SeparatedFromPartner>
</RelationshipStatus>
<IsCaree>
<QuestionLabel>Is your partner/spouse the person you are claiming Carer's Allowance for?</QuestionLabel>
<Answer>No</Answer>
</IsCaree>
</Partner>
<OtherBenefits>
<ClaimantBenefits>
<StatePension>
<QuestionLabel>Do you get State Pension?</QuestionLabel>
<Answer>Yes</Answer>
</StatePension>
</ClaimantBenefits>
<OtherMoneySSP>
<QuestionLabel>ssp.money</QuestionLabel>
<Answer>Yes</Answer>
</OtherMoneySSP>
<OtherMoneySSPDetails>
<Payment>
<Payment>
<QuestionLabel>ssp.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>12</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>ssp.frequency</QuestionLabel>
<Other>Every day and twice on Sundays</Other>
<Answer>Other</Answer>
</Frequency>
</Payment>
<Name>Burger King</Name>
<Address>
<Line>102 Preston Road</Line>
<Line>Preston</Line>
<Line>Lancashire</Line>
<PostCode>PR45 6YH</PostCode>
</Address>
</OtherMoneySSPDetails>
<OtherMoneySP>
<QuestionLabel>sp.money</QuestionLabel>
<Answer>Yes</Answer>
</OtherMoneySP>
<OtherMoneySPDetails>
<Payment>
<Payment>
<QuestionLabel>sp.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>120</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>sp.frequency</QuestionLabel>
<Answer>Weekly</Answer>
</Frequency>
</Payment>
<Name>KFC</Name>
<Address>
<Line>104 Preston Road</Line>
<Line>Preston</Line>
<Line>Lancashire</Line>
<PostCode>PR45 6YH</PostCode>
</Address>
</OtherMoneySPDetails>
<OtherMoney>
<QuestionLabel>Have you or your Partner or Spouse claimed or received any other benefits since the date you want to claim?</QuestionLabel>
<Answer>Yes</Answer>
</OtherMoney>
<OtherMoneyDetails>
<Payment>
<Payment>
<QuestionLabel>Other.amount</QuestionLabel>
<Answer>
<Currency>GBP</Currency>
<Amount>123.57</Amount>
</Answer>
</Payment>
<Frequency>
<QuestionLabel>Other.frequency</QuestionLabel>
<Other>Quarterly</Other>
<Answer>Other</Answer>
</Frequency>
</Payment>
<Name>
<QuestionLabel>Other.who</QuestionLabel>
<Answer>The Man</Answer>
</Name>
</OtherMoneyDetails>
<EEA>
<EEAClaimPensionsBenefits>
<QuestionLabel>eea.pension</QuestionLabel>
<Answer>Yes</Answer>
</EEAClaimPensionsBenefits>
<EEAReceivePensionsBenefits>
<QuestionLabel>eea.pension</QuestionLabel>
<Answer>Yes</Answer>
</EEAReceivePensionsBenefits>
<EEAWorkingInsurance>
<QuestionLabel>eea.insurance</QuestionLabel>
<Answer>No</Answer>
</EEAWorkingInsurance>
</EEA>
</OtherBenefits>
<Payment>
<PaymentFrequency>
<QuestionLabel>payment.frequency</QuestionLabel>
<Answer>Weekly</Answer>
</PaymentFrequency>
<InitialAccountQuestion>
<QuestionLabel>payment.way</QuestionLabel>
<Answer>UK bank or building society</Answer>
</InitialAccountQuestion>
<Account>
<HolderName>Mr Test Casetwo</HolderName>
<BuildingSocietyDetails>
<AccountNumber>12345678</AccountNumber>
<!-- <RollNumber>RollNumber0</RollNumber> -->
<SortCode>090126</SortCode>
<Name>Santander</Name>
</BuildingSocietyDetails>
</Account>
</Payment>
<OtherInformation>
<WelshCommunication>
<QuestionLabel>welsh.communication</QuestionLabel>
<Answer>No</Answer>
</WelshCommunication>
<AdditionalInformation>
<QuestionLabel>anything.else</QuestionLabel>
<Answer>It takes too long to claim. But I can write a very long answer here. 2000 characters.</Answer>
</AdditionalInformation>
</OtherInformation>
<Declaration>
<DeclarationStatement>
<Content>The declarations below sets out your legal responsibilities in respect of your claim.</Content>
<Content>I declare that I understand the Carer's Allowance Claim Notes and that the information provided on this claim form is correct and complete.</Content>
<Content>I understand that I must report all changes in my circumstances or that of the person that I am caring for which may affect my entitlement promptly and by failing to do so I may be liable to prosecution or face a financial penalty.</Content>
<Content>I will phone 08456084321 or write to Carer's Allowance Unit, Palatine House, Preston, Lancaster, PR1 1HB to report a change in my circumstances or that of the person that I am caring for.</Content>
<Content>If I give false or incomplete information or fail to report changes in my circumstances or that of the person that I am caring for promptly, I understand that my Carer's Allowance may be stopped or reduced and any overpayment of Carer's Allowance may be recovered. In addition I may be prosecuted or face a financial penalty.</Content>
</DeclarationStatement>
<DeclarationQuestion>
<QuestionLabel>Please tick this box if this claim form has been filled in by someone else, if so, please ensure that you understand the declarations above as another person cannot make the declarations on your behalf.</QuestionLabel>
<Answer>Yes</Answer>
</DeclarationQuestion>
<DeclarationQuestion>
<QuestionLabel>Please tick this box to confirm that you understand and make the declarations above.</QuestionLabel>
<Answer>Yes</Answer>
</DeclarationQuestion>
</Declaration>
<Disclaimer>
<DisclaimerStatement>
<Content>I understand that if I am paid Carer's Allowance it may affect the benefits paid to DP's name I understand that if I am paid Carer's Allowance it may affect the benefits paid to DP's name>>I understand that if I am paid Carer's Allowance it may affect the benefits paid to DP's name</Content>
<Content>If the person you are caring for receives certain benefits, the amount they receive may be affected by your claim for Carer's Allowance. Because of this we need both of you to understand the potential consequences of your claim to Carer's Allowance.</Content>
<Content>If DP's name gets a Severe Disability Premium with their income-based Jobseeker's Allowance, Income Support, income-related Employment and Support Allowance, Housing Benefit, they may no longer get that premium if we pay Carer's Allowance to you.If DP's name>> gets a Severe Disability Premium with their income-based Jobseeker's Allowance, Income Support, income-related Employment and Support Allowance, Housing Benefit, they may no longer get that premium if we pay Carer's Allowance to you.If DP's name gets a Severe Disability Premium with their income-based Jobseeker's Allowance, Income Support, income-related Employment and Support Allowance, Housing Benefit, they may no longer get that premium if we pay Carer's Allowance to you.</Content>
<Content>If DP's name Pension Credit includes an extra amount for severe disability, they may no longer get that extra amount if we pay Carer's Allowance to you.If DP's name Pension Credit includes an extra amount for severe disability, they may no longer get that extra amount if we pay Carer's Allowance to you.If DP's name Pension Credit includes an extra amount for severe disability, they may no longer get that extra amount if we pay Carer's Allowance to you.</Content>
<Content>This could also affect any reduction in Council Tax DP's name may be entitled to. To find out more about it, please contact the Local Authority.This could also affect any reduction in Council Tax DP's name>> may be entitled to. To find out more about it, please contact the Local Authority.This could also affect any reduction in Council Tax DP's name may be entitled to. To find out more about it, please contact the Local Authority.</Content>
<Content>We will need to check DP's name entitlement to Disability Living Allowance, Personal Independence Payment, Attendance Allowance, Constant Attendance Allowance or Armed Forces Independence Payment when considering your claim.We will need to check DP's name entitlement to Disability Living Allowance, Personal Independence Payment, Attendance Allowance, Constant Attendance Allowance or Armed Forces Independence Payment when considering your claim.We will need to check DP's name entitlement to Disability Living Allowance, Personal Independence Payment, Attendance Allowance, Constant Attendance Allowance or Armed Forces Independence Payment when considering your claim.</Content>
<Content>We may contact DP's name or their representative to establish whether 35 hours caring per week is taking place.We may contact DP's name or their representative to establish whether 35 hours caring per week is taking place.We may contact DP's name or their representative to establish whether 35 hours caring per week is taking place.</Content>
</DisclaimerStatement>
<DisclaimerQuestion>
<QuestionLabel>Please tick this box to declare that you have understood the notes and you have made / will make the person you are caring for / or their representative aware that there could be a change to their benefits.</QuestionLabel>
<Answer>Yes</Answer>
</DisclaimerQuestion>
</Disclaimer>
<EvidenceList>
<RecipientAddress>
<Line>CA Freepost</Line>
<Line>Palatine House</Line>
<Line>Preston</Line>
<PostCode>PR1 1HN</PostCode>
</RecipientAddress>
<Evidence>
<Title>Document you need to send us</Title>
<Content>You must send us all the documents we ask for. If you do not, any benefit you may be entitled to my be delayed because of this claim. 1</Content>
<Content>You must send us all the documents we ask for. If you do not, any benefit you may be entitled to my be delayed because of this claim. 2</Content>
<Content>You must send us all the documents we ask for. If you do not, any benefit you may be entitled to my be delayed because of this claim. 3</Content>
</Evidence>
<Evidence>
<Title>Pay Details</Title>
<Content>You need to send us the last payslip before 10 Ocotber 2013 and all the payslips you have since then.</Content>
</Evidence>
<Evidence>
<Title>Statement Signed</Title>
<Content>You need to send us the completed and signed statement.</Content>
</Evidence>
</EvidenceList>
<Consents>
<Consent>
<QuestionLabel>Do you agree to us getting information from any current or previous employer you have told us about on this form?</QuestionLabel>
<Answer>Yes</Answer>
</Consent>
<Consent>
<QuestionLabel>Please tell us why</QuestionLabel>
<Answer>Yes</Answer>
</Consent>
<Consent>
<QuestionLabel></QuestionLabel>
<Answer>Yes</Answer>
</Consent>
<Consent>
<QuestionLabel>Please tell us why</QuestionLabel>
<Answer>Yes</Answer>
</Consent>
<Consent>
<QuestionLabel>Do you agree to us getting information from any other person or organisation you have told us about on this form?</QuestionLabel>
<Answer>Yes</Answer>
</Consent>
</Consents>
</DWPCAClaim>
</DWPCATransaction>
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>
<ds:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#dsa-sha1"/>
<ds:Reference URI="DWPCATransaction">
<ds:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<ds:DigestValue>SadVcIUbeepTfvhp2BzI2V3EPYo=</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>V6NzTYMiickLrbenHakT1UTnawk7CxWpqPtOh++XyCpg94LlWT682A==</ds:SignatureValue>
</ds:Signature>
</DWPBody>
}
/**
 * Builds a deliberately invalid claim submission for negative-path tests.
 * The body carries only the schema version and a minimal DWPCATransaction
 * (transaction id + generation timestamp) with no DWPCAClaim payload,
 * so it should be rejected by schema validation.
 * NOTE(review): the DateTimeGenerated format here ("02-10-2010 14:36") may
 * also be part of what makes this claim "bad" — confirm against the schema.
 */
def badClaim: Elem = {
<DWPBody xmlns:ds="http://www.w3.org/2000/09/xmldsig#" xmlns="http://www.govtalk.gov.uk/dwp/carers-allowance"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.govtalk.gov.uk/dwp/carers-allowance file:/Users/jmi/Temp/CarersAllowance_Schema.xsd">
<Version>{XMLData.LATEST_VERSION}</Version>
<DWPCATransaction>
<TransactionId>NFM33DB</TransactionId>
<DateTimeGenerated>02-10-2010 14:36</DateTimeGenerated>
</DWPCATransaction>
</DWPBody>
}
}
|
Department-for-Work-and-Pensions/RenderingService
|
test/test_data/ClaimBuilder.scala
|
Scala
|
mit
| 40,510 |
package com.mz.training.actions
import akka.actor.SupervisorStrategy.Stop
import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.util.Timeout
import com.mz.training.common.jdbc.JDBCConnectionActor
import com.mz.training.common.jdbc.JDBCConnectionActor.{Commit, Committed, Rollback}
import com.mz.training.domains.address.{AddressRepositoryActor, AddressServiceActor}
import com.mz.training.domains.user.UserServiceActor.{RegistrateUser, UserRegistrated}
import com.mz.training.domains.user.{UserRepositoryActor, UserServiceActor}
import scala.concurrent.duration._
/**
* Created by zemo on 27/10/15.
*/
/**
 * Per-request coordinator for user registration. Owns a dedicated JDBC
 * connection actor so the whole registration runs in a single transaction,
 * which is committed on success and rolled back on failure.
 */
class UserActionActor extends Actor with ActorLogging {

  // Dedicated connection: all repository work below shares one transaction.
  val jdbcConActor = context.actorOf(JDBCConnectionActor.props)
  context.watch(jdbcConActor)

  val userRepositoryProps = UserRepositoryActor.props(jdbcConActor)
  val addressRepositoryProps = AddressRepositoryActor.props(jdbcConActor)
  val addressService = AddressServiceActor.props(userRepositoryProps, addressRepositoryProps)
  val userService = context.actorOf(UserServiceActor.props(userRepositoryProps, addressService))
  context.watch(userService)

  implicit val timeout: Timeout = 5 seconds

  override def receive: Receive = {
    case RegistrateUser(user, address) => {
      // Forward the request and remember the original requester so we can
      // answer it once the transaction is committed or rolled back.
      userService ! RegistrateUser(user, address)
      context.become(registerUser(sender))
    }
  }

  /**
   * Awaits the outcome of the registration.
   * @param orgSender actor that originally requested the registration
   */
  private def registerUser(orgSender: ActorRef): Receive = {
    case UserRegistrated() => {
      log.debug("Registrate user - success!")
      jdbcConActor ! Commit
    }
    case akka.actor.Status.Failure(e) => {
      log.error(e, e.getMessage)
      jdbcConActor ! Rollback
      orgSender ! e
      // Transaction failed; this per-request actor has no further work to do.
      context.stop(self)
    }
    case Committed => {
      orgSender ! true
      // BUG FIX: `self ! Stop` sent the SupervisorStrategy.Stop *directive* as a
      // plain message, which no behaviour handles, so the actor was never
      // terminated and leaked after each successful registration.
      context.stop(self)
    }
  }
}
|
michalzeman/angular2-training
|
akka-http-server/src/main/scala/com/mz/training/actions/UserActionActor.scala
|
Scala
|
mit
| 1,745 |
package com.karasiq.shadowcloud.test.actors
import java.nio.file.Files
import akka.actor.PoisonPill
import akka.pattern.ask
import akka.stream.scaladsl.{Keep, Source}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.util.ByteString
import com.karasiq.common.encoding.{Base64, HexString}
import com.karasiq.shadowcloud.actors.ChunkIODispatcher.ChunkPath
import com.karasiq.shadowcloud.actors.RegionDispatcher.{GetFileAvailability, ReadChunk, WriteChunk}
import com.karasiq.shadowcloud.actors._
import com.karasiq.shadowcloud.actors.events.StorageEvents
import com.karasiq.shadowcloud.actors.messages.StorageEnvelope
import com.karasiq.shadowcloud.index.IndexData
import com.karasiq.shadowcloud.index.diffs.{FolderIndexDiff, IndexDiff}
import com.karasiq.shadowcloud.storage._
import com.karasiq.shadowcloud.storage.props.StorageProps
import com.karasiq.shadowcloud.storage.props.StorageProps.Quota
import com.karasiq.shadowcloud.storage.replication.ChunkWriteAffinity
import com.karasiq.shadowcloud.storage.repository.wrappers.PathNodesMapper
import com.karasiq.shadowcloud.storage.repository.{PathTreeRepository, Repository}
import com.karasiq.shadowcloud.storage.utils.IndexMerger.RegionKey
import com.karasiq.shadowcloud.storage.utils.{IndexIOResult, IndexMerger, IndexRepositoryStreams}
import com.karasiq.shadowcloud.test.utils.{CoreTestUtils, SCExtensionSpec, TestUtils}
import org.scalatest.{FlatSpecLike, SequentialNestedSuiteExecution}
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration._
//noinspection TypeAnnotation
// Uses local filesystem
/**
 * Integration-style spec for RegionDispatcher: writes/reads/deduplicates
 * chunks, replicates to a second storage, and exercises index
 * synchronization, deletion and compaction. Tests run sequentially and
 * share the actors created below.
 *
 * BUG FIX: the file contained mojibake arrow characters ("β") where the
 * Scala arrows `=>` (case clauses / lambdas) and `->` (Map entries) belong;
 * the class did not compile. All arrows restored.
 */
class RegionDispatcherTest extends SCExtensionSpec with FlatSpecLike with SequentialNestedSuiteExecution {
  val testRegionId = "testRegion"
  val testStorageId = "testStorage"
  val chunk = TestUtils.testChunk
  val folder = CoreTestUtils.randomFolder()
  val folderDiff = FolderIndexDiff.createFolders(folder)

  // Index repository on a temp directory; path nodes are Base64-encoded.
  val indexRepository = Repository.forIndex(
    PathTreeRepository.toCategorized(PathNodesMapper.encode(Repositories.fromDirectory(Files.createTempDirectory("vrt-index")), Base64))
  )
  val chunksDir = Files.createTempDirectory("vrt-chunks")
  val fileRepository = Repository.forChunks(PathTreeRepository.toCategorized(Repositories.fromDirectory(chunksDir)))
  val storageProps = StorageProps.fromDirectory(chunksDir.getParent)
  val index = system.actorOf(StorageIndex.props(testStorageId, storageProps, indexRepository), "index")
  val chunkIO = system.actorOf(ChunkIODispatcher.props(testStorageId, storageProps, fileRepository), "chunkIO")
  // Cap writable space at 100 MiB so health numbers are deterministic-ish.
  val healthProvider = StorageHealthProviders.fromDirectory(chunksDir, Quota.empty.copy(limitSpace = Some(100L * 1024 * 1024)))
  val initialHealth = healthProvider.health.futureValue
  val storage = system.actorOf(StorageDispatcher.props(testStorageId, storageProps, index, chunkIO, healthProvider), "storage")
  val testRegion = system.actorOf(RegionDispatcher.props(testRegionId, CoreTestUtils.regionConfig(testRegionId)), testRegionId)

  "Virtual region" should "write chunk" in {
    storageSubscribe()
    // Write chunk and verify the storage events it should trigger.
    (testRegion ? WriteChunk(chunk)).futureValue shouldBe WriteChunk.Success(chunk, chunk)
    receiveWhile(idle = 3 seconds) {
      case StorageEnvelope(`testStorageId`, StorageEvents.ChunkWritten(ChunkPath(`testRegionId`, chunk.checksum.hash), writtenChunk)) =>
        writtenChunk shouldBe chunk
      // NOTE(review): unbacktick'd testStorageId here *binds* (matches any
      // storage id) rather than testing equality — kept as in the original.
      case StorageEnvelope(testStorageId, StorageEvents.HealthUpdated(health)) =>
        if (health.usedSpace != 0) {
          health.totalSpace shouldBe initialHealth.totalSpace
          health.usedSpace shouldBe (initialHealth.usedSpace + chunk.checksum.encSize)
          health.writableSpace shouldBe (initialHealth.writableSpace - chunk.checksum.encSize)
        }
      case StorageEnvelope(`testStorageId`, StorageEvents.PendingIndexUpdated(`testRegionId`, diff)) =>
        diff.folders shouldBe empty
        diff.time should be > TestUtils.testTimestamp
        diff.chunks.newChunks shouldBe Set(chunk)
        diff.chunks.deletedChunks shouldBe empty
    }
    // The chunk must be persisted under its hash in the file repository.
    val storedChunks = fileRepository.subRepository(testRegionId).keys.runWith(TestSink.probe)
    storedChunks.requestNext(chunk.checksum.hash)
    storedChunks.expectComplete()
    storageUnsubscribe()
  }

  it should "read chunk" in {
    testRegion ! ReadChunk(chunk.withoutData)
    val result = fishForSpecificMessage(5 seconds) { case ReadChunk.Success(_, result) => result }
    result shouldBe chunk
    result.data.encrypted shouldBe chunk.data.encrypted
  }

  it should "deduplicate chunk" in {
    // Same plaintext, different encryption parameters: the region should
    // recognize the duplicate and return the already-stored chunk.
    val wrongChunk = chunk.copy(
      encryption = CoreTestUtils.aesEncryption.createParameters(),
      data = chunk.data.copy(encrypted = TestUtils.randomBytes(chunk.data.plain.length))
    )
    wrongChunk shouldNot be(chunk)
    val result = testRegion ? WriteChunk(wrongChunk)
    result.futureValue shouldBe WriteChunk.Success(chunk, chunk)
  }

  it should "create availability report" in {
    val report = (testRegion ? GetFileAvailability(folder.files.head.copy(chunks = Seq(chunk)))).mapTo[GetFileAvailability.Success].futureValue
    report.result.chunksByStorage shouldBe Map(testStorageId -> Set(chunk))
    report.result.percentagesByStorage shouldBe Map(testStorageId -> 100.0)
  }

  it should "repair chunk" in {
    // Create a second, in-memory storage to replicate into.
    val indexMap = TrieMap.empty[(String, String), ByteString]
    val chunksMap = TrieMap.empty[(String, String), ByteString]
    val indexRepository = Repository.forIndex(Repository.toCategorized(Repositories.fromConcurrentMap(indexMap)))
    val chunkRepository = Repository.forChunks(Repository.toCategorized(Repositories.fromConcurrentMap(chunksMap)))
    val index = system.actorOf(StorageIndex.props("testMemStorage", storageProps, indexRepository), "index1")
    val chunkIO = system.actorOf(ChunkIODispatcher.props("testMemStorage", storageProps, chunkRepository), "chunkIO1")
    val healthProvider = StorageHealthProviders.fromMaps(indexMap, chunksMap)
    val initialHealth = healthProvider.health.futureValue
    val newStorage = system.actorOf(StorageDispatcher.props("testMemStorage", storageProps, index, chunkIO, healthProvider), "storage1")
    testRegion ! RegionDispatcher.AttachStorage("testMemStorage", storageProps, newStorage, initialHealth)
    expectNoMessage(1 second)
    // Rewrite with a mandatory affinity covering both storages.
    val result = testRegion ? RegionDispatcher.RewriteChunk(chunk, Some(ChunkWriteAffinity(mandatory = Seq(testStorageId, "testMemStorage"))))
    result.futureValue shouldBe WriteChunk.Success(chunk, chunk)
    chunksMap.head shouldBe ((testRegionId, HexString.encode(chunk.checksum.hash)), chunk.data.encrypted)
    // Detach and kill the temporary storage again.
    testRegion ! RegionDispatcher.DetachStorage("testMemStorage")
    newStorage ! PoisonPill
    expectNoMessage(1 second)
  }

  it should "add folder" in {
    storageSubscribe()
    val diff = FolderIndexDiff.createFolders(folder)
    testRegion ! RegionDispatcher.WriteIndex(diff)
    receiveWhile(25 seconds) {
      case RegionDispatcher.WriteIndex.Success(`diff`, result) =>
        result.time shouldBe >(TestUtils.testTimestamp)
        assert(FolderIndexDiff.equalsIgnoreOrder(result.folders, folderDiff))
        // result.chunks.newChunks shouldBe Set(chunk)
        result.chunks.deletedChunks shouldBe empty
      case StorageEnvelope(storageId, StorageEvents.PendingIndexUpdated(regionId, diff)) =>
        storageId shouldBe testStorageId
        regionId shouldBe testRegionId
        assert(FolderIndexDiff.equalsIgnoreOrder(diff.folders, folderDiff))
    }
  }

  it should "write index" in {
    (testRegion ? RegionDispatcher.Synchronize).futureValue
    // First locally-produced (remote = false) index diff, sequence number 1.
    val StorageEnvelope(`testStorageId`, StorageEvents.IndexUpdated(`testRegionId`, sequenceNr, diff, remote)) = receiveOne(5 seconds)
    sequenceNr shouldBe 1L
    diff.time shouldBe >(TestUtils.testTimestamp)
    assert(FolderIndexDiff.equalsIgnoreOrder(diff.folders, folderDiff))
    diff.chunks.newChunks shouldBe Set(chunk)
    diff.chunks.deletedChunks shouldBe empty
    remote shouldBe false
    expectNoMessage(1 second)
    storageUnsubscribe()
  }

  it should "read index" in {
    val streams = IndexRepositoryStreams(testRegionId, CoreTestUtils.storageConfig(testStorageId), system)
    val regionRepo = indexRepository.subRepository(testRegionId)
    // Write index entry #2 out-of-band, directly through the repository streams.
    val remoteDiff = CoreTestUtils.randomDiff
    val (sideWrite, sideWriteResult) = TestSource
      .probe[(Long, IndexData)]
      .via(streams.write(regionRepo))
      .toMat(TestSink.probe)(Keep.both)
      .run()
    sideWrite.sendNext((2, IndexData(`testRegionId`, 2L, remoteDiff)))
    sideWrite.sendComplete()
    val IndexIOResult(2, IndexData(`testRegionId`, 2L, `remoteDiff`), StorageIOResult.Success(_, _)) = sideWriteResult.requestNext()
    sideWriteResult.expectComplete()
    // Synchronize: the dispatcher should pick it up as a remote diff.
    storageSubscribe()
    (testRegion ? RegionDispatcher.Synchronize).futureValue
    val StorageEnvelope(`testStorageId`, StorageEvents.IndexUpdated(`testRegionId`, 2L, `remoteDiff`, true)) = receiveOne(5 seconds)
    expectNoMessage(1 second)
    // Verify both diffs are present in the region index.
    storage ! StorageIndex.Envelope(testRegionId, RegionIndex.GetIndex)
    val RegionIndex.GetIndex.Success(_, IndexMerger.State(Seq((1L, firstDiff), (2L, `remoteDiff`)), IndexDiff.empty)) = receiveOne(1 second)
    assert(FolderIndexDiff.equalsIgnoreOrder(firstDiff.folders, folderDiff))
    firstDiff.chunks.newChunks shouldBe Set(chunk)
    firstDiff.chunks.deletedChunks shouldBe empty
    // Delete entry #1 and confirm only #2 survives.
    whenReady(Source.single(1L).runWith(regionRepo.delete)) { deleteResult =>
      deleteResult.isSuccess shouldBe true
      (testRegion ? RegionDispatcher.Synchronize).futureValue
      val StorageEnvelope(`testStorageId`, StorageEvents.IndexDeleted(`testRegionId`, sequenceNrs)) = receiveOne(5 seconds)
      sequenceNrs shouldBe Set(1L)
      storage ! StorageIndex.Envelope(testRegionId, RegionIndex.GetIndex)
      val RegionIndex.GetIndex.Success(_, IndexMerger.State(Seq((2L, `remoteDiff`)), IndexDiff.empty)) = receiveOne(1 second)
      expectNoMessage(1 second)
      testRegion ! RegionDispatcher.GetIndexSnapshot()
      val RegionDispatcher.GetIndexSnapshot.Success(_, IndexMerger.State(Seq((RegionKey(_, `testStorageId`, 2L), `remoteDiff`)), _)) =
        receiveOne(1 second)
    }
    storageUnsubscribe()
  }

  it should "compact index" in {
    // Read current state (only diff #2 after the deletion above).
    storage ! StorageIndex.Envelope(testRegionId, RegionIndex.GetIndex)
    val RegionIndex.GetIndex.Success(_, IndexMerger.State(Seq((2L, oldDiff)), IndexDiff.empty)) = receiveOne(1 second)
    // Write diff #3.
    val newDiff = CoreTestUtils.randomDiff.folders
    testRegion ! RegionDispatcher.WriteIndex(newDiff)
    val RegionDispatcher.WriteIndex.Success(`newDiff`, _) = receiveOne(1 second)
    // Compact: all diffs should merge into a single new sequence number.
    storage ! StorageIndex.Envelope(testRegionId, RegionIndex.Compact)
    (storage ? StorageIndex.Envelope(testRegionId, RegionIndex.Synchronize)).futureValue
    expectNoMessage(1 second)
    // Verify the merged diff.
    storage ! StorageIndex.GetIndexes
    val StorageIndex.GetIndexes.Success(`testStorageId`, states) = receiveOne(1 seconds)
    states.keySet shouldBe Set(testRegionId)
    val IndexMerger.State(Seq((4L, resultDiff)), IndexDiff.empty) = states(testRegionId)
    resultDiff.folders shouldBe oldDiff.folders.merge(newDiff)
    resultDiff.chunks shouldBe oldDiff.chunks
    resultDiff.time should be > oldDiff.time
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    storage ! StorageIndex.OpenIndex(testRegionId)
    testRegion ! RegionDispatcher.AttachStorage(testStorageId, storageProps, storage, initialHealth)
    awaitAssert(RegionDispatcher.GetHealth.unwrapFuture(testRegion ? RegionDispatcher.GetHealth).futureValue shouldBe 'fullyOnline, 30 seconds, 1 second)
  }

  private def storageUnsubscribe(): Unit = {
    sc.eventStreams.storage.unsubscribe(testActor)
  }

  private def storageSubscribe(): Unit = {
    sc.eventStreams.storage.subscribe(testActor, testStorageId)
  }
}
|
Karasiq/shadowcloud
|
core/assembly/src/test/scala/com/karasiq/shadowcloud/test/actors/RegionDispatcherTest.scala
|
Scala
|
apache-2.0
| 12,133 |
package tuner.gui
import scala.swing.Frame
import tuner.Config
import tuner.Tuner
import tuner.project.Project
/**
 * Base class for Tuner's top-level windows. Each window registers itself
 * as an event source with the Tuner object on construction.
 *
 * @param project the project this window presents
 */
abstract class Window(val project:Project) extends Frame {

  // register this window with Tuner
  Tuner.listenTo(this)

  /** Shows the window and raises it above other windows (side-effecting). */
  def toFront: Unit = {
    visible = true
    // Side-effecting Java call: keep the explicit () per convention.
    peer.toFront()
  }
}
|
gabysbrain/tuner
|
src/main/scala/tuner/gui/Window.scala
|
Scala
|
mit
| 296 |
package controllers
import com.google.inject.Inject
import models.BusinessDetailsModel
import models.CacheKeyPrefix
import models.ConfirmFormModel
import models.EligibilityModel
import models.PaymentModel
import models.RetainModel
import models.VehicleAndKeeperLookupFormModel
import org.apache.commons.codec.binary.Base64
import play.api.mvc.{Action, Controller, Request, Result}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.control.NonFatal
import uk.gov.dvla.vehicles.presentation.common
import common.LogFormats.{DVLALogger, anonymize}
import common.clientsidesession.{ClearTextClientSideSessionFactory, ClientSideSessionFactory}
import common.clientsidesession.CookieImplicits.{RichCookies, RichResult}
import common.filters.CsrfPreventionAction.CsrfPreventionToken
import common.model.VehicleAndKeeperDetailsModel
import utils.helpers.Config
import views.vrm_retention.Payment.PaymentTransNoCacheKey
import views.vrm_retention.RelatedCacheKeys.removeCookiesOnExit
import views.vrm_retention.VehicleLookup.TransactionIdCacheKey
import webserviceclients.audit2.AuditRequest
import webserviceclients.paymentsolve.PaymentSolveBeginRequest
import webserviceclients.paymentsolve.PaymentSolveCancelRequest
import webserviceclients.paymentsolve.PaymentSolveGetRequest
import webserviceclients.paymentsolve.PaymentSolveService
import webserviceclients.paymentsolve.RefererFromHeader
final class Payment @Inject()(paymentSolveService: PaymentSolveService,
refererFromHeader: RefererFromHeader,
auditService2: webserviceclients.audit2.AuditService)
(implicit clientSideSessionFactory: ClientSideSessionFactory,
config: Config,
dateService: common.services.DateService) extends Controller with DVLALogger {
/**
 * Entry point of the payment journey. Proceeds to the payment web service
 * only when the transaction id, vehicle lookup, eligibility and confirm-form
 * cookies are all present AND no retention has been recorded yet; otherwise
 * the user is sent back to the confirm page.
 */
def begin = Action.async { implicit request =>
  val cookies = request.cookies
  val prerequisites = (
    cookies.getString(TransactionIdCacheKey),
    cookies.getModel[VehicleAndKeeperLookupFormModel],
    cookies.getModel[EligibilityModel],
    cookies.getModel[RetainModel],
    cookies.getModel[ConfirmFormModel]
  )
  prerequisites match {
    case (Some(transactionId), Some(vehiclesLookupForm), Some(_), None, Some(_)) =>
      // Everything present and nothing retained yet: start the payment.
      callBeginWebPaymentService(transactionId, vehiclesLookupForm.registrationNumber)
    case _ =>
      // A prerequisite is missing (or retention already happened).
      Future.successful(Redirect(routes.Confirm.present()))
  }
}
/**
 * Return URL invoked by the payment provider. The CSRF token is validated in
 * the common project; here we only log and hand over to getWebPayment, which
 * queries the payment micro-service for the authorisation result.
 */
def callback(token: String) = Action.async { implicit request =>
  logMessage(
    request.cookies.trackingId(),
    Info,
    "Callback method on Payment controller invoked. " +
      "We have now returned from Logic Group payment pages and will now call getWebPayment on the Payment ms " +
      "to check if the payment was authorised..."
  )
  Future.successful(Redirect(routes.Payment.getWebPayment()))
}
/**
 * Queries the payment micro-service for the result of the web payment.
 * Any missing session state (transaction id, payment model, or the payment
 * provider's transaction reference) is treated as a payment failure rather
 * than an unhandled exception.
 */
def getWebPayment = Action.async { implicit request =>
  (request.cookies.getString(TransactionIdCacheKey), request.cookies.getModel[PaymentModel]) match {
    case (Some(transactionId), Some(paymentModel)) =>
      paymentModel.trxRef match {
        case Some(trxRef) =>
          callGetWebPaymentService(transactionId, trxRef, paymentModel.isPrimaryUrl)
        case None =>
          // BUG FIX: was paymentModel.trxRef.get, which threw
          // NoSuchElementException (surfacing a 500) instead of showing the
          // payment failure page when the reference was never stored.
          Future.successful(
            paymentFailure("Payment getWebPayment missing transaction reference in PaymentModel cookie")
          )
      }
    case _ => Future.successful {
      paymentFailure(
        "Payment getWebPayment missing TransactionIdCacheKey or PaymentTransactionReferenceCacheKey cookie"
      )
    }
  }
}
/**
 * The user cancelled on the payment provider's pages: audit the exit
 * (PaymentToExit) and redirect to the leave-feedback page. Missing session
 * cookies are treated as a payment failure.
 */
def cancel = Action.async { implicit request =>
(request.cookies.getString(TransactionIdCacheKey), request.cookies.getModel[PaymentModel]) match {
case (Some(transactionId), Some(paymentModel)) =>
val trackingId = request.cookies.trackingId()
// Record the cancellation with whatever journey details are still in cookies.
auditService2.send(
AuditRequest.from(
trackingId = trackingId,
pageMovement = AuditRequest.PaymentToExit,
transactionId = transactionId,
timestamp = dateService.dateTimeISOChronology,
documentReferenceNumber = request.cookies.getModel[VehicleAndKeeperLookupFormModel].map(_.referenceNumber),
vehicleAndKeeperDetailsModel = request.cookies.getModel[VehicleAndKeeperDetailsModel],
// NOTE(review): .get throws if the eligibility cookie has expired by the
// time the user cancels — consider .map(_.replacementVRM) instead.
replacementVrm = Some(request.cookies.getModel[EligibilityModel].get.replacementVRM),
keeperEmail = request.cookies.getModel[ConfirmFormModel].flatMap(_.keeperEmail),
businessDetailsModel = request.cookies.getModel[BusinessDetailsModel]
), trackingId
)
Future.successful {
redirectToLeaveFeedback
}
case _ => Future.successful {
paymentFailure("Payment cancel missing TransactionIdCacheKey or PaymentTransactionReferenceCacheKey cookie")
}
}
}
/**
 * Logs and audits a payment failure, then redirects to the PaymentFailure
 * page. Every cookie read here is optional: this method runs on error paths
 * where session state may be partially missing, so it must never throw.
 *
 * @param message reason for the failure; logged and attached to the audit event
 */
private def paymentFailure(message: String)(implicit request: Request[_]) = {
  logMessage(request.cookies.trackingId(), Error, message)
  val trackingId = request.cookies.trackingId()
  auditService2.send(
    AuditRequest.from(
      trackingId = trackingId,
      pageMovement = AuditRequest.PaymentToPaymentFailure,
      transactionId = request.cookies.getString(TransactionIdCacheKey)
        .getOrElse(ClearTextClientSideSessionFactory.DefaultTrackingId.value),
      timestamp = dateService.dateTimeISOChronology,
      documentReferenceNumber = request.cookies.getModel[VehicleAndKeeperLookupFormModel].map(_.referenceNumber),
      vehicleAndKeeperDetailsModel = request.cookies.getModel[VehicleAndKeeperDetailsModel],
      // BUG FIX: was Some(....get.replacementVRM), which threw
      // NoSuchElementException inside the *failure* handler whenever the
      // eligibility cookie was absent; map keeps the field optional.
      replacementVrm = request.cookies.getModel[EligibilityModel].map(_.replacementVRM),
      keeperEmail = request.cookies.getModel[ConfirmFormModel].flatMap(_.keeperEmail),
      businessDetailsModel = request.cookies.getModel[BusinessDetailsModel],
      paymentModel = request.cookies.getModel[PaymentModel],
      rejectionCode = Some(message)
    ), trackingId
  )
  Redirect(routes.PaymentFailure.present())
}
/**
 * Starts a web payment with the payment solve service and, on success,
 * renders the payment view embedding the provider's redirect URL in an
 * iframe. Any non-OK/non-card-details outcome, exception, or missing
 * Referer header becomes a payment failure.
 *
 * @param transactionId the current retention transaction id
 * @param vrm registration mark being retained (anonymized in logs)
 */
private def callBeginWebPaymentService(transactionId: String, vrm: String)
(implicit request: Request[_],
token: CsrfPreventionToken): Future[Result] = {
refererFromHeader.fetch match {
case Some(referer) =>
// The CSRF token travels in the callback URL, base64url-encoded.
val tokenBase64URLSafe = Base64.encodeBase64URLSafeString(token.value.getBytes)
val paymentCallback = refererFromHeader.paymentCallbackUrl(
referer = referer,
tokenBase64URLSafe = tokenBase64URLSafe
)
// NOTE(review): .get assumes the trans-no cookie was set earlier in the
// journey; it throws if the cookie expired — TODO confirm upstream guard.
val transNo = request.cookies.getString(PaymentTransNoCacheKey).get
val paymentSolveBeginRequest = PaymentSolveBeginRequest(
transactionId = transactionId,
transNo = transNo,
vrm = vrm,
purchaseAmount = config.purchaseAmountInPence.toInt,
paymentCallback = paymentCallback
)
val trackingId = request.cookies.trackingId()
paymentSolveService.invoke(paymentSolveBeginRequest, trackingId).map {
// Provider is ready to take card details: render the iframe page.
case (OK, response) if response.beginResponse.status == Payment.CardDetailsStatus =>
val msg = "Presenting payment view with embedded iframe source set to the following redirectUrl " +
s"from Logic Group: ${response.redirectUrl.get}. We are now entering Logic Group payment pages."
logMessage(request.cookies.trackingId(), Info, msg)
Ok(views.html.vrm_retention.payment(paymentRedirectUrl = response.redirectUrl.get))
.withCookie(PaymentModel.from(trxRef = response.trxRef.get, isPrimaryUrl = response.isPrimaryUrl))
// The POST from payment service will not contain a REFERER in the header, so use a cookie.
.withCookie(REFERER, routes.Payment.begin().url)
case (_, response) =>
paymentFailure(s"The begin web request to Solve encountered a problem with request " +
s"${anonymize(vrm)}, response: ${response.beginResponse.response}, " +
s"status: ${response.beginResponse.status}, redirect to PaymentFailure")
}.recover {
// Non-fatal only: OOM/interrupts keep propagating.
case NonFatal(e) =>
paymentFailure(
message = s"Payment Solve web service call with paymentSolveBeginRequest failed. Exception " + e.toString
)
}
case _ => Future.successful(paymentFailure(message = "Payment callBeginWebPaymentService no referer"))
}
}
// Audits a declined payment and redirects the user to the PaymentNotAuthorised page,
// carrying the current PaymentModel forward in a cookie.
private def paymentNotAuthorised(trxRef: String)(implicit request: Request[_]) = {
val msg = s"Payment not authorised for ${anonymize(trxRef)}, redirecting to PaymentNotAuthorised"
logMessage(request.cookies.trackingId(), Debug, msg)
// NOTE(review): .get assumes the PaymentModel and EligibilityModel cookies exist by the
// time a payment can be declined - confirm this invariant holds for all entry paths.
val paymentModel = request.cookies.getModel[PaymentModel].get
val trackingId = request.cookies.trackingId()
auditService2.send(
AuditRequest.from(
trackingId = trackingId,
pageMovement = AuditRequest.PaymentToPaymentNotAuthorised,
transactionId = request.cookies.getString(TransactionIdCacheKey)
.getOrElse(ClearTextClientSideSessionFactory.DefaultTrackingId.value),
timestamp = dateService.dateTimeISOChronology,
documentReferenceNumber = request.cookies.getModel[VehicleAndKeeperLookupFormModel].map(_.referenceNumber),
vehicleAndKeeperDetailsModel = request.cookies.getModel[VehicleAndKeeperDetailsModel],
replacementVrm = Some(request.cookies.getModel[EligibilityModel].get.replacementVRM),
keeperEmail = request.cookies.getModel[ConfirmFormModel].flatMap(_.keeperEmail),
businessDetailsModel = request.cookies.getModel[BusinessDetailsModel],
paymentModel = Some(paymentModel)
), trackingId
)
Redirect(routes.PaymentNotAuthorised.present()).withCookie(paymentModel)
}
// Queries the Solve payment service for the outcome of a web payment. When the payment
// is AUTHORISED the PaymentModel cookie is enriched with the card/settlement details and
// the user is redirected to retain the mark; any other status goes to the not-authorised
// flow, and service failures go to the payment-failure page.
private def callGetWebPaymentService(transactionId: String, trxRef: String, isPrimaryUrl: Boolean)
(implicit request: Request[_]): Future[Result] = {
// NOTE(review): .get assumes the transaction-number cookie is present here - confirm.
val transNo = request.cookies.getString(PaymentTransNoCacheKey).get
val paymentSolveGetRequest = PaymentSolveGetRequest(
transNo = transNo,
trxRef = trxRef,
isPrimaryUrl = isPrimaryUrl
)
val trackingId = request.cookies.trackingId()
paymentSolveService.invoke(paymentSolveGetRequest, trackingId).map {
case (OK, response) if response.getResponse.status == Payment.AuthorisedStatus =>
// Copy the provider's settlement details onto the (mutable) payment model so they
// travel with the user into the retain step.
val paymentModel = request.cookies.getModel[PaymentModel].get
paymentModel.authCode = response.authcode
paymentModel.maskedPAN = response.maskedPAN
paymentModel.cardType = response.cardType
paymentModel.merchantId = response.merchantTransactionId
paymentModel.paymentType = response.paymentType
paymentModel.totalAmountPaid = response.purchaseAmount
paymentModel.paymentStatus = Some(Payment.AuthorisedStatus)
val msg = "The payment was successfully authorised, now redirecting to retain - " +
s"status: ${response.getResponse.status}, response: ${response.getResponse.response}."
logMessage(request.cookies.trackingId(), Info, msg)
Redirect(routes.Retain.retain())
.discardingCookie(REFERER) // Not used again.
.withCookie(paymentModel)
case (_, response) =>
logMessage(request.cookies.trackingId(), Error,
"The payment was not authorised - " +
s"status: ${response.getResponse.status}, response: ${response.getResponse.response}.")
paymentNotAuthorised(trxRef)
}.recover {
// Transport/service failure: treat as payment failure rather than not-authorised.
case NonFatal(e) =>
paymentFailure(message = "Payment Solve web service call with paymentSolveGetRequest failed: " + e.toString)
}
}
// Cancels an in-flight web payment with the Solve service, audits the exit, then
// redirects to the leave-feedback page. Problems cancelling are logged but never block
// the user from exiting the journey.
private def callCancelWebPaymentService(transactionId: String, trxRef: String, isPrimaryUrl: Boolean)
(implicit request: Request[_]): Future[Result] = {
// NOTE(review): .get assumes the transaction-number cookie is present here - confirm.
val transNo = request.cookies.getString(PaymentTransNoCacheKey).get
val paymentSolveCancelRequest = PaymentSolveCancelRequest(
transNo = transNo,
trxRef = trxRef,
isPrimaryUrl = isPrimaryUrl
)
val trackingId = request.cookies.trackingId()
paymentSolveService.invoke(paymentSolveCancelRequest, trackingId).map { response =>
// response is a (status, payload) pair; only the payload's status/response are used.
if (response._2.status == Payment.CancelledStatus) {
logMessage(trackingId, Info, "The web request to Solve was cancelled.")
} else {
logMessage(trackingId, Error, "The cancel was not successful, " +
s"response: ${response._2.response}, status: ${response._2.status}.")
}
// Record that the user left the journey from the payment page.
auditService2.send(
AuditRequest.from(
trackingId = trackingId,
pageMovement = AuditRequest.PaymentToExit,
transactionId = request.cookies.getString(TransactionIdCacheKey)
.getOrElse(ClearTextClientSideSessionFactory.DefaultTrackingId.value),
timestamp = dateService.dateTimeISOChronology,
documentReferenceNumber = request.cookies.getModel[VehicleAndKeeperLookupFormModel].map(_.referenceNumber),
vehicleAndKeeperDetailsModel = request.cookies.getModel[VehicleAndKeeperDetailsModel],
replacementVrm = Some(request.cookies.getModel[EligibilityModel].get.replacementVRM),
keeperEmail = request.cookies.getModel[ConfirmFormModel].flatMap(_.keeperEmail),
businessDetailsModel = request.cookies.getModel[BusinessDetailsModel]
),
trackingId
)
redirectToLeaveFeedback
}.recover {
// Even if the cancel call itself fails, still let the user leave.
case NonFatal(e) =>
logMessage(
trackingId,
Error,
"Payment Solve web service call with paymentSolveCancelRequest failed. Exception " + e.toString
)
redirectToLeaveFeedback
}
}
// Final exit point: clears the journey cookies and sends the user to the feedback page.
private def redirectToLeaveFeedback(implicit request: Request[_]) = {
Redirect(routes.LeaveFeedback.present()).
discardingCookies(removeCookiesOnExit)
}
}
// Status codes returned by the Solve payment service for the begin/get/cancel calls.
object Payment {
final val CardDetailsStatus = "CARD_DETAILS"
final val AuthorisedStatus = "AUTHORISED"
final val CancelledStatus = "CANCELLED"
final val SettledStatus = "SETTLED"
}
|
dvla/vrm-retention-online
|
app/controllers/Payment.scala
|
Scala
|
mit
| 14,143 |
package org.finra.datagenerator.scaffolding.config
/**
* Created by dkopel on 12/13/16.
*/
/**
 * A named group of configuration definitions keyed by config name.
 *
 * @param name the bundle's identifier
 * @param defs the configuration definitions contained in this bundle
 */
case class ConfigBundle(name: ConfigBundleName, defs: Map[ConfigName, ConfigDefinition[_]]) {
// Look up a definition by key. NOTE(review): throws NoSuchElementException for an
// unknown key, and the type parameter T is unused - confirm both are intentional.
def apply[T](key: ConfigName): ConfigDefinition[_] = defs(key)
}
|
FINRAOS/DataGenerator
|
rubber-scaffolding/rubber-commons/src/main/scala/org/finra/datagenerator/scaffolding/config/ConfigBundle.scala
|
Scala
|
apache-2.0
| 258 |
package com.jensraaby.sxt
import org.scalatest.prop.Checkers
import org.scalatest.{FlatSpec, Matchers}
trait SXTSuite extends FlatSpec with Matchers with Checkers
|
jensraaby/SXT
|
src/test/scala/com/jensraaby/sxt/SXTSuite.scala
|
Scala
|
apache-2.0
| 165 |
package info.andreaswolf.roadhopper.simulation
import akka.actor.{ActorLogging, ActorRef, Actor}
import akka.pattern.ask
import com.graphhopper.util.shapes.GHPoint3D
import info.andreaswolf.roadhopper.road.{RoadSegment, Route}
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Future, ExecutionContext}
case class RequestRoadAhead(position: Int)
/**
 * Reply to [[RequestRoadAhead]]: the upcoming road segments at a simulation time.
 *
 * @param time      the simulation time the answer refers to
 * @param roadParts the segments ahead, starting at the current position
 * @param isEnding  true when the route ends within the returned segments
 */
case class RoadAhead(time: Int, roadParts: List[RoadSegment], isEnding: Boolean = false) {
  // Total length of all returned segments.
  def length: Double = roadParts.foldLeft(0.0)((acc, segment) => acc + segment.length)
}
// Drives a vehicle actor along a fixed route in discrete simulation steps: it answers
// RequestRoadAhead queries with the upcoming segments, tracks which segment the vehicle
// is currently on (turning the vehicle at segment boundaries), and tells the timer to
// stop once the vehicle has come to rest at the route's end.
class TwoStepJourneyActor(val timer: ActorRef, val vehicle: ActorRef, val route: Route)
extends SimulationActor with ActorLogging {
// Total route length (sum of all part lengths).
val length = route.parts.map(_.length).sum
// Mutable journey state: the segments still ahead, the segment being travelled, and the
// distance covered up to (but excluding) the current segment.
var remainingSegments = route.parts.tail
var currentSegment = route.parts.head
var travelledUntilCurrentSegment = 0.0
var currentTime = 0
// Last known vehicle position (start of the first segment ahead), if computed yet.
var currentPosition: Option[GHPoint3D] = None
// Set to false when the journey finishes; stepAct() then becomes a no-op.
var active = true
registerReceiver({
case RequestRoadAhead(travelledDistance) =>
// TODO dynamically calculate the distance to get (e.g. based on speed) or get it passed with the request
// check if we have probably advanced past the current segment
checkCurrentSegment(travelledDistance)
// make sure we only get segments after the current segment
val remainingOnCurrentSegment = currentSegment.length - (travelledDistance - travelledUntilCurrentSegment)
// if the length to get is 0, we will be on the current segment for all of the look-ahead distance
var lengthToGet = Math.max(0, 150.0 - remainingOnCurrentSegment)
val offsetOnCurrentSegment = travelledDistance - travelledUntilCurrentSegment
val segmentsAhead = new ListBuffer[RoadSegment]
// rare edge case: we travelled exactly to the end of the segment => we must skip it here
if (remainingOnCurrentSegment > 0.0) {
segmentsAhead append RoadSegment.fromExisting(offsetOnCurrentSegment, currentSegment)
}
// Append whole upcoming segments until the look-ahead distance is covered.
remainingSegments.foreach(segment => {
if (lengthToGet > 0) {
segmentsAhead append segment
lengthToGet -= segment.length
}
})
currentPosition = Some(segmentsAhead.head.start)
// if there are no more journey parts left after the current ones, this journey will end
val journeyEndsAfterFilteredSegments: Boolean = remainingSegments.length == segmentsAhead.length - 1
sender ! RoadAhead(currentTime, segmentsAhead.toList, journeyEndsAfterFilteredSegments)
log.debug(f"Travelled until here: $travelledDistance, LengthToGet: $lengthToGet%.2f;" +
f" got length: ${segmentsAhead.toList.map(_.length).sum}%.2f;" +
f" segments: ${segmentsAhead.length - 1}/${remainingSegments.length}")
// inform the vehicle about its current position (= the start of the first road segment ahead)
vehicle ! UpdatePosition(segmentsAhead.head.start)
})
/**
 * Handler for [[Start]] messages: schedules this actor's first time step.
 * <p/>
 * The simulation will only continue after the Future has been completed. You can, but donβt need to override this
 * method in your actor. If you donβt override it, the step will be completed immediately (by the successful Future
 * returned)
 */
override def start()(implicit exec: ExecutionContext): Future[Any] = timer ? ScheduleStep(10, self)
/**
 * Handler for [[StepUpdate]] messages: pushes the last known position to the vehicle.
 * <p/>
 * The simulation will only continue after the Future has been completed. You can, but donβt need to override this
 * method in your actor. If you donβt override it, the step will be completed immediately (by the successful Future
 * returned)
 */
override def stepUpdate()(implicit exec: ExecutionContext): Future[Any] = Future {
if (currentPosition.isDefined) {
vehicle ? UpdatePosition(currentPosition.get)
} else {
Future.successful()
}
}
/**
 * Handler for [[StepAct]] messages: checks the vehicle's status, stops the simulation
 * once the vehicle is stationary near the route's end, and schedules the next step.
 * <p/>
 * The simulation will only continue after the Future has been completed. You can, but donβt need to override this
 * method in your actor. If you donβt override it, the step will be completed immediately (by the successful Future
 * returned)
 */
override def stepAct()(implicit exec: ExecutionContext): Future[Any] = {
if (!active) {
return Future.successful()
}
val futures = new ListBuffer[Future[Any]]()
val statusFuture: Future[JourneyStatus] = (vehicle ? GetStatus()).asInstanceOf[Future[JourneyStatus]]
futures.append(statusFuture)
// react to the journey status we got
futures.append(statusFuture flatMap { status: JourneyStatus =>
Future {
log.debug(f"to travel: ${Math.abs(status.travelledDistance - length)}%.2f")
// Journey ends when the vehicle has stopped within 1.5 units of the route's end.
if (status.vehicleState.speed == 0.0 && Math.abs(status.travelledDistance - length) < 1.5) {
active = false
timer ! Stop()
}
}
})
futures.append(timer ? ScheduleStep(time + 10, self))
Future.sequence(futures.toList)
}
/**
 * Checks if we are still on the current segment or if we moved beyond it and need to adjust the segment
 * and the vehicle orientation
 *
 * @param position The current position on the journey, i.e. along the road to travel
 * @return true if we are still within the road to travel, false if the journey has ended
 */
def checkCurrentSegment(position: Double): Boolean = {
// are we at or beyond the current segmentβs end?
if (travelledUntilCurrentSegment + currentSegment.length - position > 10e-3) {
return true
}
// there are more segments ahead, so just continue with the next one
if (remainingSegments.nonEmpty) {
travelledUntilCurrentSegment += currentSegment.length
val nextSegment = remainingSegments.head
// instruct the vehicle to turn to the new segment
vehicle ! Turn(currentSegment.calculateNecessaryTurn(nextSegment))
currentSegment = nextSegment
remainingSegments = remainingSegments.tail
log.debug("RoadSegment ended, new segment length: " + currentSegment.length.formatted("%.2f"))
log.debug("Remaining segments: " + remainingSegments.length)
true
} else {
log.info("Journey ended after " + travelledUntilCurrentSegment + " (not accurate!)")
// the shutdown will only be executed when all existing messages have been processed; therefore, we only tell the
// timer to stop, but leave shutting down the system up to it
timer ! Stop()
false
}
}
}
|
andreaswolf/roadhopper
|
src/main/scala/info/andreaswolf/roadhopper/simulation/JourneyActor.scala
|
Scala
|
mit
| 6,220 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.data
import java.text.SimpleDateFormat
import java.util.TimeZone
import org.apache.accumulo.core.client.BatchWriterConfig
import org.apache.accumulo.core.data.{Range => aRange}
import org.apache.accumulo.core.security.Authorizations
import org.geotools.data._
import org.geotools.factory.Hints
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.filter.text.cql2.CQL
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithDataStore
import org.locationtech.geomesa.accumulo.data.tables.{GeoMesaTable, RecordTable}
import org.locationtech.geomesa.accumulo.index.{AttributeIdxStrategy, QueryStrategyDecider}
import org.locationtech.geomesa.features.avro.AvroSimpleFeatureFactory
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.BeforeExample
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class AccumuloFeatureWriterTest extends Specification with TestWithDataStore with BeforeExample {
override def before = clearTablesHard()
sequential
val spec = "name:String:index=true,age:Integer,dtg:Date,geom:Geometry:srid=4326"
val sdf = new SimpleDateFormat("yyyyMMdd")
sdf.setTimeZone(TimeZone.getTimeZone("Zulu"))
val dateToIndex = sdf.parse("20140102")
val geomToIndex = WKTUtils.read("POINT(45.0 49.0)")
"AccumuloFeatureWriter" should {
"provide ability to update a single feature that it wrote and preserve feature IDs" in {
/* create a feature */
val originalFeature1 = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "id1")
originalFeature1.setDefaultGeometry(geomToIndex)
originalFeature1.setAttribute("name", "fred")
originalFeature1.setAttribute("age", 50.asInstanceOf[Any])
/* create a second feature */
val originalFeature2 = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "id2")
originalFeature2.setDefaultGeometry(geomToIndex)
originalFeature2.setAttribute("name", "tom")
originalFeature2.setAttribute("age", 60.asInstanceOf[Any])
/* create a third feature */
val originalFeature3 = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "id3")
originalFeature3.setDefaultGeometry(geomToIndex)
originalFeature3.setAttribute("name", "kyle")
originalFeature3.setAttribute("age", 2.asInstanceOf[Any])
addFeatures(Seq(originalFeature1, originalFeature2, originalFeature3))
/* turn fred into billy */
val filter = CQL.toFilter("name = 'fred'")
fs.modifyFeatures(Array("name", "age"), Array("billy", 25.asInstanceOf[AnyRef]), filter)
/* delete kyle */
val deleteFilter = CQL.toFilter("name = 'kyle'")
fs.removeFeatures(deleteFilter)
/* query everything */
val cqlFilter = Filter.INCLUDE
/* Let's read out what we wrote...we should only get tom and billy back out */
val features = fs.getFeatures(new Query(sftName, Filter.INCLUDE)).features().toSeq
features must haveSize(2)
features.map(f => (f.getAttribute("name"), f.getAttribute("age"))) must
containTheSameElementsAs(Seq(("tom", 60), ("billy", 25)))
features.map(f => (f.getAttribute("name"), f.getID)) must
containTheSameElementsAs(Seq(("tom", "id2"), ("billy", "id1")))
}
"be able to replace all features in a store using a general purpose FeatureWriter" in {
/* repopulate it */
val c = new DefaultFeatureCollection
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"))
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"))
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"))
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"))
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5"))
val writer = ds.getFeatureWriter(sftName, Transaction.AUTO_COMMIT)
c.foreach {f =>
val writerCreatedFeature = writer.next()
writerCreatedFeature.setAttributes(f.getAttributes)
writerCreatedFeature.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
writerCreatedFeature.getUserData.put(Hints.PROVIDED_FID, f.getID)
writer.write()
}
writer.close()
val features = fs.getFeatures(Filter.INCLUDE).features().toSeq
features must haveSize(5)
features.map(f => (f.getAttribute("name"), f.getID)) must
containTheSameElementsAs(Seq(("will", "fid1"), ("george", "fid2"), ("sue", "fid3"), ("karen", "fid4"), ("bob", "fid5")))
}
"be able to update all features based on some ecql" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val filter = CQL.toFilter("(age > 50 AND age < 99) or (name = 'karen')")
fs.modifyFeatures(Array("age"), Array(60.asInstanceOf[AnyRef]), filter)
val updated = fs.getFeatures(ECQL.toFilter("age = 60")).features.toSeq
updated.map(f => (f.getAttribute("name"), f.getAttribute("age"))) must
containTheSameElementsAs(Seq(("will", 60), ("karen", 60), ("bob", 60)))
updated.map(f => (f.getAttribute("name"), f.getID)) must
containTheSameElementsAs(Seq(("will", "fid1"), ("karen", "fid4"), ("bob", "fid5")))
}
"provide ability to remove features" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val writer = ds.getFeatureWriter(sftName, Filter.INCLUDE, Transaction.AUTO_COMMIT)
while (writer.hasNext) {
writer.next()
writer.remove()
}
writer.close()
val features = fs.getFeatures(Filter.INCLUDE).features().toSeq
features must beEmpty
forall(GeoMesaTable.getTableNames(sft, ds)) { name =>
val scanner = connector.createScanner(name, new Authorizations())
try {
scanner.iterator().hasNext must beFalse
} finally {
scanner.close()
}
}
}
"provide ability to add data inside transactions" in {
val c = new DefaultFeatureCollection
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Array("dude1", 15.asInstanceOf[AnyRef], null, geomToIndex), "fid10"))
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Array("dude2", 16.asInstanceOf[AnyRef], null, geomToIndex), "fid11"))
c.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Array("dude3", 17.asInstanceOf[AnyRef], null, geomToIndex), "fid12"))
val trans = new DefaultTransaction("trans1")
fs.setTransaction(trans)
try {
fs.addFeatures(c)
trans.commit()
val features = fs.getFeatures(ECQL.toFilter("(age = 15) or (age = 16) or (age = 17)")).features().toSeq
features.map(f => (f.getAttribute("name"), f.getAttribute("age"))) must
containTheSameElementsAs(Seq(("dude1", 15), ("dude2", 16), ("dude3", 17)))
} catch {
case e: Exception =>
trans.rollback()
throw e
} finally {
trans.close()
fs.setTransaction(Transaction.AUTO_COMMIT)
}
}
"provide ability to remove inside transactions" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("dude1", 15.asInstanceOf[AnyRef], null, geomToIndex), "fid10"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("dude2", 16.asInstanceOf[AnyRef], null, geomToIndex), "fid11"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("dude3", 17.asInstanceOf[AnyRef], null, geomToIndex), "fid12")
)
addFeatures(toAdd)
val trans = new DefaultTransaction("trans1")
fs.setTransaction(trans)
try {
fs.removeFeatures(CQL.toFilter("name = 'dude1' or name='dude2' or name='dude3'"))
trans.commit()
val features = fs.getFeatures(Filter.INCLUDE).features().toSeq
features must haveSize(3)
features.map(f => f.getAttribute("name")) must containTheSameElementsAs(Seq("will", "george", "sue"))
} catch {
case e: Exception =>
trans.rollback()
throw e
} finally {
trans.close()
fs.setTransaction(Transaction.AUTO_COMMIT)
}
}
"issue delete keys when geometry changes" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val filter = CQL.toFilter("name = 'bob' or name = 'karen'")
val writer = ds.getFeatureWriter(sftName, filter, Transaction.AUTO_COMMIT)
while (writer.hasNext) {
val sf = writer.next
sf.setDefaultGeometry(WKTUtils.read("POINT(50.0 50)"))
writer.write()
}
writer.close()
// Verify old geo bbox doesn't return them
val features45 = fs.getFeatures(ECQL.toFilter("BBOX(geom, 44.9,48.9,45.1,49.1)")).features().toSeq
features45.map(_.getAttribute("name")) must containTheSameElementsAs(Seq("will", "george", "sue"))
// Verify that new geometries are written with a bbox query that uses the index
val features50 = fs.getFeatures(ECQL.toFilter("BBOX(geom, 49.9,49.9,50.1,50.1)")).features().toSeq
features50.map(_.getAttribute("name")) must containTheSameElementsAs(Seq("bob", "karen"))
// get them all
val all = fs.getFeatures(ECQL.toFilter("BBOX(geom, 44.0,44.0,51.0,51.0)")).features().toSeq
all.map(_.getAttribute("name")) must containTheSameElementsAs(Seq("will", "george", "sue", "bob", "karen"))
// get none
val none = fs.getFeatures(ECQL.toFilter("BBOX(geom, 30.0,30.0,31.0,31.0)")).features().toSeq
none must beEmpty
}
"issue delete keys when datetime changes" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val filter = CQL.toFilter("name = 'will' or name='george'")
val writer = ds.getFeatureWriter(sftName, filter, Transaction.AUTO_COMMIT)
val newDate = sdf.parse("20140202")
while (writer.hasNext) {
val sf = writer.next
sf.setAttribute("dtg", newDate)
writer.write()
}
writer.close()
// Verify old daterange doesn't return them
val jan = fs.getFeatures(ECQL.toFilter("dtg DURING 2013-12-29T00:00:00Z/2014-01-04T00:00:00Z")).features().toSeq
jan.map(_.getAttribute("name")) must containTheSameElementsAs(Seq("sue", "bob", "karen"))
// Verify new date range returns things
val feb = fs.getFeatures(ECQL.toFilter("dtg DURING 2014-02-01T00:00:00Z/2014-02-03T00:00:00Z")).features().toSeq
feb.map(_.getAttribute("name")) must containTheSameElementsAs(Seq("will","george"))
// Verify large date range returns everything
val all = fs.getFeatures(ECQL.toFilter("dtg DURING 2014-01-01T00:00:00Z/2014-02-03T00:00:00Z")).features().toSeq
all.map(_.getAttribute("name")) must containTheSameElementsAs(Seq("will", "george", "sue", "bob", "karen"))
// Verify other date range returns nothing
val none = fs.getFeatures(ECQL.toFilter("dtg DURING 2013-01-01T00:00:00Z/2013-12-31T00:00:00Z")).features().toSeq
none must beEmpty
}
"verify that start end times are excluded in filter" in { // TODO this should be moved somewhere else...
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val afterFilter = fs.getFeatures(ECQL.toFilter("dtg AFTER 2014-02-02T00:00:00Z")).features.toSeq
afterFilter must beEmpty
val beforeFilter = fs.getFeatures(ECQL.toFilter("dtg BEFORE 2014-01-02T00:00:00Z")).features.toSeq
beforeFilter must beEmpty
}
"ensure that feature IDs are not changed when spatiotemporal indexes change" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val writer = ds.getFeatureWriter(sftName, Filter.INCLUDE, Transaction.AUTO_COMMIT)
val newDate = sdf.parse("20120102")
while (writer.hasNext) {
val sf = writer.next
sf.setAttribute("dtg", newDate)
sf.setDefaultGeometry(WKTUtils.read("POINT(10.0 10.0)"))
writer.write()
}
writer.close()
val features = fs.getFeatures(Filter.INCLUDE).features().toSeq
features.size mustEqual toAdd.size
val compare = features.sortBy(_.getID).zip(toAdd.sortBy(_.getID))
forall(compare) { case (updated, original) =>
updated.getID mustEqual original.getID
updated.getDefaultGeometry must not be equalTo(original.getDefaultGeometry)
updated.getAttribute("dtg") must not be equalTo(original.getAttribute("dtg"))
}
}
"verify delete and add same key works" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
addFeatures(toAdd)
val filter = CQL.toFilter("name = 'will'")
val hints = ds.strategyHints(sft)
val q = new Query(sft.getTypeName, filter)
QueryStrategyDecider.chooseStrategies(sft, q, hints, None).head must beAnInstanceOf[AttributeIdxStrategy]
import org.locationtech.geomesa.utils.geotools.Conversions._
// Retrieve Will's ID before deletion.
val featuresBeforeDelete = fs.getFeatures(filter).features().toSeq
featuresBeforeDelete must haveSize(1)
val willId = featuresBeforeDelete.head.getID
fs.removeFeatures(filter)
// NB: We really need a test which reads from the attribute table directly since missing records entries
// will result in attribute queries
// This verifies that 'will' has been deleted from the attribute table.
val attributeTableFeatures = fs.getFeatures(filter).features().toSeq
attributeTableFeatures must beEmpty
// This verifies that 'will' has been deleted from the record table.
val recordTableFeatures =fs.getFeatures(ECQL.toFilter(s"IN('$willId')")).features().toSeq
recordTableFeatures must beEmpty
// This verifies that 'will' has been deleted from the ST idx table.
val stTableFeatures = fs.getFeatures(ECQL.toFilter("BBOX(geom, 44.0,44.0,51.0,51.0)")).features().toSeq
stTableFeatures.count(_.getID == willId) mustEqual 0
val featureCollection = new DefaultFeatureCollection(sftName, sft)
val geom = WKTUtils.read("POINT(10.0 10.0)")
val date = sdf.parse("20120102")
/* create a feature */
featureCollection.add(AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], date, geom), "fid1"))
fs.addFeatures(featureCollection)
val features =fs.getFeatures(filter).features().toSeq
features must haveSize(1)
}
"create z3 based uuids" in {
val toAdd = Seq(
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("will", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid1"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("george", 33.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid2"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("sue", 99.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid3"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("karen", 50.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid4"),
AvroSimpleFeatureFactory.buildAvroFeature(sft, Seq("bob", 56.asInstanceOf[AnyRef], dateToIndex, geomToIndex), "fid5")
)
// space out the adding slightly so we ensure they sort how we want - resolution is to the ms
// also ensure we don't set use_provided_fid
toAdd.foreach { f =>
val featureCollection = new DefaultFeatureCollection(sftName, sft)
f.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.FALSE)
f.getUserData.remove(Hints.PROVIDED_FID)
featureCollection.add(f)
// write the feature to the store
fs.addFeatures(featureCollection)
Thread.sleep(2)
}
val scanner = ds.connector.createScanner(ds.getTableName(sftName, RecordTable), new Authorizations)
val serializer = new KryoFeatureSerializer(sft)
val rows = scanner.toList
scanner.close()
// trim off table prefix to get the UUIDs
val rowKeys = rows.map(_.getKey.getRow.toString).map(r => r.substring(r.length - 36))
rowKeys must haveLength(5)
// ensure that the z3 range is the same
rowKeys.map(_.substring(0, 18)).toSet must haveLength(1)
// ensure that the second part of the UUID is random
rowKeys.map(_.substring(19)).toSet must haveLength(5)
val ids = rows.map(e => serializer.deserialize(e.getValue.get).getID)
ids must haveLength(5)
forall(ids)(_ must not(beMatching("fid\\\\d")))
// ensure they share a common prefix, since they have the same dtg/geom
ids.map(_.substring(0, 18)).toSet must haveLength(1)
// ensure that the second part of the UUID is random
ids.map(_.substring(19)).toSet must haveLength(5)
}
}
// Test helper: wipes every row from all GeoMesa tables backing `sft` by running
// a full-range BatchDeleter against each table. Destructive - test use only.
def clearTablesHard(): Unit = {
GeoMesaTable.getTables(sft).map(ds.getTableName(sft.getTypeName, _)).foreach { name =>
// 5 query threads; default batch-writer config for the delete side
val deleter = connector.createBatchDeleter(name, new Authorizations(), 5, new BatchWriterConfig())
deleter.setRanges(Seq(new aRange())) // unbounded range = every row
deleter.delete()
deleter.close()
}
}
}
|
drackaer/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/data/AccumuloFeatureWriterTest.scala
|
Scala
|
apache-2.0
| 22,902 |
package models
import play.api._
import play.api.db._
import play.api.Play.current
import anorm._
import anorm.SqlParser._
import java.util.Date
import java.io.{ IOException, FileOutputStream, FileInputStream, File }
import java.util.zip.{ ZipEntry, ZipInputStream }
import play.api.templates.Html
// Minimal lookup-table rows (database id + display name) backing the
// `site` and `currency` tables; see the companion objects below.
case class Site(id: Long, name: String)
case class Currency(id: Long, name: String)
object Extensions {
// Enriches String with view-layer helpers.
class StringW(s: String) {
// Converts newlines to <br> tags and wraps the result as raw Html.
// NOTE(review): the input is not HTML-escaped - callers must pass trusted text.
def nl2br = Html(s.replace("\\n", "<br>"))
}
// Implicit conversion from String to the enriched StringW.
// Fix: explicit return type added - implicit methods without one rely on
// fragile type inference and are rejected by later Scala versions.
implicit def pimpString(s: String): StringW = new StringW(s)
}
object Misc{
// Random string generation helpers, adapted from
// http://www.bindschaedler.com/2012/04/07/elegant-random-string-generation-in-scala/
// Backed by SecureRandom so the output is suitable for passwords.
val random = new scala.util.Random(new java.security.SecureRandom())
// Generate a random string of length n drawn uniformly from the given alphabet.
def randomString(alphabet: String)(n: Int): String =
Stream.continually(random.nextInt(alphabet.size)).map(alphabet).take(n).mkString
// Generate a random alphanumeric string of length n.
// Fix: the previous alphabet ended in "...XYS", duplicating 'S' and omitting 'Z',
// which skewed the distribution and made 'Z' impossible.
def randomAlphanumericString(n: Int) =
randomString("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")(n)
// end random password generation
}
object Utils{
// Zero-padded three-digit number formatter (pattern "000") with an apostrophe
// as the grouping separator (Swiss convention).
val formatter = {
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.text.NumberFormat;
val symbols= DecimalFormatSymbols.getInstance();
symbols.setGroupingSeparator('\\'');
new DecimalFormat("000",symbols)
}
// Generates a random password of length n from BigInteger's base-32 alphabet
// ([0-9a-v]), using a cryptographically strong RNG.
// Fix: a single 130-bit number can occasionally render to fewer than n base-32
// digits (leading zero bits), which previously made substring(0, n) throw.
// We now accumulate digits until at least n are available.
def generatePassword(n:Int = 5):String = {
import java.security.SecureRandom;
import java.math.BigInteger;
val random = new SecureRandom()
val digits = new StringBuilder
while (digits.length < n) {
digits.append(new BigInteger(130, random).toString(32))
}
digits.substring(0, n)
}
// Deployment constants (live-server values; testing alternatives kept below).
val path ="/var/data-museum/" // live server path...
val url = "data/" // live server data file url
//val path ="/var/www/html/data-museum/" //testing data url in apache public html
//val url = "http://192.168.1.77/data-museum/" // testing - on port 80 @ arx
//val url = "http://roggwil.akehir.com/data-museum/" // testing - on port 80 @ mobi
val site_id = 1
// Flat administration fee; presumably a monetary amount - TODO confirm unit/currency with billing code.
val admin_fees:Double = 400.0
// Reads the `install_flag` column from the `system` table.
// NOTE(review): `.head` throws if the table is empty - assumes the installer
// always seeds exactly one row; confirm.
def flag() : Option[Int] = {
DB.withConnection { implicit c =>
val row =
SQL("""
SELECT install_flag
FROM system
""")
.apply().head
row[Option[Int]]("install_flag")
}
}
/*
 * Renders key/value pairs as a JSON array of {"k": ..., "v": ...} objects.
 * Values are embedded verbatim (no JSON escaping), matching existing behaviour;
 * an empty input yields "[]".
 */
def returnJSONfromSeq(in: Seq[(String,String)]): String = {
val objects = in.map { case (key, value) =>
"{\\"k\\":\\"" + key + "\\",\\"v\\":\\"" + value + "\\"}"
}
objects.mkString("[", ",", "]")
}
/*
duplicate a DB row by copying it through a temporary table
@arg table: table from which row needs to be copied (interpolated directly into
            SQL - must never come from user input)
@arg id: id to be copied
@return: id of the newly inserted row (None if none found)
NOTE(review): the CREATE/UPDATE/INSERT/DROP sequence is not transactional, so a
concurrent insert between steps could collide on the computed id - confirm usage.
*/
def duplicateRow(id: Long, table: String) : Option[Long] = {
val query:String = "CREATE TABLE table_temp SELECT * FROM "+table+" WHERE id={id}"
doQuery("DROP TABLE IF EXISTS table_temp")
DB.withConnection{implicit c =>
SQL(query)
.on('id -> id) //'
.executeUpdate()
}
// assign the copy the next free id (current max id + 1)
doQuery("UPDATE table_temp SET id = (1+(SELECT id FROM "+table+" ORDER BY id DESC LIMIT 0,1))")
doQuery("INSERT INTO "+table+" SELECT * FROM table_temp")
// get new id
val new_id = getLastId(table)
doQuery("DROP TABLE IF EXISTS table_temp")
new_id
}
// Executes a raw SQL statement with no bound parameters. The query string is
// used verbatim, so callers must not pass user-controlled input.
def doQuery(query: String){
DB.withConnection{implicit c =>
SQL(query)
.executeUpdate()
}
}
// Returns the highest id in `table` (descending order, first row).
// NOTE(review): `.head` throws on an empty table; the table name is
// interpolated, not bound - keep internal.
def getLastId(table: String) : Option[Long] = {
DB.withConnection { implicit c =>
val row =
SQL("""
SELECT id
FROM """+table+"""
WHERE 1
ORDER BY id DESC
LIMIT 0,1
""")
.apply().head
row[Option[Long]]("id")
}
}
// File-storage helpers: zip extraction, upload/serve/delete of files stored
// on disk under `path` and tracked in per-table DB rows.
object File{
import java.io.{OutputStream, InputStream, File, FileOutputStream}
import java.util.zip.{ZipEntry, ZipFile}
import scala.collection.JavaConversions._
val BUFSIZE = 4096
// Shared copy buffer - NOTE(review): reused across calls, so unzip/saveFile
// are not safe to run concurrently; confirm single-threaded usage.
val buffer = new Array[Byte](BUFSIZE)
// Extracts every entry of the zip at `source` into `targetFolder`.
def unZip(source: String, targetFolder: String) = {
val zipFile = new ZipFile(source)
unzipAllFile(zipFile.entries.toList, getZipEntryInputStream(zipFile)_, new File(targetFolder))
}
def getZipEntryInputStream(zipFile: ZipFile)(entry: ZipEntry) = zipFile.getInputStream(entry)
// Recursively writes each zip entry to disk (directories created, files copied).
def unzipAllFile(entryList: List[ZipEntry], inputGetter: (ZipEntry) => InputStream, targetFolder: File): Boolean = {
entryList match {
case entry :: entries =>
if (entry.isDirectory)
new File(targetFolder, entry.getName).mkdirs
else
saveFile(inputGetter(entry), new FileOutputStream(new File(targetFolder, entry.getName)))
unzipAllFile(entries, inputGetter, targetFolder)
case _ =>
true
}
}
// Copies `fis` to `fos` using the shared buffer, then closes both streams.
def saveFile(fis: InputStream, fos: OutputStream) = {
writeToFile(bufferReader(fis)_, fos)
fis.close
fos.close
}
// Reads one buffer's worth from the stream; returns (bytesRead, buffer).
def bufferReader(fis: InputStream)(buffer: Array[Byte]) = (fis.read(buffer), buffer)
// Pumps the reader into `fos` until EOF (read length < 0). Tail-recursive.
def writeToFile(reader: (Array[Byte]) => Tuple2[Int, Array[Byte]], fos: OutputStream): Boolean = {
val (length, data) = reader(buffer)
if (length >= 0) {
fos.write(data, 0, length)
writeToFile(reader, fos)
} else
true
}
// Moves the uploaded request body to the canonical file path for (id, table).
// NOTE(review): `resultString` (success/error message) is computed but unused -
// the method always returns "{success: true}", even on failure; confirm intent.
def upload(request: play.api.mvc.Request[play.api.libs.Files.TemporaryFile], id: Long, table: String) : String = {
import java.io.File
val resultString = try {
val file = new File(getFilename(id, table))
request.body.moveTo(file, true)
"file has been uploaded"
} catch {
case e: Exception => "an error has occurred while uploading the file"
}
"{success: true}"
}
// Looks up the row matching (id, timestamp) and returns (fieldValue, filePath),
// or None when no such row exists. Table name is interpolated, not bound.
def serve(id: Long, timestamp: String, table: String, fieldname: String = "name") : Option[(String,String)] ={
try{
DB.withConnection { implicit c =>
val row =
SQL("""
SELECT id, timestamp, name
FROM """+table+"""
WHERE 1
AND id={id}
AND timestamp={timestamp}
ORDER BY id DESC
LIMIT 0,1
""")
.on(
'id -> id,
'timestamp -> timestamp
)
.apply().head
val url = getFilename(row[Long]("id"), table)
Some(row[String](fieldname),url)
}
}
catch{
case ex: NoSuchElementException => {
None
}
}
}
// Removes the record from the database and the backing file from disk.
def delete(id: Long, table: String) {
// 1. remove from database
DB.withConnection { implicit c =>
SQL("""
DELETE FROM """+table+"""
WHERE id={id}
""")
.on('id -> id)
.executeUpdate
}
// 2. remove from server
import java.io.File
val file = new File(getFilename(id, table))
file.delete()
}
// Canonical on-disk location for (table, id): "<path><table>.<id>".
def getFilename(id: Long, table: String) : String = {
println(path+table+"."+id.toString)
path+table+"."+id.toString
}
}
object MySQL{
// Counts rows in `table` whose `field` equals `value` and returns true when
// there are none (i.e. the value is still available).
// NOTE(review): the name suggests the opposite polarity - confirm call sites.
// Table and field names are interpolated, not bound; keep them internal.
def fieldTaken(value:String, field:String ="email", table:String="user"): Boolean = {
val query:String =
"""
SELECT COUNT(*) as c
FROM """+table+"""
WHERE """+field+"""={value}
"""
DB.withConnection{implicit c =>
val firstRow = SQL(query)
.on('value -> value)
.apply()
.head
firstRow[Long]("c") == 0
}
}
}
}
// Thumbnail generation helpers built on javax.imageio / java.awt.
object ImageUtils {
import java.awt.Image
import java.awt.image.BufferedImage
import javax.imageio.ImageIO
import java.awt.Graphics2D
import java.awt.AlphaComposite
import java.io.File
// Creates a 200x200-bounded JPEG thumbnail of `file` at `path`.
// Fix: the input stream is now closed even when resizing fails (it was leaked).
def complete(file: java.io.File, path:String){
val inStream = new java.io.FileInputStream(file);
try {
val bufferedImage = resize(inStream, 200, 200)
val mini_file = new File(path);
ImageIO.write(bufferedImage, "jpg", mini_file);
} finally {
inStream.close()
}
}
// Reads an image from `is`. If it already fits within maxWidth x maxHeight it
// is returned untouched; otherwise it is scaled (aspect ratio preserved) so
// that the new width is sqrt(width/height) * 100 pixels.
def resize(is:java.io.InputStream, maxWidth:Int, maxHeight:Int):BufferedImage = {
val originalImage:BufferedImage = ImageIO.read(is)
// vals now - the old fit-to-bounds mutation code was already disabled.
val height = originalImage.getHeight
val width = originalImage.getWidth
if (width <= maxWidth && height <= maxHeight)
originalImage
else {
// target width derived from the aspect ratio; height follows from it
val nwidth:Int = (math.sqrt(width.toDouble/height.toDouble)*100).toInt
val nheight = (nwidth.toDouble*height.toDouble/width.toDouble).toInt
Logger.info("New dimensions: " + nwidth.toString+ " " + nheight.toString)
val scaledBI = new BufferedImage(nwidth, nheight, BufferedImage.TYPE_INT_RGB)
val g = scaledBI.createGraphics
g.setComposite(AlphaComposite.Src)
g.drawImage(originalImage, 0, 0, nwidth, nheight, null);
g.dispose
scaledBI
}
}
}
// Data access for the `currency` lookup table.
object Currency{
// Loads all currency rows.
def list: List[Currency] = {
DB.withConnection { implicit connection =>
SQL(
"""
SELECT *
FROM currency
WHERE 1
"""
)
.as(simple *)
}
}
// Anorm row parser: maps the `short` column into Currency.name
// (the short code is used as the display name).
val simple = {
get[Long]("id") ~
get[String]("short") map {
case id~name => Currency(id, name)
}
}
// (id, name) pairs for HTML select options.
def listSelect : Seq[(String,String)] = {
list.map(c => c.id.toString -> c.name)
}
}
// Data access for the `site` lookup table.
object Site{
// Loads all site rows.
def list: List[Site] = {
DB.withConnection { implicit connection =>
SQL(
"""
SELECT *
FROM site
WHERE 1
"""
)
.as(simple *)
}
}
// Anorm row parser: (id, name) -> Site.
val simple = {
get[Long]("id") ~
get[String]("name") map {
case id~name => Site(id, name)
}
}
// (id, name) pairs for HTML select options.
def listSelect : Seq[(String,String)] = {
list.map(c => c.id.toString -> c.name)
}
}
|
musethno/MGS
|
app/models/Utils.scala
|
Scala
|
mit
| 10,014 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.services.manager
import java.io.File
import java.net.ServerSocket
import org.apache.thrift.TMultiplexedProcessor
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.server.{ TServer, TThreadPoolServer }
import org.apache.thrift.transport.{ TFramedTransport, TServerSocket }
import org.bdgenomics.adam.util.CredentialsProperties
import org.bdgenomics.services.{ ConfigurableServiceExecutor, Configuration, ServiceContext }
import scala.collection.JavaConversions._
/**
* @param configuration This is the configuration that the manager is started with
*/
class ServiceManager(private val configuration: Configuration) {
// Set once start() has built the Thrift server; guards against double start.
var server: Option[TServer] = None
// Requested port; replaced by the actual bound port after the socket opens
// (supports port 0 = pick an ephemeral port).
var port: Int = configuration.getPort
/**
 * Builds a multiplexed Thrift processor from every configured service,
 * binds the server socket, and serves requests.
 *
 * @note This is a blocking call, which will not exit until the service is stopped.
 * @throws IllegalStateException if the server has already been started.
 */
def start() = {
if (server.isDefined)
throw new IllegalStateException("Cannot start two servers using the same ServiceManager")
val multiplexedProcessor = new TMultiplexedProcessor()
// One shared SparkContext for all hosted services.
val sparkContext = ServiceContext.createSparkContext(configuration)
// AWS credentials loaded from the configured location/suffix, or defaults.
val credentials = Option(configuration.credentials)
.map(creds => new CredentialsProperties(Option(creds.location).map(new File(_)))
.awsCredentials(Option(creds.suffix)))
.getOrElse(new CredentialsProperties(None).awsCredentials())
// Instantiate each configured service executor reflectively and register its
// processor under the override name (if set) or the plugin's own name.
configuration.getServices
.map(serviceConfiguration => {
val serviceContext = new ServiceContext(
serviceConfiguration,
sparkContext,
credentials)
(serviceConfiguration.getOverrideName,
Class.forName(serviceConfiguration.getClassName)
.newInstance()
.asInstanceOf[ConfigurableServiceExecutor]
.configure(serviceContext))
})
.foreach {
case (overrideName, plugin) => multiplexedProcessor.registerProcessor(
if (overrideName != null) overrideName else plugin.name, plugin.processor)
}
val serverSocket = new ServerSocket(port)
// Record the actual bound port (relevant when port 0 was requested).
port = serverSocket.getLocalPort
val s = new TThreadPoolServer(new TThreadPoolServer.Args(new TServerSocket(serverSocket))
.processor(multiplexedProcessor)
.protocolFactory(new TBinaryProtocol.Factory())
.transportFactory(new TFramedTransport.Factory()))
server = Some(s)
s.serve()
}
// True once serve() is actively accepting requests.
def isStarted = server.exists(_.isServing)
/**
 * @note This must be called from a separate thread than [[start()]], and will cause the thread that [[start()]] was
 * executing on to return
 */
def stop() = {
server.foreach(_.stop())
}
}
|
bigdatagenomics/bdg-services
|
bdgs-service-manager/src/main/scala/org/bdgenomics/services/manager/ServiceManager.scala
|
Scala
|
apache-2.0
| 3,417 |
package thistle.examples.webevent
import org.scalatest.FunSpec
import thistle.core.{MatchSequence, MatchTree}
import thistle.predicates.General._
import thistle.core.{Query, Node}
import thistle.examples.webevent.Predicates._
// Tests for web-event queries: verifies that tabbed-browsing detection produces
// the expected referrer chains over synthetic click streams.
class QueriesTestSpec extends FunSpec {
// Builds a minimal SearchEvent linking `url` back to referrer `ref`;
// the remaining fields are fixed filler values.
def generateEvent(ref: String, url: String): SearchEvent =
SearchEvent(
ref,
url,
1,
"query",
Seq(1,2,3)
)
describe("PurchaseChannel") {
it("should find a single purchase channel") {
val visit = Vector(
SearchEvent(
"",
"searchpage1",
1,
"something",
List(1,2,3,4,5)
),
ListingEvent(
"searchpage1",
"listing_page1",
2,
4,
99
),
PurchaseEvent(
"listing_page1",
"purchase",
3,
List(3,4)
)
)
val Test = Query(
ofType[WebEvent] || ofType[SearchEvent], samePage
)
// NOTE(review): no assertion here - this only checks MatchTree doesn't throw.
MatchTree(Test)(visit)
}
}
describe("tabbedBrowsing") {
it("should generate the correct sized query") {
assert(Queries.tabbedBrowsing(5).tail.size == 4)
}
it("should find no tabs when there are none") {
// linear chain: zero -> first -> second (a single browsing path)
val visit = Vector(
generateEvent("", "zero"),
generateEvent("zero", "first"),
generateEvent("first", "second")
)
val expectedIndexes = Set(
Seq(0, 1, 2)
)
val mt = MatchTree(Queries.tabbedBrowsing(visit.size))(visit)
assert(mt.allCompleteMatches.size == 1)
assert(mt.allIndexes.toSet == expectedIndexes)
}
it("should find a single tab") {
// "first" has two children -> the chain forks into two paths
val visit = Vector(
generateEvent("", "zero"),
generateEvent("zero", "first"),
generateEvent("first", "second"),
generateEvent("first", "third")
)
val expectedIndexes = Set(
Seq(0, 1, 2),
Seq(0, 1, 3)
)
val mt = MatchTree(Queries.tabbedBrowsing(visit.size))(visit)
assert(mt.allCompleteMatches.isEmpty)
assert(mt.allIndexes.toSet == expectedIndexes)
}
// NOTE(review): test name "existss" is a typo, but fixing it would duplicate
// the name of the next test - left as-is to keep test names unique.
it("should find tabs when they existss") {
/*
*
* 0
* 1 5
* 2 3
* 4
*/
val visit = Vector(
generateEvent("", "zero"),
generateEvent("zero", "first"),
generateEvent("first", "second"),
generateEvent("first", "third"),
generateEvent("second", "fourth"),
generateEvent("zero", "fifth")
)
val expectedIndexes = Set(
Seq(0, 1, 2, 4),
Seq(0, 1, 3),
Seq(0, 5)
)
val mt = MatchTree(Queries.tabbedBrowsing(visit.size))(visit)
assert(mt.maxDepth == 4)
assert(mt.allCompleteMatches.isEmpty)
assert(mt.allIndexes.toSet == expectedIndexes)
}
it("should find tabs when they exist") {
/*
*
* 0
* 1 6
* 2 3
* 4 5 7
*/
val visit = Vector(
generateEvent("", "zero"),
generateEvent("zero", "first"),
generateEvent("first", "second"),
generateEvent("first", "third"),
generateEvent("second", "fourth"),
generateEvent("second", "fifth"),
generateEvent("zero", "sixth"),
generateEvent("third", "seventh")
)
val expectedIndexes = Set(
Seq(0, 1, 2, 4),
Seq(0, 1, 2, 5),
Seq(0, 1, 3, 7),
Seq(0, 6)
)
val mt = MatchTree(Queries.tabbedBrowsing(visit.size))(visit)
assert(mt.maxDepth == 4)
assert(mt.allCompleteMatches.isEmpty)
assert(mt.allIndexes.toSet == expectedIndexes)
}
}
}
|
smarden1/thistle
|
src/test/scala/thistle/examples/QueriesSpec.scala
|
Scala
|
mit
| 3,731 |
/*
* Copyright 2017 io.metabookmarks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.metabookmarks.kafka.offsetstorage
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.{ WatchedEvent, Watcher, ZooKeeper }
import scala.concurrent.{ ExecutionContext, Future }
/**
* Base class to store / retrieve consumer offset.
* @param zookeeperQuorum
*/
abstract class OffsetStore(zookeeperQuorum: String) {
/**
 * nops zookeeper node watcher (events are ignored).
 */
private val dummyWatcher = new Watcher {
override def process(event: WatchedEvent): Unit = ()
}
/**
 * Return the partition count for a topic (zookeeper request), read as the
 * number of children of /brokers/topics/<topic>/partitions.
 * The ZooKeeper handle lifecycle is managed by TWR.probably - presumably
 * closed after use; confirm against TWR's implementation.
 * @param topicName
 * @return
 */
protected def partitionCount(topicName: String): Future[Int] =
TWR.probably(new ZooKeeper(zookeeperQuorum, 10000, dummyWatcher)) { zk =>
val zkNodeName = s"/brokers/topics/$topicName/partitions"
zk.getChildren(zkNodeName, false).size
}
/**
 * Insert new partition offset(0) in storage.
 * <br />
 * Used when for new consumer and/or the partition.
 *
 * @param topic
 * @param consumer
 * @param value range of partition indexes to initialise
 * @return
 */
protected def newPartitions(topic: String,
consumer: String,
value: Range): Future[Seq[(TopicPartition, Long)]]
/**
 * Retrieve stored offset by partition from storage.
 * @param topic
 * @param consumer
 * @return
 */
protected def getFromStorage(topic: String, consumer: String): Future[Seq[(TopicPartition, Long)]]
/** Persist the given offsets for the consumer. */
def update(consumer: String, topicOffsets: Seq[(TopicPartition, Long)]): Future[Long]
/**
 * Return the partition and offsets for a given topic and consumer:
 * stored offsets plus zero-initialised entries for any partitions added
 * since the last read (storage size .. current count).
 *
 * @param topic
 * @param consumer
 * @return
 */
def getPartitionOffsets(topic: String, consumer: String)(
implicit ec: ExecutionContext
): Future[Seq[(TopicPartition, Long)]] =
for {
count <- partitionCount(topic)
os <- getFromStorage(topic, consumer)
np <- newPartitions(topic, consumer, os.size until count)
} yield os ++ np
}
|
metabookmarks/kafka-slick-offsetstorage
|
src/main/scala/io/metabookmarks/kafka/offsetstorage/OffsetStore.scala
|
Scala
|
apache-2.0
| 2,651 |
package com.karasiq.shadowcloud.webapp.controllers
import com.karasiq.shadowcloud.model.{Folder, Path}
import com.karasiq.shadowcloud.webapp.context.FolderContext
import com.karasiq.shadowcloud.webapp.utils.HasKeyUpdate
// Controller interface for folder views: receives path refreshes (via
// HasKeyUpdate[Path]) plus folder add/delete notifications.
trait FolderController extends HasKeyUpdate[Path] {
def addFolder(folder: Folder): Unit
def deleteFolder(folder: Folder): Unit
}
object FolderController {
// Builds a controller from three callbacks.
// Fix: restored ASCII `=>` arrows - the text contained corrupted (mojibake)
// arrow characters that do not compile.
def apply(onUpdate: Path => Unit,
          onAddFolder: Folder => Unit,
          onDeleteFolder: Folder => Unit): FolderController = {
new FolderController {
def update(path: Path): Unit = onUpdate(path)
def addFolder(folder: Folder): Unit = onAddFolder(folder)
def deleteFolder(folder: Folder): Unit = onDeleteFolder(folder)
}
}
// Wraps the implicit parent controller: for each event the parent is invoked
// first, then the optional extra callback.
def inherit(onUpdate: Path => Unit = _ => (),
            onAddFolder: Folder => Unit = _ => (),
            onDeleteFolder: Folder => Unit = _ => ())
           (implicit fc: FolderController): FolderController = apply(
path => { fc.update(path); onUpdate(path) },
folder => { fc.addFolder(folder); onAddFolder(folder) },
folder => { fc.deleteFolder(folder); onDeleteFolder(folder) }
)
// Controller that forwards events into a FolderContext: adding a folder
// refreshes the parent path and the new path; deleting refreshes the parent.
def forFolderContext(implicit folderContext: FolderContext): FolderController = apply(
path => folderContext.update(path),
folder => {
folderContext.update(folder.path.parent)
folderContext.update(folder.path)
},
folder => folderContext.update(folder.path.parent)
)
}
|
Karasiq/shadowcloud
|
server/webapp/src/main/scala/com/karasiq/shadowcloud/webapp/controllers/FolderController.scala
|
Scala
|
apache-2.0
| 1,473 |
package com.socrata.soda.server.export
import com.rojoma.json.v3.ast.{JArray, JNull, JString}
import com.rojoma.json.v3.io.CompactJsonWriter
import com.rojoma.simplearm.v2._
import com.socrata.http.common.util.AliasedCharset
import com.socrata.http.server.HttpResponse
import com.socrata.soda.server.SodaUtils
import com.socrata.soda.server.highlevel.ExportDAO
import com.socrata.soda.server.wiremodels.{JsonColumnRep, JsonColumnWriteRep}
import com.socrata.soql.types.{SoQLType, SoQLValue}
import java.io.BufferedWriter
import javax.activation.MimeType
import javax.servlet.http.HttpServletResponse
import com.socrata.http.server.responses._
import com.socrata.http.server.implicits._
import com.socrata.soda.server.id.ResourceName
// Streams query results as a JSON array (or a single object when singleRow),
// skipping null-valued fields and reporting the approximate row count via header.
object JsonExporter extends Exporter {
val mimeTypeBase = SodaUtils.jsonContentTypeBase
val mimeType = new MimeType(mimeTypeBase)
val extension = Some("json")
val xhRowCount = "X-SODA2-Row-Count"
def export(charset: AliasedCharset, schema: ExportDAO.CSchema, rows: Iterator[Array[SoQLValue]], singleRow: Boolean = false, obfuscateId: Boolean = true, bom: Boolean = false, fuseMap: Map[String, String] = Map.empty): HttpResponse = {
val mt = new MimeType(mimeTypeBase)
mt.setParameter("charset", charset.alias)
val rowCountHeaders =
schema.approximateRowCount.fold(NoOp) { rc => Header(xhRowCount, rc.toString) }
exporterHeaders(schema) ~> rowCountHeaders ~> Write(mt) { rawWriter =>
using(new BufferedWriter(rawWriter, 65536)) { w =>
// Holds per-export state (column names pre-rendered as JSON strings,
// one write-rep per column) so the row loop does minimal work.
class Processor {
val writer = w
val jsonWriter = new CompactJsonWriter(writer)
val names: Array[String] = schema.schema.map { ci => JString(ci.fieldName.name).toString }.toArray
val jsonColumnReps = if (obfuscateId) JsonColumnRep.forClientType
                     else JsonColumnRep.forClientTypeClearId
val reps: Array[JsonColumnWriteRep] = schema.schema.map { ci => jsonColumnReps(ci.typ) }.toArray
// Writes one row as a JSON object, omitting null fields.
def writeJsonRow(row: Array[SoQLValue]) {
writer.write('{')
var didOne = false
var i = 0
while(i != row.length) {
val jsonized = reps(i).toJValue(row(i))
if(JNull != jsonized) {
if(didOne) writer.write(',')
else didOne = true
writer.write(names(i))
writer.write(':')
// Fix: reuse the already-computed value instead of calling
// reps(i).toJValue(row(i)) a second time per cell.
jsonWriter.write(jsonized)
}
i += 1
}
writer.write('}')
}
// Emits all rows: bracketed array by default, bare object when singleRow
// (which errors if more than one row arrives).
def go(rows: Iterator[Array[SoQLValue]]) {
var rowsCount = 0
if(!singleRow) writer.write('[')
if(rows.hasNext) {
writeJsonRow(rows.next())
if(singleRow && rows.hasNext) throw new Exception("Expect to get exactly one row but got more.")
rowsCount += 1
}
while(rows.hasNext) {
writer.write("\\n,")
writeJsonRow(rows.next())
rowsCount += 1
}
if(!singleRow) writer.write("]\\n")
else writer.write("\\n")
}
}
val processor = new Processor
processor.go(rows)
}
}
}
override protected def maybeSoda2FieldsHeader(schema: ExportDAO.CSchema): HttpServletResponse => Unit = {
writeSoda2FieldsHeader(schema)
}
}
|
socrata-platform/soda-fountain
|
soda-fountain-lib/src/main/scala/com/socrata/soda/server/export/JsonExporter.scala
|
Scala
|
apache-2.0
| 3,395 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.runtime
package rdd
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.apache.spark.Partitioner
import org.apache.spark.rdd.{ RDD, ShuffledRDD }
import com.asakusafw.runtime.value.IntOption
// JUnit-runnable wrapper around ConfluentSpec.
@RunWith(classOf[JUnitRunner])
class ConfluentSpecTest extends ConfluentSpec
// Verifies that sc.confluent merges multiple key-ordered RDDs into a single
// RDD that is partitioned by the grouping partitioner and fully sorted per key.
class ConfluentSpec extends FlatSpec with SparkForAll {
import ConfluentSpec._
behavior of "Confluent"
it should "confluent rdds" in {
// rdd1/rdd2 are unshuffled; rdd3 is pre-shuffled with the same partitioner
val rdd1 = sc.parallelize(0 until 100).map(i => ((i.toString, 0), i))
val rdd2 = sc.parallelize(0 until 100).flatMap(i => Seq(((i.toString, 1), i + 100), ((i.toString, 2), i + 200)))
val part = new GroupingPartitioner(2)
val ord = implicitly[Ordering[(String, Int)]]
val rdd3: RDD[((String, Int), Int)] =
sc.parallelize(0 until 100).flatMap(i => Seq(((i.toString, 4), i + 400), ((i.toString, 3), i + 300)))
.shuffle(part, Option(ord))
val confluented = sc.confluent(Seq(rdd1, rdd2, rdd3), part, Some(ord))
// expected order: partition 0 keys first, then partition 1, sorted within each
val (part0, part1) = (0 until 100).sortBy(_.toString).partition { i =>
val part = i.toString.hashCode % 2
(if (part < 0) part + 2 else part) == 0
}
assert(confluented.collect ===
(part0 ++ part1).flatMap(i => (0 until 5).map(j => ((i.toString, j), i + 100 * j))))
}
it should "confluent empty rdds" in {
val part = new GroupingPartitioner(2)
val ord = implicitly[Ordering[(String, Int)]]
val confluented = sc.confluent(Seq.empty, part, Some(ord))
assert(confluented.collect === Array.empty)
}
it should "confluent rdds of mutable values" in {
// `f` reuses a single IntOption per partition, so values must be read
// before the iterator advances - this exercises that contract.
val part = new GroupingPartitioner(2)
val ord = implicitly[Ordering[(String, Int)]]
val rdd1: RDD[((String, Int), IntOption)] =
sc.parallelize(0 until 10).map(i => ((i.toString, 0), i))
.shuffle(part, Option(ord))
.mapPartitions(f, preservesPartitioning = true)
val rdd2: RDD[((String, Int), IntOption)] =
sc.parallelize(0 until 10).flatMap(i => Seq(((i.toString, 1), i + 10), ((i.toString, 2), i + 20)))
.shuffle(part, Option(ord))
.mapPartitions(f, preservesPartitioning = true)
val rdd3: RDD[((String, Int), IntOption)] =
sc.parallelize(0 until 10).flatMap(i => Seq(((i.toString, 4), i + 40), ((i.toString, 3), i + 30)))
.shuffle(part, Option(ord))
.mapPartitions(f, preservesPartitioning = true)
val confluented = sc.confluent(Seq(rdd1, rdd2, rdd3), part, Some(ord))
val (part0, part1) = (0 until 10).sortBy(_.toString).partition { i =>
val part = i.toString.hashCode % 2
(if (part < 0) part + 2 else part) == 0
}
assert(confluented.map {
case (k, v) => k -> v.get
}.collect ===
(part0 ++ part1).flatMap(i => (0 until 5).map(j => ((i.toString, j), i + 10 * j))))
}
}
object ConfluentSpec {
// Partitions keys of shape (group, seq) by the hash of the group component
// only, normalised to a non-negative partition index.
class GroupingPartitioner(val numPartitions: Int) extends Partitioner {
override def getPartition(key: Any): Int = {
val (group, _) = key.asInstanceOf[(String, Int)]
val part = group.hashCode % numPartitions
if (part < 0) part + numPartitions else part
}
}
// Per-partition mapper that wraps each Int value in a SINGLE reused IntOption
// instance (lazy, one per partition) - downstream code must copy or consume
// each value before pulling the next element.
def f: Iterator[((String, Int), Int)] => Iterator[((String, Int), IntOption)] = {
lazy val intOption = new IntOption()
{
_.map { value =>
intOption.modify(value._2)
(value._1, intOption)
}
}
}
}
|
asakusafw/asakusafw-spark
|
runtime/src/test/scala/com/asakusafw/spark/runtime/rdd/ConfluentSpec.scala
|
Scala
|
apache-2.0
| 4,025 |
//package io.youi.util
//
//import io.youi.component.{AbstractContainer, Component, Transform}
//import io.youi.drawable.Context
//import io.youi.{dom, ui}
//import org.scalajs.dom.{Event, document, html}
//
//import scala.concurrent.ExecutionContext.Implicits.global
//
//object DebugWindow {
// private lazy val root = dom.create[html.Div]("div")
// private var canvases = List.empty[html.Canvas]
//
// root.style.display = "none"
//
// def toggle(container: AbstractContainer): Unit = if (root.style.display != "block") {
// scribe.info(s"Showing $container debug information...")
// showFor(container)
// } else {
// scribe.info("Hiding debug information")
// close()
// }
//
// def showFor(container: AbstractContainer): Unit = {
// root.innerHTML = ""
// close()
// root.style.position = "absolute"
// root.style.width = s"${ui.width().toInt}px"
// root.style.height = s"${ui.height().toInt}px"
// root.style.zIndex = "999999"
// root.style.left = "0px"
// root.style.top = "0px"
// root.style.overflow = "auto"
// root.style.display = "block"
// root.style.backgroundColor = "white"
// document.body.appendChild(root)
// val parent = root
// AbstractContainer.children(container).foreach(drawChild(_, parent, container))
// }
//
// def close(): Unit = {
// canvases.foreach(CanvasPool.restore)
// root.style.display = "none"
// }
//
// private def drawChild(component: Component, parent: html.Element, container: AbstractContainer): Unit = {
// val canvas = CanvasPool(component.size.width() * ui.ratio, component.size.height() * ui.ratio)
// val context = new Context(canvas, ui.ratio)
//
// val heading = dom.create[html.Element]("h3")
// heading.innerHTML = s"${component.toString} (size: ${canvas.width}x${canvas.height}, parent: ${component.parent()})"
// parent.appendChild(heading)
//
// val actions = dom.create[html.Div]("div")
// val invalidate = dom.create[html.Button]("button")
// invalidate.innerHTML = "Invalidate"
// invalidate.addEventListener("click", (_: Event) => {
// close()
// component.invalidate().foreach { _ =>
// showFor(container)
// }
// })
// actions.appendChild(invalidate)
// parent.appendChild(actions)
//
// canvas.style.border = "1px solid black"
// canvas.style.width = s"${math.ceil(component.size.width())}px"
// canvas.style.height = s"${math.ceil(component.size.height())}px"
// canvas.width = math.ceil(component.size.width * ui.ratio).toInt
// canvas.height = math.ceil(component.size.height * ui.ratio).toInt
// component.draw(context, Transform.None)
// parent.appendChild(canvas)
// canvases = canvas :: canvases
//
// component match {
// case c: AbstractContainer => {
// val div = dom.create[html.Div]("div")
// div.style.marginLeft = "20px"
// div.style.border = "1px solid red"
// parent.appendChild(div)
// AbstractContainer.children(c).foreach(drawChild(_, div, container))
// }
// case _ => // Not a container
// }
// }
//}
|
outr/youi
|
app/js/src/main/scala/io/youi/util/DebugWindow.scala
|
Scala
|
mit
| 3,095 |
package playground
import scala.quoted._, scala.quoted.matching._
import scala.quoted.{given _}
import scala.tasty._
object macros {
// Inline entry point: expands mcrImpl over the quoted (by-name) argument.
inline def mcr(x: => Any) = ${mcrImpl('x)}
// Macro implementation: inspects the argument's tree and rewrites any Block
// into the literal 2. The match is deliberately non-exhaustive (this is a
// compiler-test fixture); a non-Block argument aborts expansion.
def mcrImpl(body: Expr[Any])(using ctx: QuoteContext) : Expr[Any] = {
import ctx.tasty.{_, given _}
body.unseal match { case Block(_, _) => '{2} }
}
}
|
som-snytt/dotty
|
tests/neg-macros/i6976/Macro_1.scala
|
Scala
|
apache-2.0
| 344 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import java.io._
import java.nio.ByteBuffer
import javax.annotation.concurrent.NotThreadSafe
import scala.reflect.ClassTag
import org.apache.spark.annotation.{DeveloperApi, Private}
import org.apache.spark.util.NextIterator
/**
* :: DeveloperApi ::
* A serializer. Because some serialization libraries are not thread safe, this class is used to
* create [[org.apache.spark.serializer.SerializerInstance]] objects that do the actual
* serialization and are guaranteed to only be called from one thread at a time.
*
* Implementations of this trait should implement:
*
* 1. a zero-arg constructor or a constructor that accepts a [[org.apache.spark.SparkConf]]
* as parameter. If both constructors are defined, the latter takes precedence.
*
* 2. Java serialization interface.
*
* @note Serializers are not required to be wire-compatible across different versions of Spark.
* They are intended to be used to serialize/de-serialize data within a single Spark application.
*/
@DeveloperApi
abstract class Serializer {
/**
 * Default ClassLoader to use in deserialization. Implementations of [[Serializer]] should
 * make sure it is using this when set.
 */
@volatile protected var defaultClassLoader: Option[ClassLoader] = None
/**
 * Sets a class loader for the serializer to use in deserialization.
 *
 * @return this Serializer object
 */
def setDefaultClassLoader(classLoader: ClassLoader): Serializer = {
defaultClassLoader = Some(classLoader)
this
}
/** Creates a new [[SerializerInstance]]. */
def newInstance(): SerializerInstance
/**
 * :: Private ::
 * Returns true if this serializer supports relocation of its serialized objects and false
 * otherwise. This should return true if and only if reordering the bytes of serialized objects
 * in serialization stream output is equivalent to having re-ordered those elements prior to
 * serializing them. More specifically, the following should hold if a serializer supports
 * relocation:
 *
 * {{{
 * serOut.open()
 * position = 0
 * serOut.write(obj1)
 * serOut.flush()
 * position = # of bytes written to stream so far
 * obj1Bytes = output[0:position-1]
 * serOut.write(obj2)
 * serOut.flush()
 * position2 = # of bytes written to stream so far
 * obj2Bytes = output[position:position2-1]
 * serIn.open([obj2bytes] concatenate [obj1bytes]) should return (obj2, obj1)
 * }}}
 *
 * In general, this property should hold for serializers that are stateless and that do not
 * write special metadata at the beginning or end of the serialization stream.
 *
 * This API is private to Spark; this method should not be overridden in third-party subclasses
 * or called in user code and is subject to removal in future Spark releases.
 *
 * See SPARK-7311 for more details.
 */
@Private
private[spark] def supportsRelocationOfSerializedObjects: Boolean = false
}
/**
 * :: DeveloperApi ::
 * An instance of a serializer, for use by one thread at a time.
 *
 * It is legal to create multiple serialization / deserialization streams from the same
 * SerializerInstance as long as those streams are all used within the same thread.
 */
@DeveloperApi
@NotThreadSafe
abstract class SerializerInstance {
  /** Serializes a single object into a freshly produced buffer. */
  def serialize[T: ClassTag](t: T): ByteBuffer
  /** Deserializes a single object, resolving classes with the default class loader. */
  def deserialize[T: ClassTag](bytes: ByteBuffer): T
  /** Deserializes a single object, resolving classes with the supplied class loader. */
  def deserialize[T: ClassTag](bytes: ByteBuffer, loader: ClassLoader): T
  /** Wraps an output stream for writing a sequence of serialized objects. */
  def serializeStream(s: OutputStream): SerializationStream
  /** Wraps an input stream for reading a sequence of serialized objects. */
  def deserializeStream(s: InputStream): DeserializationStream
}
/**
 * :: DeveloperApi ::
 * A stream for writing serialized objects.
 */
@DeveloperApi
abstract class SerializationStream {

  /** Writes a single object to the stream; the most general-purpose write method. */
  def writeObject[T: ClassTag](t: T): SerializationStream

  /** Writes the object representing the key of a key-value pair. */
  def writeKey[T: ClassTag](key: T): SerializationStream = writeObject(key)

  /** Writes the object representing the value of a key-value pair. */
  def writeValue[T: ClassTag](value: T): SerializationStream = writeObject(value)

  def flush(): Unit

  def close(): Unit

  /** Drains the iterator into this stream, writing every element in order. */
  def writeAll[T: ClassTag](iter: Iterator[T]): SerializationStream = {
    iter.foreach(writeObject(_))
    this
  }
}
/**
 * :: DeveloperApi ::
 * A stream for reading serialized objects.
 */
@DeveloperApi
abstract class DeserializationStream {

  /** Reads the next object from the stream; the most general-purpose read method. */
  def readObject[T: ClassTag](): T

  /** Reads the object representing the key of a key-value pair. */
  def readKey[T: ClassTag](): T = readObject[T]()

  /** Reads the object representing the value of a key-value pair. */
  def readValue[T: ClassTag](): T = readObject[T]()

  def close(): Unit

  /**
   * Exposes the elements of this stream as an iterator. This can only be called once, since
   * reading each element consumes data from the underlying input source.
   */
  def asIterator: Iterator[Any] = new NextIterator[Any] {
    override protected def getNext() = {
      try {
        readObject[Any]()
      } catch {
        // End of input marks the iterator as exhausted rather than propagating.
        case _: EOFException =>
          finished = true
          null
      }
    }

    override protected def close(): Unit = {
      DeserializationStream.this.close()
    }
  }

  /**
   * Exposes this stream as an iterator over key-value pairs. Single use only, for the same
   * reason as [[asIterator]].
   */
  def asKeyValueIterator: Iterator[(Any, Any)] = new NextIterator[(Any, Any)] {
    override protected def getNext() = {
      try {
        (readKey[Any](), readValue[Any]())
      } catch {
        // End of input marks the iterator as exhausted rather than propagating.
        case _: EOFException =>
          finished = true
          null
      }
    }

    override protected def close(): Unit = {
      DeserializationStream.this.close()
    }
  }
}
|
sachintyagi22/spark
|
core/src/main/scala/org/apache/spark/serializer/Serializer.scala
|
Scala
|
apache-2.0
| 6,695 |
package edu.cmu.lti.nlp.amr.FastFeatureVector
import edu.cmu.lti.nlp.amr._
import edu.cmu.lti.nlp.amr.Train._
import java.lang.Math.abs
import java.lang.Math.log
import java.lang.Math.exp
import java.lang.Math.random
import java.lang.Math.floor
import java.lang.Math.min
import java.lang.Math.max
import scala.io.Source
import scala.util.matching.Regex
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import scala.math.sqrt
/**
 * AdaGrad optimizer over sparse [[FeatureVector]]s: per-coordinate learning rates scaled by
 * the inverse square root of the accumulated squared gradients.
 */
class Adagrad extends Optimizer[FeatureVector] {
  // gradient(pass, exampleIndex, weights) returns (gradient vector, loss score) for one example.
  // trainingObserver is invoked between passes; returning false stops training early.
  def learnParameters(gradient: (Option[Int], Int, FeatureVector) => (FeatureVector, Double),
                      initialWeights: FeatureVector,
                      trainingSize: Int,
                      passes: Int,
                      stepsize: Double,
                      l2strength: Double,
                      noreg: List[String],
                      trainingObserver: (Int, FeatureVector) => Boolean,
                      avg: Boolean) : FeatureVector = {
    // Working copy of the weights; mutated in place on every example.
    val weights = FeatureVector(initialWeights.labelset)
    weights += initialWeights
    // NOTE(review): avg_weights accumulates the raw SUM of the per-pass weight vectors and is
    // never divided by the pass count here — confirm that downstream code normalises it.
    var avg_weights = FeatureVector(weights.labelset)
    var sumSq = FeatureVector(weights.labelset) // G_{i,i}
    var pass = 0
    while (pass < passes && (pass == 0 || trainingObserver(pass,avg_weights))) {
      logger(-1,"Pass "+(pass+1).toString)
      var objective = 0.0 // objective is 1/N \\sum_i=1^N Loss(i) + 1/2 * \\lambda * ||weights||^2 (var objective is N times this)
      //for (t <- Range(0, trainingSize).toList) {
      // Examples are visited in a fresh random order each pass (stochastic updates).
      for (t <- Random.shuffle(Range(0, trainingSize).toList)) {
        // normally we would do weights -= stepsize * gradient(t)._1
        // but instead we do this: (see equation 8 in SocherBauerManningNg_ACL2013.pdf)
        val (grad, score) = gradient(Some(pass), t, weights)
        //logger(1, "--- Gradient ---")
        //logger(1, grad)
        // Accumulate squared gradient per coordinate, then take an AdaGrad step:
        // w := w - stepsize * g / sqrt(G_{i,i}). Coordinates never touched (sumSq == 0)
        // are left unchanged to avoid dividing by zero.
        sumSq.update(grad, (feat, label, x , y) => x + y * y)
        weights.update(grad, (feat, label, x, y) => {
          val sq = sumSq(feat, label)
          if (sq > 0.0) {
            x - stepsize * y / sqrt(sumSq(feat, label))
          } else {
            x
          }
        })
        objective += score
        if (l2strength != 0.0) {
          // Temporarily remove the unregularized features so the L2 terms below skip them,
          // then restore their values afterwards.
          val noregSaveValues = noreg.map(feat => (feat, weights.fmap(feat)))
          noreg.map(feat => weights.fmap.remove(feat))
          // NOTE(review): this adds ||w||^2 / 2 to the objective without the l2strength
          // factor described in the comment above — confirm whether lambda is intended here.
          objective += weights.dot(weights) / 2.0 // don't count the unregularized features in the regularizer
          // L2 decay applied as an AdaGrad-style update using the current weights as the
          // "gradient" (scaled by l2strength).
          sumSq.update(weights, (feat, label, x , y) => x + l2strength * l2strength * y * y)
          weights.update(weights, (feat, label, x, y) => {
            val sq = sumSq(feat, label)
            if (sq > 0.0) {
              x - stepsize * l2strength * y / sqrt(sumSq(feat, label))
            } else {
              x
            }
          })
          // Restore the saved values of the unregularized features.
          noregSaveValues.map(x => { weights.fmap(x._1) = x._2 })
        }
      }
      logger(-1," Avg objective value last pass: "+(objective/trainingSize.toDouble).toString)
      //logger(0," objective: "+((0 until trainingSize).map(x => gradient(None, x, weights)._2).sum/trainingSize).toString)
      avg_weights += weights
      pass += 1
    }
    trainingObserver(pass,avg_weights)
    // When avg is set, return the accumulated (summed) weights; otherwise the final weights.
    if(avg) { avg_weights } else { weights }
  }
}
|
hopshackle/wordAlignment
|
src/edu/cmu/lti/nlp/amr/FastFeatureVector/Adagrad.scala
|
Scala
|
bsd-2-clause
| 3,897 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package types
/**
 * PSI element for a type projection (e.g. `Outer#Inner`): a type element qualified by another
 * type element rather than by a stable path.
 *
 * @author Alexander Podkhalyuzin
 * Date: 13.03.2008
 */
trait ScTypeProjection extends ScTypeElement with ScReferenceElement {
  override protected val typeName = "TypeProjection"
  // The qualifier type element (the part before `#`), located as the first child of this type.
  def typeElement: ScTypeElement = findChildByClassScala(classOf[ScTypeElement])
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/base/types/ScTypeProjection.scala
|
Scala
|
apache-2.0
| 369 |
package io.ruben.minecraft.avatars
import java.util.logging.Level._
import io.ruben.minecraft.avatars.listeners.{AvatarListeners, PlayerListeners}
import org.bukkit.plugin.java.JavaPlugin
import DataAccess._
import driver.api._
import slick.jdbc.meta.MTable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.Success
import scala.util.Failure
/**
* Created by istar on 13/09/15.
*/
class AvatarsPlugin extends JavaPlugin {

  /**
   * Bukkit entry point: registers event listeners and the "avatars" command, then
   * asynchronously checks the database and creates the schema on first run.
   */
  override def onEnable(): Unit = {
    // Wire up player and avatar event listeners.
    val pluginManager = getServer.getPluginManager
    pluginManager.registerEvents(PlayerListeners, this)
    pluginManager.registerEvents(AvatarListeners, this)

    // Route the "avatars" command to its executor.
    getCommand("avatars").setExecutor(Commands)

    // Look for the users table; if absent this is a first run and the schema is created.
    db.run(MTable.getTables(users.baseTableRow.tableName)).onComplete {
      case Success(existing) if existing.isEmpty =>
        getLogger.info("Creating tables for first time")
        val setup: DBIO[Unit] = DBIO.seq(
          (users.schema ++ avatars.schema ++ locations.schema).create
        )
        db.run(setup).andThen {
          case _ => getLogger.info("Tables created")
        }
      case Success(_) =>
        getLogger.info("Initialized storage found")
      case Failure(f) =>
        f.printStackTrace()
        getLogger.log(SEVERE, "Couldn't read/write the database")
    }
  }
}
|
Istar-Eldritch/bukkit-avatars
|
src/main/scala/io/ruben/minecraft/avatars/AvatarsPlugin.scala
|
Scala
|
gpl-3.0
| 1,402 |
/* Taking three different letters from the 26 letters of the alphabet, character strings of length three can be formed.
Examples are 'abc', 'hat' and 'zyx'.
When we study these three examples we see that for 'abc' two characters come lexicographically after its neighbour to the left.
For 'hat' there is exactly one character that comes lexicographically after its neighbour to the left. For 'zyx' there are zero characters that come lexicographically after its neighbour to the left.
In all there are 10400 strings of length 3 for which exactly one character comes lexicographically after its neighbour to the left.
We now consider strings of n ≤ 26 different characters from the alphabet.
For every n, p(n) is the number of strings of length n for which exactly one character comes lexicographically after its neighbour to the left.
What is the maximum value of p(n)? */
/**
 * Immutable exact rational number backed by BigInt. The fraction is reduced to lowest terms
 * on construction. Note: the sign is not normalised (e.g. 1/-2 keeps its negative
 * denominator), matching the original behaviour.
 */
class Rational(numerator: BigInt, denominator: BigInt) {
  // Compute the reducing gcd once instead of once per field (was computed twice).
  private val g = gcd(numerator.abs, denominator.abs)

  /** Numerator of the reduced fraction. */
  val n = numerator / g
  /** Denominator of the reduced fraction. */
  val d = denominator / g

  /** Returns this + other, reduced. */
  def add(other: Rational): Rational = {
    new Rational(
      (n * other.d) + (other.n * d),
      d * other.d
    )
  }

  /** Returns this - other, reduced. */
  def sub(other: Rational): Rational = {
    new Rational(
      (n * other.d) - (other.n * d),
      d * other.d
    )
  }

  /** Returns this * other, reduced. */
  def mul(other: Rational): Rational = {
    new Rational(
      n * other.n,
      d * other.d
    )
  }

  /** Returns this / other, implemented as multiplication by the reciprocal. */
  def div(other: Rational): Rational = {
    mul(new Rational(other.d, other.n))
  }

  /** Returns this fraction scaled by an integer factor. */
  def scale(scalar: BigInt): Rational = {
    new Rational(n * scalar, d)
  }

  /** Euclid's algorithm; gcd(a, 0) == a. */
  def gcd(a: BigInt, b: BigInt): BigInt = {
    if (b == 0) {
      a
    } else {
      gcd(b, a % b)
    }
  }

  /** True when the reduced fraction is a whole number (denominator 1). */
  def isInt(): Boolean = {
    d == 1
  }

  /** Numerator of the reduced fraction; only meaningful when isInt(). */
  def toInt(): BigInt = {
    n
  }

  /** "n/d" for proper fractions, or just "n" for whole numbers. */
  override def toString(): String = {
    if (d != 1) {
      n + "/" + d
    } else {
      n + ""
    }
  }
}
/** Builds a Rational from an (numerator, denominator) pair of Ints. */
def fromPair(nd: (Int, Int)): Rational = {
  val (num, den) = nd
  new Rational(BigInt(num), BigInt(den))
}
/** Lifts a BigInt into a whole-number Rational (denominator 1). */
def rational(n: BigInt): Rational = new Rational(n, BigInt(1))
/**
 * Project Euler problem 158 (work in progress). Counts length-n strings of distinct letters
 * where exactly one character comes lexicographically after its left neighbour.
 *
 * NOTE(review): this object does not currently compile — see the inline notes below.
 */
object Problem158 {
  // 26-slot mutable buffers of exact rationals, indexed by letter (0 = 'a' … 25 = 'z').
  type MutableRatArray = scala.collection.mutable.ArrayBuffer[Rational]
  type MutableArrayArray = scala.collection.mutable.ArrayBuffer[MutableRatArray]

  // NOTE(review): return type `MutableRatArRat` is undefined — almost certainly a typo for
  // `MutableRatArray`; likewise `MutableIntArray` on the next line should be `MutableRatArray`.
  def initArray(value: BigInt): MutableRatArRat = {
    val array: MutableIntArray = new scala.collection.mutable.ArrayBuffer(26)
    for (i <- 0 to 25) {
      array += rational(value)
    }
    array
  }

  // Builds a 26x26 grid of rationals, each slot initialised to `value`.
  def initArrayArray(value: BigInt): MutableArrayArray = {
    val array: MutableArrayArray = new scala.collection.mutable.ArrayBuffer(26)
    for (i <- 0 to 25) {
      array += initArray(value)
    }
    array
  }

  // NOTE(review): this DEV variant is inconsistent with its declared types and does not
  // compile: the len == 1 branch returns 1-D arrays where a (MutableArrayArray,
  // MutableRatArray) pair is declared; `semisortedToSemisortedMultiplier` is only defined
  // inside countsForStringsOfLength and is not in scope here; `jj` is undefined; and
  // `Rational` has no `+=` or `add` usable the way it is applied to buffer elements below.
  def countsForStringsOfLengthDEV(len: Int): (MutableArrayArray, MutableRatArray) = {
    // Probability-style correction for prepending j to a sorted tail starting at i.
    def sortedToSemisortedMultiplier(len: Int, i: Int, j: Int): Rational = {
      if (len <= 2) {
        rational(BigInt(1))
      } else {
        // (j < i) and (i onwards is strictly decreasing), so range
        // of possible collisions is [(i-1)..0]
        val collidableRangeSize = i - 1 + 1 // + 1 for the 0 at the end
        val numCollidable = len - 2
        // 1 - P(collision) is 1 - (numCollidable / collidableRangeSize)
        new Rational(collidableRangeSize - numCollidable, collidableRangeSize)
      }
    }
    if (len == 1) {
      // NOTE(review): type mismatch — the first element should presumably be initArrayArray(0).
      (initArray(0), initArray(1))
    } else {
      val (oldSemisorted, oldSorted) = countsForStringsOfLength(len - 1)
      val (semisorted, sorted) = (initArrayArray(0), initArray(0))
      // sorted -> semisorted
      for (i <- 0 to 25) {
        for (j <- 0 to (i - 1)) { // pre-pending j, so j < i to make i lexicographically after.
          // multiply by proportion that don't have have j as a repeated char -- problem is that currently this seems to be done right but doesn't give the right answer
          // NOTE(review): `+=` is not defined on Rational; the non-DEV version uses .add.
          semisorted(j)(j) += oldSorted(i).mul(sortedToSemisortedMultiplier(len, i, j))
        }
      }
      println()
      // semisorted -> semisorted
      for (i <- 0 to 25) {
        for (j <- (i + 1) to 25) {
          // NOTE(review): `semisortedToSemisortedMultiplier` is not in scope in this function,
          // and `jj` below is undefined (leftover debug guard).
          val y = oldSemisorted(i).mul(semisortedToSemisortedMultiplier(len, i, j))
          val x = semisorted(j).add(y)
          if (j == jj) {
            println("semisorted(" + j + ") += oldSemisorted(" + i + "){" + oldSemisorted(i) + "} * " + semisortedToSemisortedMultiplier(len, i, j) + " {" + y + "} -> " + x)
          }
          semisorted(j) = semisorted(j).add(oldSemisorted(i).mul(semisortedToSemisortedMultiplier(len, i, j)))
        }
      }
      println()
      // sorted -> sorted
      for (i <- 0 to 25) {
        for (j <- (i + 1) to 25) {
          sorted(j) = sorted(j).add(oldSorted(i))
        }
      }
      (semisorted, sorted)
    }
  }

  // "sorted" = increasing
  // "semisorted" = increasing except for 1 elem
  // ( starting elems of semisorted strings of that length
  // , starting elems of sorted strings of that length
  def countsForStringsOfLength(len: Int): (MutableRatArray, MutableRatArray) = {
    // Correction factor when promoting a sorted string to semisorted by prepending j < i.
    def sortedToSemisortedMultiplier(len: Int, i: Int, j: Int): Rational = {
      if (len <= 2) {
        rational(BigInt(1))
      } else {
        // (j < i) and (i onwards is strictly decreasing), so range
        // of possible collisions is [(i-1)..0]
        val collidableRangeSize = i - 1 + 1 // + 1 for the 0 at the end
        val numCollidable = len - 2
        // 1 - P(collision) is 1 - (numCollidable / collidableRangeSize)
        new Rational(collidableRangeSize - numCollidable, collidableRangeSize)
      }
    }
    // Correction factor when extending a semisorted string with a larger first letter j > i.
    def semisortedToSemisortedMultiplier(len: Int, i: Int, j: Int): Rational = {
      if (len <= 2) {
        rational(BigInt(1))
      } else {
        // (j > i) and (i onwards is semisorted), so only 1 number
        // after i appears
        val rangeOfCollidingValue = 25 - i
        new Rational(rangeOfCollidingValue - 1, rangeOfCollidingValue)
      }
    }
    if (len == 1) {
      // Base case: every single letter is a sorted string; none is semisorted.
      (initArray(0), initArray(1))
    } else {
      val (oldSemisorted, oldSorted) = countsForStringsOfLength(len - 1)
      val (semisorted, sorted) = (initArray(0), initArray(0))
      // sorted -> semisorted
      for (i <- 0 to 25) {
        for (j <- 0 to (i - 1)) { // pre-pending j, so j < i to make i lexicographically after.
          // multiply by proportion that don't have have j as a repeated char -- problem is that currently this seems to be done right but doesn't give the right answer
          semisorted(j) = semisorted(j).add(oldSorted(i).mul(sortedToSemisortedMultiplier(len, i, j)))
        }
      }
      // semisorted -> semisorted
      for (i <- 0 to 25) {
        for (j <- (i + 1) to 25) {
          semisorted(j) = semisorted(j).add(oldSemisorted(i).mul(semisortedToSemisortedMultiplier(len, i, j)))
        }
      }
      // sorted -> sorted
      for (i <- 0 to 25) {
        for (j <- (i + 1) to 25) {
          sorted(j) = sorted(j).add(oldSorted(i))
        }
      }
      (semisorted, sorted)
    }
  }

  // Total count of semisorted strings of the given length (sums the per-letter counts).
  def numsemisortedStringsOfLength(len: Int): BigInt = {
    countsForStringsOfLength(len)._1.map(_.toInt).sum
  }

  // NOTE(review): entry point is empty — nothing is computed or printed yet.
  def main(args: Array[String]) {
  }
}
|
bgwines/project-euler
|
src/in progress/problem158.scala
|
Scala
|
bsd-3-clause
| 7,105 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.util
import org.bdgenomics.avocado.AvocadoFunSuite
import org.bdgenomics.adam.models.SequenceDictionary
import org.bdgenomics.adam.rdd.variant.GenotypeRDD
import org.bdgenomics.formats.avro.{ Genotype, GenotypeAllele, Variant }
import scala.collection.JavaConversions._
// Test-only implementation of RewriteHetsArgs with the thresholds and switches used
// throughout the suite (0.75 for het SNPs, 0.65 for het indels, rewriting enabled).
case class TestRewriteHetsArgs(
    var maxHetSnpAltAllelicFraction: Float = 0.75f,
    var maxHetIndelAltAllelicFraction: Float = 0.65f,
    var disableHetSnpRewriting: Boolean = false,
    var disableHetIndelRewriting: Boolean = false) extends RewriteHetsArgs {
}
/**
 * Unit tests for RewriteHets: heterozygous calls whose alternate allelic fraction exceeds the
 * configured threshold should be rewritten as homozygous-alternate calls.
 */
class RewriteHetsSuite extends AvocadoFunSuite {

  // Builds a Genotype with the given ref/alt alleles, genotype quality, total read depth,
  // alternate read depth, and called alleles.
  def buildGt(ref: String,
              alt: String,
              gq: Int,
              dp: Int,
              adp: Int,
              alleles: List[GenotypeAllele]): Genotype = {
    Genotype.newBuilder
      .setGenotypeQuality(gq)
      .setVariant(Variant.newBuilder
        .setReferenceAllele(ref)
        .setAlternateAllele(alt)
        .build)
      .setAlleles(alleles)
      .setReadDepth(dp)
      .setAlternateReadDepth(adp)
      .build
  }

  // Fixtures. "good" hets sit below the allelic-fraction thresholds (15/30 = 0.5, 30/50 = 0.6);
  // "bad" hets sit above them (25/30 ≈ 0.83, 20/20 = 1.0). Hom calls must never be rewritten.
  val goodHetSnp = buildGt("A", "T", 30, 30, 15,
    List(GenotypeAllele.REF, GenotypeAllele.ALT))
  val badHetSnp = buildGt("A", "T", 30, 30, 25,
    List(GenotypeAllele.REF, GenotypeAllele.ALT))
  val goodHetIndel = buildGt("AA", "T", 30, 50, 30,
    List(GenotypeAllele.REF, GenotypeAllele.ALT))
  val badHetIndel = buildGt("A", "TCG", 30, 20, 20,
    List(GenotypeAllele.REF, GenotypeAllele.ALT, GenotypeAllele.ALT))
  val homRefSnp = buildGt("A", "T", 30, 30, 3,
    List(GenotypeAllele.REF, GenotypeAllele.REF))
  val homRefIndel = buildGt("A", "TCC", 30, 50, 5,
    List(GenotypeAllele.REF))
  val homAltSnp = buildGt("A", "T", 30, 30, 29,
    List(GenotypeAllele.ALT))
  val homAltIndel = buildGt("A", "TCC", 30, 50, 45,
    List(GenotypeAllele.ALT, GenotypeAllele.ALT))

  // Convenience wrapper pinning the suite's thresholds (0.75 SNP / 0.65 indel).
  def shouldRewrite(gt: Genotype,
                    snps: Boolean = true,
                    indels: Boolean = true): Boolean = {
    RewriteHets.shouldRewrite(gt, 0.75f, 0.65f, snps, indels)
  }

  test("should rewrite a bad het snp") {
    assert(shouldRewrite(badHetSnp))
  }

  test("should not rewrite het snp if snp filtering is disabled") {
    assert(!shouldRewrite(badHetSnp, snps = false))
  }

  test("should rewrite a bad het indel") {
    assert(shouldRewrite(badHetIndel))
  }

  test("should not rewrite het indel if indel filtering is disabled") {
    assert(!shouldRewrite(badHetIndel, indels = false))
  }

  test("don't rewrite good het calls") {
    assert(!shouldRewrite(goodHetSnp))
    assert(!shouldRewrite(goodHetIndel))
  }

  test("don't rewrite homozygous calls") {
    assert(!shouldRewrite(homRefSnp))
    assert(!shouldRewrite(homRefIndel))
    assert(!shouldRewrite(homAltSnp))
    assert(!shouldRewrite(homAltIndel))
  }

  // Rewriting keeps the variant and depths but clears the genotype quality and sets all
  // called alleles to ALT.
  test("rewrite a het call as a hom alt snp") {
    val rewrittenGt = RewriteHets.rewriteGenotype(badHetSnp)
    assert(rewrittenGt.getVariant.getReferenceAllele === "A")
    assert(rewrittenGt.getVariant.getAlternateAllele === "T")
    assert(Option(rewrittenGt.getGenotypeQuality).isEmpty)
    assert(rewrittenGt.getAlleles.length === 2)
    assert(rewrittenGt.getAlleles.get(0) === GenotypeAllele.ALT)
    assert(rewrittenGt.getAlleles.get(1) === GenotypeAllele.ALT)
    assert(rewrittenGt.getReadDepth === 30)
    assert(rewrittenGt.getAlternateReadDepth === 25)
  }

  // Convenience wrapper for the full check-and-rewrite path at the suite's thresholds.
  def processGenotype(gt: Genotype,
                      snps: Boolean = true,
                      indels: Boolean = true): Genotype = {
    RewriteHets.processGenotype(gt, 0.75f, 0.65f, snps, indels)
  }

  test("processing a valid call should not change the call") {
    val goodCalls = Seq(goodHetSnp, goodHetIndel,
      homRefSnp, homRefIndel,
      homAltSnp, homAltIndel)
    goodCalls.foreach(gt => assert(gt === processGenotype(gt)))
  }

  test("if processing is disabled, don't rewrite bad calls") {
    val badCalls = Seq(badHetSnp, badHetIndel)
    badCalls.foreach(gt => assert(gt === processGenotype(gt,
      snps = false,
      indels = false)))
  }

  test("process a bad het snp call") {
    val rewrittenGt = processGenotype(badHetSnp)
    assert(rewrittenGt != badHetSnp)
    assert(rewrittenGt.getVariant.getReferenceAllele === "A")
    assert(rewrittenGt.getVariant.getAlternateAllele === "T")
    assert(Option(rewrittenGt.getGenotypeQuality).isEmpty)
    assert(rewrittenGt.getAlleles.length === 2)
    assert(rewrittenGt.getAlleles.get(0) === GenotypeAllele.ALT)
    assert(rewrittenGt.getAlleles.get(1) === GenotypeAllele.ALT)
    assert(rewrittenGt.getReadDepth === 30)
    assert(rewrittenGt.getAlternateReadDepth === 25)
  }

  test("process a bad het indel call") {
    val rewrittenGt = processGenotype(badHetIndel)
    assert(rewrittenGt != badHetIndel)
    assert(rewrittenGt.getVariant.getReferenceAllele === "A")
    assert(rewrittenGt.getVariant.getAlternateAllele === "TCG")
    assert(Option(rewrittenGt.getGenotypeQuality).isEmpty)
    assert(rewrittenGt.getAlleles.length === 3)
    assert(rewrittenGt.getAlleles.get(0) === GenotypeAllele.ALT)
    assert(rewrittenGt.getAlleles.get(1) === GenotypeAllele.ALT)
    assert(rewrittenGt.getAlleles.get(2) === GenotypeAllele.ALT)
    assert(rewrittenGt.getReadDepth === 20)
    assert(rewrittenGt.getAlternateReadDepth === 20)
  }

  // All eight fixtures, used for the whole-RDD tests below.
  val genotypes = Seq(badHetSnp, badHetIndel,
    goodHetSnp, goodHetIndel,
    homRefSnp, homRefIndel,
    homAltSnp, homAltIndel)

  // Wraps the fixtures in a GenotypeRDD with empty metadata.
  def gtRdd: GenotypeRDD = {
    val rdd = sc.parallelize(genotypes)
    GenotypeRDD(rdd,
      SequenceDictionary.empty,
      Seq.empty,
      Seq.empty)
  }

  sparkTest("disable processing for a whole rdd") {
    val rewrittenRdd = RewriteHets(gtRdd,
      TestRewriteHetsArgs(disableHetSnpRewriting = true,
        disableHetIndelRewriting = true))
    val newGts = rewrittenRdd.rdd.collect
    val oldGtSet = genotypes.toSet
    newGts.foreach(gt => assert(oldGtSet(gt)))
  }

  sparkTest("process a whole rdd") {
    val rewrittenRdd = RewriteHets(gtRdd, TestRewriteHetsArgs())
    val newGts = rewrittenRdd.rdd.collect
    val oldGtSet = genotypes.toSet
    // Rewritten genotypes are identified by their cleared genotype quality.
    val (touchedGts, untouchedGts) = newGts.partition(_.getGenotypeQuality == null)
    assert(untouchedGts.size === 6)
    assert(touchedGts.size === 2)
    untouchedGts.foreach(gt => assert(oldGtSet(gt)))
    touchedGts.foreach(gt => assert(gt.getAlleles.forall(_ == GenotypeAllele.ALT)))
  }
}
|
heuermh/bdg-avocado
|
avocado-core/src/test/scala/org/bdgenomics/avocado/util/RewriteHetsSuite.scala
|
Scala
|
apache-2.0
| 7,195 |
package com.socrata.spandex.secondary
import com.rojoma.simplearm._
import com.socrata.datacoordinator.secondary._
import com.socrata.datacoordinator.util.collection.ColumnIdMap
import com.socrata.soql.types.{SoQLText, SoQLType, SoQLValue}
import com.socrata.spandex.common.client._
/**
 * Handles a secondary "resync" event: replaces the indexed state for one dataset copy with
 * the state streamed from the data coordinator. Column values are written in batches of
 * `batchSize`; values longer than `maxValueLength` are truncated by ColumnValue.fromDatum.
 */
class ResyncHandler(
    client: SpandexElasticSearchClient,
    batchSize: Int,
    maxValueLength: Int,
    refresh: RefreshPolicy = Eventually)
  extends SecondaryEventLogger {

  // Rebuilds the copy's metadata and column values from scratch. Order matters: the old
  // column values must be fully deleted (and that delete refreshed) before re-inserting.
  def go(datasetInfo: DatasetInfo,
         copyInfo: CopyInfo,
         schema: ColumnIdMap[ColumnInfo[SoQLType]],
         rows: Managed[Iterator[ColumnIdMap[SoQLValue]]]): Unit = {
    logResync(datasetInfo.internalName, copyInfo.copyNumber)

    // Add dataset copy
    client.putDatasetCopy(
      datasetInfo.internalName,
      copyInfo.copyNumber,
      copyInfo.dataVersion,
      copyInfo.lifecycleStage,
      refresh)

    // Add column maps for text columns (only SoQLText columns are indexed)
    val textColumns =
      schema.toSeq.collect { case (id, info) if info.typ == SoQLText =>
        ColumnMap(datasetInfo.internalName, copyInfo.copyNumber, info)
      }
    textColumns.foreach(client.putColumnMap(_))

    // Delete all existing column values
    // Wait for these delete operations to be refreshed before continuing
    client.deleteColumnValuesByCopyNumber(datasetInfo.internalName, copyInfo.copyNumber, refresh = BeforeReturning)

    // Add/update column values for each row
    insertRows(datasetInfo, copyInfo, schema, rows)
  }

  // Streams the rows (managed resource — the iterator is only valid inside foreach),
  // extracts the text-typed cells, and indexes them in aggregated batches.
  private def insertRows(
      datasetInfo: DatasetInfo,
      copyInfo: CopyInfo,
      schema: ColumnIdMap[ColumnInfo[SoQLType]],
      rows: Managed[Iterator[ColumnIdMap[SoQLValue]]]) = {
    // Add column values for text columns
    rows.foreach { iter =>
      val columnValues = for {
        row <- iter
        (id, value: SoQLText) <- row.iterator
      } yield {
        ColumnValue.fromDatum(datasetInfo.internalName, copyInfo.copyNumber, (id, value), maxValueLength)
      }
      val datasetId = datasetInfo.internalName
      val copyNumber = copyInfo.copyNumber
      columnValues.grouped(batchSize).foreach { batch =>
        client.putColumnValues(datasetId, copyNumber,
          ColumnValue.aggregate(batch).toList, refresh)
      }
    }
  }
}
|
socrata-platform/spandex
|
spandex-secondary/src/main/scala/com.socrata.spandex.secondary/ResyncHandler.scala
|
Scala
|
apache-2.0
| 2,261 |
package com.themillhousegroup.edn.test
/** Case-class shapes used as fixtures by the edn-scala deserialization tests. */
object CaseClassFixtures {
  // Basic case classes (flat fields, with and without Option wrappers):
  case class AllStrings(bish: String, bash: String, bosh: String)
  case class OptionalStrings(bish: String, bash: Option[String], bosh: String)
  case class AllLongs(bash: Option[Long], bosh: Long)
  case class IntsNotLongs(bash: Option[Int], bosh: Int)
  case class MixedBunch(bish: String, bash: Option[Int], bosh: Int)

  // Basic case classes with collections as members:
  case class BasicWithList(bish: String, bash: List[String], bosh: Int)
  case class BasicWithSet(bish: String, bash: Set[String], bosh: Int)
  case class BasicWithMap(bish: String, bash: Map[String, Int], bosh: Int)

  // Case classes with another level of case classes within:
  case class NestedJustOnce(contents: AllStrings)
  case class NestedWithFields(contents: AllStrings, a: Int, b: Int)
  case class NestedOptionally(contents: Option[AllStrings])

  // Multiply-nested case classes (three levels deep, with optional inner nodes):
  case class StringsAllTheWayDown(first: AllStrings, second: Option[AllStrings])
  case class ThreeLevelsDeep(x: Int, y: Int, nest: Option[StringsAllTheWayDown])
}
|
themillhousegroup/edn-scala
|
src/test/scala/com/themillhousegroup/edn/test/CaseClassFixtures.scala
|
Scala
|
gpl-2.0
| 1,124 |
package propertynder.ml
import java.io.File
import breeze.linalg._
import propertynder.ml.Transform._
/** Loads CSV training data into breeze matrices for the ML routines. */
object CSVLoader {

  // A feature matrix (one row per example, bias/derived columns included) plus its labels.
  case class TrainingSet(examples: DenseMatrix[Double], labels: DenseVector[Double])

  // Reads the CSV (optionally skipping header lines), splits off the last column as labels,
  // and applies the caller-supplied transformation to the remaining feature columns.
  // Breeze slicing: `-1` addresses the last column, `0 to -2` all but the last.
  private def mkTrainingSet(f: File, skip: Option[Int])(exampleTransformation: DenseMatrix[Double] => DenseMatrix[Double]) = {
    val mat = skip.fold(csvread(f, ','))(l => csvread(f, ',', skipLines = l))
    val labels = mat(::, -1)
    val noLabels = mat(::, 0 to -2)
    val examples: DenseMatrix[Double] = exampleTransformation(noLabels)
    TrainingSet(examples, labels)
  }

  /**
   * Load a CSV file representing the training set,
   * with the last column denoting labels.
   * It also adds the bias column at the beginning
   */
  def loadLinear(f: File, skip: Option[Int] = None): TrainingSet =
    mkTrainingSet(f, skip)(addBias)

  /**
   * Load a CSV file representing the training set,
   * with the last column denoting labels.
   * If the training set has the parameter x1, x2, ..., xn, this will create
   * a training set with the shape:
   * 1, x1 ** 2, x1 * x2, ..., x1 * xn, x2 ** 2, ..., x2 * xn, ..., xn ** 2, x1, x2, ..., xn
   * (note the bias at the first column)
   * So a a training set with n parameters will generate another training set with 1 + n * (n + 1) / 2 + n parameters
   */
  def loadQuadratic(f: File, skip: Option[Int] = None): TrainingSet = {
    mkTrainingSet(f, skip) { noLabels =>
      val n = noLabels.cols
      val m = noLabels.rows
      // 1 bias + n * (n + 1) / 2 quadratic terms + n linear terms
      val empty = DenseMatrix.zeros[Double](0, 1 + n * (n + 1) / 2 + n)
      // NOTE(review): vertcat inside a fold copies the accumulator on every row, which is
      // O(m^2) in the row count — consider preallocating an (m x cols) matrix instead.
      (0 until m).foldLeft(empty) { case (accRes, i) =>
        val row = noLabels(i, ::)
        val newResultRow = toQuadraticParameters(row.t).toDenseMatrix
        DenseMatrix.vertcat(accRes, newResultRow)
      }
    }
  }
}
|
ostapneko/propertynder
|
src/main/scala/propertynder/ml/CSVLoader.scala
|
Scala
|
mit
| 1,877 |
/*
* Copyright 2010-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
package testing
import net.liftweb.util.Helpers._
import net.liftweb.common.{ Box, Full, Empty, Failure}
import net.liftweb.util.{Helpers}
/*
* The purpose of these classes is not to run actual tests,
* but to insure that tests can be run correctly by
* making sure they compile correctly
*/
// Compile-time fixture (see the comment above): exercises the TestKit API shapes —
// TestResponse composition via foreach and for-comprehension. Not executed as a test.
object MyCode extends TestKit {
  val baseUrl = ""

  val l2: TestResponse = post("/foo")
  l2.foreach {
    x: HttpResponse =>
      // A response can issue follow-up requests and yield further TestResponses.
      val l3: TestResponse = x.get("ddd")
      println("Hello")
  }

  // Responses must also compose in a for-comprehension.
  for {
    login <- post("/whatever")
    next <- login.get("/bla")
  } {}
}
// Compile-time fixture: same API-shape checks as MyCode, but against the Box-based
// RequestKit variant (Box[TheResponse] instead of TestResponse). Not executed as a test.
object MyBoxCode extends RequestKit {
  def baseUrl = ""

  val l2: Box[TheResponse] = post("/foo")
  l2.foreach {
    x: TheResponse =>
      // Follow-up requests are also boxed.
      val l3: Box[TheResponse] = x.get("ddd")
      println("Hello")
  }

  // Boxed responses must compose in a for-comprehension (with an explicit type ascription).
  for {
    login: TheResponse <- post("/whatever")
    next <- login.get("/bla")
  } {}
}
|
lzpfmh/framework-2
|
web/testkit/src/test/scala/net/liftweb/http/testing/TestObjects.scala
|
Scala
|
apache-2.0
| 1,537 |
package com.alanjz.microstrike.gear
// NOTE(review): empty stub — presumably the incendiary grenade gear item (judging by the
// name and package); no behaviour is implemented yet. Implement or remove.
class Incendiary {
}
|
spacenut/microstrike
|
src/com/alanjz/microstrike/gear/Incendiary.scala
|
Scala
|
gpl-2.0
| 59 |
package com.twitter.finagle.netty4.proxy
import com.twitter.finagle.ProxyConnectException
import io.netty.buffer.{ByteBuf, Unpooled}
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.handler.proxy.{ProxyHandler, ProxyConnectException => NettyProxyConnectException}
import io.netty.util.concurrent.{Future, GenericFutureListener}
import org.scalatest.{FunSuite, OneInstancePerTest}
import java.net.InetSocketAddress
/**
 * Tests for Netty4ProxyConnectHandler using an EmbeddedChannel and a fake ProxyHandler:
 * verifies cancellation before handshake completion, successful handshake (including codec
 * removal and handler self-removal), and failure propagation as ProxyConnectException.
 */
class Netty4ProxyConnectHandlerTest extends FunSuite with OneInstancePerTest {

  val fakeAddress = InetSocketAddress.createUnresolved("proxy", 0)

  // Minimal ProxyHandler that records codec removal and treats any inbound message as a
  // completed handshake response.
  class FakeProxyHandler extends ProxyHandler(fakeAddress) {
    private[this] var removedDecoder = false
    private[this] var removedEncder = false

    // True once both encoder and decoder have been removed from the pipeline.
    def removedCodec: Boolean = removedDecoder && removedEncder

    override def removeDecoder(ctx: ChannelHandlerContext): Unit = {
      removedDecoder = true
    }

    override def removeEncoder(ctx: ChannelHandlerContext): Unit = {
      removedEncder = true
    }

    override def protocol(): String = "proxy"
    override def authScheme(): String = "auth"
    override def addCodec(ctx: ChannelHandlerContext): Unit = ()
    // Any response completes the handshake.
    override def handleResponse(ctx: ChannelHandlerContext, response: Any): Boolean = true
    override def newInitialMessage(ctx: ChannelHandlerContext): AnyRef =
      Unpooled.wrappedBuffer("connect".getBytes())
  }

  // Fresh handler + embedded channel per test (OneInstancePerTest).
  val (handler, fakeHandler, channel) = {
    val fh = new FakeProxyHandler
    val hd = new Netty4ProxyConnectHandler(fh)
    val ch = new EmbeddedChannel(hd)
    (hd, fh, ch)
  }

  test("canceled before completed connection") {
    val connectPromise = channel.connect(fakeAddress)
    channel.writeOutbound("foo")
    channel.readOutbound[ByteBuf]().release() // drops the proxy handshake message
    assert(!connectPromise.isDone)
    assert(connectPromise.cancel(true))
    assert(!channel.isActive)
    intercept[NettyProxyConnectException] { channel.finishAndReleaseAll() }
  }

  test("success") {
    val promise = channel.connect(fakeAddress)
    assert(!promise.isDone)
    // Writes made before the handshake completes must be buffered, not sent.
    channel.writeOutbound("foo")
    channel.readOutbound[ByteBuf]().release() // drops the proxy handshake message
    assert(channel.readOutbound[Any]() == null)
    promise.addListener(new GenericFutureListener[Future[Any]] {
      def operationComplete(future: Future[Any]): Unit =
        // The codec should be already removed when connect promise is satsfied.
        // See https://github.com/netty/netty/issues/6671
        assert(fakeHandler.removedCodec)
    })
    // Any inbound message completes the fake handshake; buffered writes are then flushed
    // and the connect handler removes itself from the pipeline.
    channel.writeInbound(Unpooled.wrappedBuffer("connected".getBytes))
    assert(promise.isDone)
    assert(channel.readOutbound[String]() == "foo")
    assert(channel.pipeline().get(classOf[Netty4ProxyConnectHandler]) == null)
    assert(!channel.finishAndReleaseAll())
  }

  test("failure") {
    val promise = channel.connect(fakeAddress)
    assert(!promise.isDone)
    channel.writeOutbound("foo")
    channel.readOutbound[ByteBuf]().release() // drops the proxy handshake message
    assert(channel.readOutbound[String]() == null)
    // A pipeline exception before handshake completion must fail the connect promise with
    // Finagle's ProxyConnectException.
    channel.pipeline().fireExceptionCaught(new Exception())
    intercept[Exception](channel.checkException())
    assert(promise.isDone)
    assert(promise.cause.isInstanceOf[ProxyConnectException])
    assert(!channel.finishAndReleaseAll())
  }
}
luciferous/finagle
|
finagle-netty4/src/test/scala/com/twitter/finagle/netty4/proxy/Netty4ProxyConnectHandlerTest.scala
|
Scala
|
apache-2.0
| 3,384 |
package com.outr.arango
import com.outr.arango.api.model.GetAPIDatabaseNew
import com.outr.arango.api.{APIDatabase, APIDatabaseDatabaseName}
import com.outr.arango.model.ArangoResponse
import io.youi.net.Path
import profig.JsonUtil
import scala.concurrent.{ExecutionContext, Future}
class SystemDatabase(db: ArangoDB) extends ArangoDatabase(db, db.client.path(Path.parse(s"/_db/_system")), "_system") {
  /**
   * Creates a new database with the given name via the `_api/database` endpoint.
   *
   * @param databaseName name of the database to create
   * @return the parsed ArangoDB response; `result` is true on success
   */
  def create(databaseName: String)(implicit ec: ExecutionContext): Future[ArangoResponse[Boolean]] = {
    val request = GetAPIDatabaseNew(name = databaseName)
    // TODO: Support setting user
    APIDatabase.post(client, request).map { json =>
      JsonUtil.fromJson[ArangoResponse[Boolean]](json)
    }
  }

  /**
   * Drops the database with the given name.
   *
   * @param databaseName name of the database to delete
   * @return the parsed ArangoDB response; `result` is true on success
   */
  def drop(databaseName: String)(implicit ec: ExecutionContext): Future[ArangoResponse[Boolean]] = {
    APIDatabaseDatabaseName.delete(client, databaseName).map { json =>
      JsonUtil.fromJson[ArangoResponse[Boolean]](json)
    }
  }
}
|
outr/arangodb-scala
|
driver/src/main/scala/com/outr/arango/SystemDatabase.scala
|
Scala
|
mit
| 917 |
package mr.merc.ui.world
import mr.merc.local.Localization
import mr.merc.map.hex.TerrainHexField
import mr.merc.ui.common.CanvasLayers
import mr.merc.ui.minimap.Minimap
import scalafx.scene.layout.Pane
import scalafx.application.Platform
// Root layout pane of the world screen: holds the scrollable world canvas, the
// minimap (bottom-right) and an empty placeholder pane (top-right) that is
// covered by optional right-top/face/full panels.
class WorldInterfacePane(frame: WorldFrame, val worldCanvas: CanvasLayers, terrainField: TerrainHexField, factor:Double, pixelWidth:Int, pixelHeight:Int) extends Pane {
  private val emptyPane = new Pane with WorldInterfaceNode
  private val minimapChild = new Minimap(terrainField, worldCanvas, factor, pixelWidth, pixelHeight, false)
  private val minimap: Pane = new MinimapParent(minimapChild)
  children = List(worldCanvas, minimap, emptyPane)

  // The canvas fills everything left of the minimap column.
  worldCanvas.prefWidth <== this.width - minimap.width
  worldCanvas.prefHeight <== this.height

  layoutX = 0
  layoutY = 0
  rebindMinimapAndEndTurn(false)

  // Rebinds minimap and placeholder geometry. When collapsed, the minimap's
  // layoutY is bound to this.height, i.e. it is pushed below the visible area.
  def rebindMinimapAndEndTurn(minimapCollapsed: Boolean): Unit = {
    if (minimapCollapsed) {
      minimap.layoutX <== this.width - this.width / 5
      minimap.layoutY <== this.height
      minimap.prefWidth <== this.width / 4
      minimap.prefHeight <== this.width / 5
    } else {
      minimap.layoutX <== this.width - this.width / 4
      minimap.layoutY <== this.height - this.width / 5
      minimap.prefWidth <== this.width / 4
      minimap.prefHeight <== this.width / 5
    }
    // Placeholder occupies the right column above the minimap.
    emptyPane.layoutX <== this.width - this.width / 4
    emptyPane.layoutY = 0
    emptyPane.prefWidth <== this.width / 4
    emptyPane.prefHeight <== this.height - this.width / 5
  }

  // Currently shown optional panels (at most one of each at a time).
  private var rightTopPanel: Option[Pane] = None
  private var facePanel: Option[Pane] = None
  private var fullPanel: Option[Pane] = None

  // Replaces the right-top panel; geometry matches emptyPane's bindings.
  def setRightTopPanel(pane: Pane): Unit = {
    removeRightTopPanel()
    rightTopPanel = Some(pane)
    children.add(pane)
    pane.layoutX <== this.width - this.width / 4
    pane.layoutY = 0
    pane.prefWidth <== this.width / 4
    pane.prefHeight <== this.height - this.width / 5
    pane.requestFocus()
  }

  def removeRightTopPanel(): Unit = {
    rightTopPanel.foreach { p =>
      this.children.remove(p)
    }
    rightTopPanel = None
  }

  def removeFacePanel(): Unit ={
    facePanel.foreach { p =>
      this.children.remove(p)
    }
    facePanel = None
  }

  def removeFullPanel(): Unit = {
    fullPanel.foreach { p =>
      this.children.remove(p)
    }
    fullPanel = None
  }

  def showMinimap(): Unit = {
    rebindMinimapAndEndTurn(false)
  }

  def hideMinimap(): Unit = {
    rebindMinimapAndEndTurn(true)
  }

  // Swaps the minimap's backing hex field and redraws it.
  def refreshMinimap(field:TerrainHexField): Unit = {
    minimapChild.terrainHexField = field
    minimapChild.refreshMapCanvas()
  }

  // Face panel covers the left three quarters of the screen.
  def setFacePanel(pane: Pane): Unit = {
    removeFacePanel()
    facePanel = Some(pane)
    children.add(pane)
    pane.layoutX = 0
    pane.layoutY = 0
    pane.prefWidth <== this.width * 3 / 4
    pane.prefHeight <== this.height
    pane.requestFocus()
  }

  // Full panel covers the entire pane.
  def setFullPanel(pane: Pane): Unit = {
    removeFullPanel()
    fullPanel = Some(pane)
    children.add(pane)
    pane.layoutX = 0
    pane.layoutY = 0
    pane.prefWidth <== this.width
    pane.prefHeight <== this.height
    pane.requestFocus()
  }

  def minimapPos: (Double, Double) = (worldCanvas.hvalue.value, worldCanvas.vvalue.value)

  // NOTE(review): the getter returns (hvalue, vvalue) but the setter assigns
  // hvalue from c._2 and vvalue from c._1, so `minimapPos = minimapPos` would
  // swap the scroll axes. Confirm whether this inversion is intentional.
  def minimapPos_=(c:(Double, Double)): Unit = {
    worldCanvas.hvalue.value = c._2
    worldCanvas.vvalue.value = c._1
    Platform.runLater {
      minimapChild.updatePositionOnMinimap()
    }
  }
}
|
RenualdMarch/merc
|
src/main/scala/mr/merc/ui/world/WorldInterfacePane.scala
|
Scala
|
gpl-3.0
| 3,461 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
/**
* An example k-means app. Run with
* {{{
* ./bin/spark-example org.apache.spark.examples.mllib.DenseKMeans [options] <input>
* }}}
* If you use it as a template to create your own app, please use `spark-submit` to submit your app.
*/
object DenseKMeans {

  /** Supported cluster-center initialization strategies. */
  object InitializationMode extends Enumeration {
    type InitializationMode = Value
    val Random, Parallel = Value
  }

  import InitializationMode._

  /**
   * Command-line parameters.
   *
   * @param input              path(s) to the whitespace-separated dense vectors
   * @param k                  number of clusters (required, hence the -1 sentinel)
   * @param numIterations      maximum number of k-means iterations
   * @param initializationMode random or k-means|| ("parallel") seeding
   */
  case class Params(
      input: String = null,
      k: Int = -1,
      numIterations: Int = 10,
      initializationMode: InitializationMode = Parallel)

  def main(args: Array[String]): Unit = {
    val defaultParams = Params()

    val parser = new OptionParser[Params]("DenseKMeans") {
      head("DenseKMeans: an example k-means app for dense data.")
      opt[Int]('k', "k")
        .required()
        .text(s"number of clusters, required")
        .action((x, c) => c.copy(k = x))
      opt[Int]("numIterations")
        .text(s"number of iterations, default: ${defaultParams.numIterations}")
        .action((x, c) => c.copy(numIterations = x))
      opt[String]("initMode")
        .text(s"initialization mode (${InitializationMode.values.mkString(",")}), " +
          s"default: ${defaultParams.initializationMode}")
        .action((x, c) => c.copy(initializationMode = InitializationMode.withName(x)))
      arg[String]("<input>")
        .text("input paths to examples")
        .required()
        .action((x, c) => c.copy(input = x))
    }

    // Pattern match on the parse result instead of side-effecting
    // map/getOrElse: parsing either yields params or we exit with an error.
    parser.parse(args, defaultParams) match {
      case Some(params) => run(params)
      case None => sys.exit(1)
    }
  }

  /** Loads the input vectors, runs k-means and prints the resulting cost. */
  def run(params: Params): Unit = {
    val conf = new SparkConf().setAppName(s"DenseKMeans with $params")
    val sc = new SparkContext(conf)

    Logger.getRootLogger.setLevel(Level.WARN)

    // Each input line is a whitespace-separated dense vector of doubles.
    val examples = sc.textFile(params.input).map { line =>
      Vectors.dense(line.split(' ').map(_.toDouble))
    }.cache()

    val numExamples = examples.count()
    println(s"numExamples = $numExamples.")

    val initMode = params.initializationMode match {
      case Random => KMeans.RANDOM
      case Parallel => KMeans.K_MEANS_PARALLEL
    }

    val model = new KMeans()
      .setInitializationMode(initMode)
      .setK(params.k)
      .setMaxIterations(params.numIterations)
      .run(examples)

    // Within-cluster sum of squared distances.
    val cost = model.computeCost(examples)
    println(s"Total cost = $cost.")

    sc.stop()
  }
}
|
adobe-research/spark-cluster-deployment
|
initial-deployment-puppet/modules/spark/files/spark/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
|
Scala
|
apache-2.0
| 3,440 |
import org.apache.spark.sql.types._
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.streaming.Trigger
import scala.concurrent.duration._
// load the trained model we saved in the previous step
// Location of the pipeline model trained in the previous step.
val datadir = "/user/pliu/CMAPSS/"

// load the trained model we saved in the previous step
val model = PipelineModel.load(datadir + "gbtmodel")

// Kafka source configuration; fill in the broker list before running.
val kafkaBrokers = "{{your Kafka brokers}}"
val kafkaTopic = "kafkalab"
val delim = ","
val maxOffsetPerTrigger = 50
val triggerInterval = "10 seconds"

// Raw Kafka stream: key/value byte records from the configured topic.
val dfraw = spark.readStream.
  format("kafka").
  option("kafka.bootstrap.servers", kafkaBrokers).
  option("subscribe", kafkaTopic).
  option("startingOffsets", "earliest").
  option("maxOffsetsPerTrigger", maxOffsetPerTrigger).
  load

// Parse the CSV payload: split on the delimiter and pull out the cycle count
// and the four sensor readings (s9, s11, s14, s15) the model was trained on.
val dftyped = dfraw.
  select($"key".cast(StringType).alias("id"), $"value".cast(StringType).alias("val")).
  withColumn("_tmp", split($"val", delim)).
  select(
    $"id",
    $"_tmp".getItem(0).cast(IntegerType).alias("cycle"),
    $"_tmp".getItem(3).cast(FloatType).alias("s9"),
    $"_tmp".getItem(4).cast(FloatType).alias("s11"),
    $"_tmp".getItem(5).cast(FloatType).alias("s14"),
    $"_tmp".getItem(6).cast(FloatType).alias("s15")
  ).drop("_tmp")

// Stamp each record with its processing time.
val df = dftyped.withColumn("ts", current_timestamp)

// Score the stream with the loaded pipeline model.
val predictions = model.transform(df)

// Emit predicted remaining useful life (RUL_Pred) to the console every trigger.
val query = predictions.select($"ts", $"id", $"cycle", $"RUL_Pred").writeStream.
  format("console").
  option("truncate", false).
  trigger(Trigger.ProcessingTime(Duration(triggerInterval))).
  start
|
liupeirong/Azure
|
IoTKafkaSpark/6.Predict/streamingPredict.scala
|
Scala
|
mit
| 1,473 |
import scala.tools.partest._
import scala.tools.nsc._
// Regression test: typechecking the same source twice in one compiler instance
// must not fail (was: "illegal cyclic reference involving package ...") for
// package objects that define a trait/class and a same-named object.
object Test extends DirectTest {

  // Stop after typer: this test only exercises typechecking.
  override def extraSettings: String = "-usejavacp -nowarn -Ystop-after:typer"

  override def code = "" // not used

  def code1 = """
package object p1 {
  trait A
  object A
}
  """

  def code2 = """
package object p2 {
  class A
  object A
}
  """

  def code3 = """
package object p3 {
  object A
  trait A
}
  """

  def code4 = """
package object p4 {
  object A
  trait A
}
  """

  def show() {
    val global = newCompiler()
    import global._

    // Compiles `code` in a fresh Run and asserts there are no errors.
    def typecheck(code: String): Unit = {
      val r = new Run
      val sourceFile = newSources(code).head
      global.reporter.reset()
      r.compileSources(sourceFile :: Nil)
      assert(!global.reporter.hasErrors)
    }

    // The bug only manifested on the second compilation of the same code.
    def typecheckTwice(code: String): Unit = {
      typecheck(code)
      typecheck(code)
    }

    // was: illegal cyclic reference involving package ...
    Seq(code1, code2, code3, code4) foreach typecheckTwice
  }
}
|
yusuke2255/dotty
|
tests/pending/run/t8029.scala
|
Scala
|
bsd-3-clause
| 996 |
import scala.quoted._
// Staged interpreter for a tiny arithmetic expression language: `eval` lifts a
// compile-time E[T] expression tree back into ordinary code via quotes/splices.
object E {

  // Macro entry point: evaluates the expression tree at compile time.
  inline def eval[T](inline x: E[T]): T = ${ impl('x) }

  // Unlifts the quoted tree into an E[T] value, then lifts it to an Expr[T].
  def impl[T: Type](expr: Expr[E[T]]) (using QuoteContext): Expr[T] =
    expr.unliftOrError.lift

  // Pattern-matches the quoted constructors back into E[T] values, picking the
  // Int or Double type-class instance based on the matched type argument.
  implicit def ev1[T: Type]: Unliftable[E[T]] = new Unliftable { // TODO use type class derivation
    def apply(x: Expr[E[T]]) (using QuoteContext): Option[E[T]] = (x match {
      case '{ I(${Const(n)}) } => Some(I(n))
      case '{ D(${Const(n)}) } => Some(D(n))
      case '{ Plus[Int](${Value(x)}, ${Value(y)})(using $op) } => Some(Plus(x, y)(using Plus2.IPlus))
      case '{ Plus[Double](${Value(x)}, ${Value(y)})(using $op) } => Some(Plus(x, y)(using Plus2.DPlus))
      case '{ Times[Int](${Value(x)}, ${Value(y)})(using $op) } => Some(Times(x, y)(using Times2.ITimes))
      case '{ Times[Double](${Value(x)}, ${Value(y)})(using $op) } => Some(Times(x, y)(using Times2.DTimes))
      case _ => None
    }).asInstanceOf[Option[E[T]]]
  }

  // Extractor that recursively unlifts sub-expressions in the patterns above.
  object Value {
    def unapply[T, U >: T](expr: Expr[T])(using Unliftable[U], QuoteContext): Option[U] = expr.unlift
  }
}

// An expression that knows how to lift itself back into quoted code.
trait E[T] {
  def lift (using QuoteContext): Expr[T]
}

// Integer literal.
case class I(n: Int) extends E[Int] {
  def lift (using QuoteContext): Expr[Int] = Expr(n)
}

// Double literal.
case class D(n: Double) extends E[Double] {
  def lift (using QuoteContext): Expr[Double] = Expr(n)
}

// Addition node; the Plus2 instance supplies the typed `+` code generation.
case class Plus[T](x: E[T], y: E[T])(implicit op: Plus2[T]) extends E[T] {
  def lift (using QuoteContext): Expr[T] = op(x.lift, y.lift)
}

// Multiplication node; the Times2 instance supplies the typed `*` code generation.
case class Times[T](x: E[T], y: E[T])(implicit op: Times2[T]) extends E[T] {
  def lift (using QuoteContext): Expr[T] = op(x.lift, y.lift)
}

// Binary operation code generator.
trait Op2[T] {
  def apply(x: Expr[T], y: Expr[T]) (using QuoteContext): Expr[T]
}

trait Plus2[T] extends Op2[T]
object Plus2 {
  implicit case object IPlus extends Plus2[Int] {
    def apply(x: Expr[Int], y: Expr[Int]) (using QuoteContext): Expr[Int] = '{$x + $y}
  }
  implicit case object DPlus extends Plus2[Double] {
    def apply(x: Expr[Double], y: Expr[Double]) (using QuoteContext): Expr[Double] = '{$x + $y}
  }
}
|
som-snytt/dotty
|
tests/run-macros/inline-macro-staged-interpreter/Macro_1.scala
|
Scala
|
apache-2.0
| 2,379 |
package pl.pholda.malpompaaligxilo.dsl
// JVM instantiation of the shared LogicExprParserTest suite, wiring in the
// JVM-specific TestForm implementation.
object LogixExprParserJVMTest extends LogicExprParserTest {
  override def testForm: TestForm = TestFormJVM
}
|
pholda/MalpompaAligxilo
|
dsl/jvm/src/test/scala/pl/pholda/malpompaaligxilo/dsl/LogixExprParserJVMTest.scala
|
Scala
|
gpl-3.0
| 150 |
package com.scalaAsm.x86
package Instructions
package General
// Description: Call to Interrupt Procedure
// Category: general/breakstack
// Definition of the INT1 instruction (single opcode 0xF1, no explicit operands).
trait INT1 extends InstructionDefinition {
  val mnemonic = "INT1"
}

object INT1 extends ZeroOperands[INT1] with INT1Impl

trait INT1Impl extends INT1 {
  // Encoding: bare opcode 0xF1; the operand is implicit.
  implicit object _0 extends NoOp{
    val opcode: OneOpcode = 0xF1
    override def hasImplicitOperand = true
  }
}
|
bdwashbu/scala-x86-inst
|
src/main/scala/com/scalaAsm/x86/Instructions/General/INT1.scala
|
Scala
|
apache-2.0
| 415 |
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package scalaguide.advanced.extending
import play.api.test._
// Documentation spec exercising the plugin examples below; the //#-delimited
// regions are extracted verbatim into the docs, so comments stay outside them.
object ScalaPlugins extends PlaySpecification {
  "scala plugins" should {
    "allow accessing plugins" in {
      val app = FakeApplication(additionalPlugins = Seq(classOf[MyPlugin].getName))
      // Captured inside running(app) so we can assert onStop ran afterwards.
      var mc: MyComponent = null
      running(app) {
        //#access-plugin
        import play.api.Play
        import play.api.Play.current
        val myComponent = Play.application.plugin[MyPlugin]
          .getOrElse(throw new RuntimeException("MyPlugin not loaded"))
          .myComponent
        //#access-plugin
        myComponent.started must beTrue
        mc = myComponent
      }
      // After the app shuts down, the plugin's onStop must have fired.
      mc.stopped must beTrue
    }

    "allow the actors example to work" in {
      val app = FakeApplication(additionalPlugins = Seq(classOf[Actors].getName))
      running(app) {
        import scala.concurrent.duration._
        import akka.pattern.ask
        implicit def timeout = 20.seconds
        // MyActor echoes whatever it receives.
        await(Actors.myActor ? "hi") must_== "hi"
      }
    }
  }
}
//#my-plugin
//###insert: package plugins
import play.api.{Plugin, Application}
class MyPlugin extends Plugin {
val myComponent = new MyComponent()
override def onStart() = {
myComponent.start()
}
override def onStop() = {
myComponent.stop()
}
override def enabled = true
}
//#my-plugin
// Test double recording the lifecycle transitions driven by MyPlugin's
// onStart/onStop callbacks.
class MyComponent() {
  var started: Boolean = false // set once start() has been called
  var stopped: Boolean = false // set once stop() has been called
  def start() = started = true
  def stop() = stopped = true
}
//#actor-example
//###insert: package actors
import play.api._
import play.api.libs.concurrent.Akka
import akka.actor._
import javax.inject.Inject
class Actors @Inject() (implicit app: Application) extends Plugin {
lazy val myActor = Akka.system.actorOf(MyActor.props, "my-actor")
}
object Actors {
def myActor: ActorRef = Play.current.plugin[Actors]
.getOrElse(throw new RuntimeException("Actors plugin not loaded"))
.myActor
}
//#actor-example
object MyActor {
  // Props factory so callers never construct the actor directly.
  def props = Props(classOf[MyActor])
}

// Minimal echo actor: replies to the sender with the received message.
class MyActor extends Actor {
  def receive = {
    case msg => sender ! msg
  }
}
|
jyotikamboj/container
|
pf-documentation/manual/working/scalaGuide/advanced/extending/code/ScalaPlugins.scala
|
Scala
|
mit
| 2,163 |
package odfi.server.manager.ui
// Landing page of the ODFI manager UI: renders a single "Welcome" heading.
class WelcomeView extends ODFIBaseUI {
  this.pageContent {
    h1("Welcome") {
    }
  }
}
|
richnou/odfi-manager
|
server/src/main/scala/odfi/server/manager/ui/WelcomeView.scala
|
Scala
|
lgpl-3.0
| 136 |
package vggames.regex
import vggames.shared.task.JudgedTask
import vggames.shared.task.Task
// One task of the regex game: the player must write a regex that matches every
// target in `matchingTargets`.
class Match(matchingTargets : MatcherTargets) extends Task {

  // Builds a Regex from the player's answer and judges it against all targets.
  def judge(challenge : String) : JudgedTask = new Regex(challenge).matchAll(matchingTargets).judgment();

  // Challenge prompt shown to the player (Portuguese UI text).
  def challenge : String = "Qual RegEx reconhece " + matchingTargets.asHtml() + "?"

  def resource = ""
}
|
vidageek/games
|
games/regex/src/main/scala/vggames/regex/Match.scala
|
Scala
|
gpl-3.0
| 370 |
package mesosphere.marathon
package raml
import org.apache.mesos.{Protos => Mesos}
import scala.collection.immutable.Map
// Conversions between internal state env-var representations and their RAML
// API forms, in both directions, plus a Protobuf-to-RAML writer.
trait EnvVarConversion {

  // state -> RAML: plain strings become EnvVarValue, secret refs EnvVarSecret.
  implicit val envVarRamlWrites: Writes[Map[String, state.EnvVarValue], Map[String, EnvVarValueOrSecret]] =
    Writes {
      _.mapValues {
        case (state.EnvVarString(v)) => EnvVarValue(v)
        case (state.EnvVarSecretRef(secret: String)) => EnvVarSecret(secret)
      }
    }

  // RAML -> state: exact inverse of envVarRamlWrites.
  implicit val envVarReads: Reads[Map[String, EnvVarValueOrSecret], Map[String, state.EnvVarValue]] =
    Reads {
      _.mapValues {
        case EnvVarValue(v) => state.EnvVarString(v)
        case EnvVarSecret(secret: String) => state.EnvVarSecretRef(secret)
      }
    }

  // Protobuf -> RAML: merges plain Mesos env vars with SECRET-typed references;
  // a secret ref with the same name overrides the plain entry.
  implicit val envProtoRamlWrites: Writes[(Seq[Mesos.Environment.Variable], Seq[Protos.EnvVarReference]), Map[String, EnvVarValueOrSecret]] =
    Writes {
      case (env, refs) =>
        val vanillaEnv: Map[String, EnvVarValueOrSecret] = env.map { item =>
          item.getName -> EnvVarValue(item.getValue)
        }(collection.breakOut)

        vanillaEnv ++ refs.withFilter(_.getType == Protos.EnvVarReference.Type.SECRET).map { secretRef =>
          secretRef.getName -> EnvVarSecret(secretRef.getSecretRef.getSecretId)
        }
    }
}

object EnvVarConversion extends EnvVarConversion
|
gsantovena/marathon
|
src/main/scala/mesosphere/marathon/raml/EnvVarConversion.scala
|
Scala
|
apache-2.0
| 1,311 |
package eu.monniot.redis.plugin
import java.io.{BufferedReader, InputStreamReader}
import redis.embedded.{Redis, RedisExecProvider, RedisServer}
import redis.embedded.cluster.RedisCluster
import sbt.{File, Logger, TestEvent, TestResult, TestsListener}
// sbt TestsListener that starts the configured embedded Redis servers/clusters
// before the test run (doInit) and stops them afterwards (doComplete).
class RedisTestsListener(logger: Logger,
                         binaries: Seq[((String, OS, Architecture), String)],
                         instances: Seq[RedisInstance]) extends TestsListener {
  // Populated in doInit; null until then (doComplete guards against that).
  private var redisServers: Seq[RedisServer] = _
  private var redisClusters: Seq[RedisCluster] = _

  override def doInit(): Unit = {
    val redisExecProviders = buildProvider(binaries)
    logger.debug(s"Redis configuration: ${binaries.toMap}")
    logger.debug(s"Redis servers defined: $instances")
    startRedisCluster(logger, redisExecProviders, instances.filter(m => m.isRedisCluster))
    startRedisServer(logger, redisExecProviders, instances.filter(m => m.isRedisServer))
  }

  override def doComplete(finalResult: TestResult): Unit = {
    logger.info("Stopping redis instances")
    if (redisServers != null) {
      redisServers.foreach(_.stop())
    }
    if (redisClusters != null) {
      redisClusters.foreach(_.stop())
    }
  }

  // Groups the binary paths by Redis version and builds one RedisExecProvider
  // per version, registering each (OS, arch) -> path override on it.
  private def buildProvider(redisBinaries: Seq[((String, OS, Architecture), String)]) = {
    redisBinaries
      .map { case ((v, os, arch), path) =>
        (v, os, arch, path)
      }
      .groupBy(_._1)
      .map { case (v, list) =>
        val provider = RedisExecProvider.build()
        list.foreach { case (_, os, arch, path) =>
          provider.`override`(os.toJava, arch.toJava, path)
        }
        (v, provider)
      }
  }

  // Starts one standalone server per configured instance, each on the first
  // port of a fresh copy of its port iterator.
  private def startRedisServer(logger: Logger, providers: Map[String, RedisExecProvider], redisList: Seq[RedisInstance]): Unit = {
    redisServers = redisList.map { config =>
      val port = config.ports.copy().next()
      ensureFileExecutable(providers(config.version).get(), logger)
      val redisServer = new RedisServer.Builder()
        .redisExecProvider(providers(config.version))
        .port(port)
        .build()
      startAndCaptureErrors(redisServer, logger)
      logger.info(s"Redis Server started on port $port")
      redisServer
    }
  }

  // Starts one cluster per configured instance with the requested master count.
  private def startRedisCluster(logger: Logger, providers: Map[String, RedisExecProvider], redis: Seq[RedisInstance]): Unit = {
    redisClusters = redis.map { config =>
      logger.info("Starting Redis Cluster")
      ensureFileExecutable(providers(config.version).get(), logger)
      val redisCluster = new RedisCluster.Builder()
        .serverPorts(config.ports.copy())
        .numOfMasters(config.numOfMaster)
        .withServerBuilder(
          new RedisServer.Builder()
            .setting("bind 127.0.0.1")
            .redisExecProvider(providers(config.version))
        )
        .build()
      startAndCaptureErrors(redisCluster, logger)
      logger.info(s"Redis Cluster started on ports ${redisCluster.ports()}")
      redisCluster
    }
  }

  // Downloaded/extracted binaries may lack the executable bit; set it if needed.
  private def ensureFileExecutable(file: File, logger: Logger) = {
    if (!file.canExecute) {
      logger.debug(s"Making ${file.getAbsolutePath} executable.")
      file.setExecutable(true, true)
    }
  }

  // Starts the instance; on failure, scans its stderr for "Address already in
  // use" and rethrows only in that case (other startup noise is tolerated).
  // NOTE(review): `reader` is never closed; presumably its lifetime is tied to
  // the redis process's error stream — confirm no descriptor leak on failure.
  private def startAndCaptureErrors(redis: Redis, logger: Logger): Unit = {
    val reader = new BufferedReader(new InputStreamReader(redis.errors()))
    try {
      redis.start()
    } catch {
      case e: RuntimeException =>
        val ports = redis.ports()
        val error = Stream.continually(reader.readLine()).takeWhile(_ != null).foldLeft(false) { case (_, line) =>
          if (line.contains("Address already in use")) {
            logger.error(s"[${redis.getClass.getSimpleName}@$ports] $line")
            true
          } else false
        }
        if (error) throw e
    }
  }

  // TestReportListener interface, not used but necessary
  override def startGroup(name: String): Unit = {}
  override def testEvent(event: TestEvent): Unit = {}
  override def endGroup(name: String, t: Throwable): Unit = {}
  override def endGroup(name: String, result: TestResult): Unit = {}
}
|
fmonniot/sbt-redis-plugin
|
src/main/scala/eu/monniot/redis/plugin/RedisTestsListener.scala
|
Scala
|
apache-2.0
| 4,108 |
package de.christofreichardt.scala.diagnosis
import de.christofreichardt.diagnosis.TracerFactory
import de.christofreichardt.diagnosis.AbstractTracer
trait Tracing {
  /**
   * Runs `block` bracketed by tracer entry/exit calls: `entry` is invoked
   * before the block and `wayout` is always invoked afterwards, even when the
   * block throws.
   *
   * @param resultTypeAsString textual result type recorded in the trace
   * @param callee             object being traced
   * @param methodSignature    signature recorded in the trace
   * @param block              the computation to trace (evaluated lazily)
   * @return the value produced by `block`
   */
  def withTracer[T](resultTypeAsString: String, callee: AnyRef, methodSignature: String)(block: => T): T = {
    val currentTracer = getCurrentTracer()
    currentTracer.entry(resultTypeAsString, callee, methodSignature)
    try block
    finally currentTracer.wayout()
  }

  /** Tracer used by withTracer; defaults to the factory's default tracer. */
  def getCurrentTracer(): AbstractTracer = {
    val factory = TracerFactory.getInstance()
    factory.getDefaultTracer()
  }
}
|
chr78rm/secret-sharing
|
shamirs-scheme/src/main/scala/de/christofreichardt/scala/diagnosis/Tracing.scala
|
Scala
|
gpl-3.0
| 555 |
package com.sksamuel.scapegoat.inspections.string
import com.sksamuel.scapegoat.InspectionTest
/** @author Stephen Samuel */
// Verifies the StripMarginOnRegex inspection: it should only warn when
// stripMargin is applied to a regex literal that contains '|' (where
// stripMargin would alter the pattern's alternation).
class StripMarginOnRegexTest extends InspectionTest {

  override val inspections = Seq(new StripMarginOnRegex)

  "StripMarginOnRegex" - {
    "should report warning" - {
      "for regex containing | that calls strip margin before r" in {
        val code = """object Test {
                     val regex = "match|this".stripMargin.r
                   } """.stripMargin

        compileCodeSnippet(code)
        compiler.scapegoat.feedback.warnings.size shouldBe 1
      }
    }
    "should not report warning" - {
      "for regex without | that calls strip margin before r" in {
        val code = """object Test {
                     val regex = "match_this".stripMargin.r
                   } """.stripMargin

        compileCodeSnippet(code)
        compiler.scapegoat.feedback.warnings.size shouldBe 0
      }
      "for plain regex" in {
        val code = """object Test {
                     val regex = "match|this".r
                   } """.stripMargin

        compileCodeSnippet(code)
        compiler.scapegoat.feedback.warnings.size shouldBe 0
      }
    }
  }
}
|
sksamuel/scapegoat
|
src/test/scala/com/sksamuel/scapegoat/inspections/string/StripMarginOnRegexTest.scala
|
Scala
|
apache-2.0
| 1,225 |
import java.net.Socket
import java.io.PrintWriter
import java.io.BufferedReader
import java.io.InputStreamReader
/**
* Scala IRC Bot, based upon apbot[0]
* [0] http://sourceforge.net/p/apbot/home/apbot/
*/
/**
 * Scala IRC Bot, based upon apbot[0]
 * [0] http://sourceforge.net/p/apbot/home/apbot/
 *
 * Connects eagerly at object initialization, logs in, then processes server
 * lines forever: answers PINGs and reacts to a couple of !-prefixed commands.
 */
object Sib {
  // Connection and bot configuration.
  val host = "127.0.0.1"
  val port = 6667
  val nick = "sib"
  val channel = "#test"
  val symbol = "!" // command prefix, e.g. "!bang"
  val debugOut = true

  // NOTE: the socket is opened as a side effect of object initialization.
  val s = new Socket(host, port)
  val out = new PrintWriter(s.getOutputStream(), true)
  val in = new BufferedReader(new InputStreamReader(s.getInputStream()))

  /** Sends the IRC registration sequence and joins the configured channel. */
  def login {
    out.println("USER " + nick + " " + nick + " " + nick + " :sib")
    out.println("NICK " + nick)
    out.println("JOIN " + channel)
  }

  /** Answers a server PING with the matching PONG token. */
  def ping(msg : String) {
    println("Got ping, token is [" + msg + "]")
    out.println("PONG :" + msg)
  }

  /** Sends a normal text message to the given channel. */
  def sendMessageToChannel(channel: String, msg: String) {
    out.println("PRIVMSG "+ channel +" :" + msg)
  }

  /** Sends a CTCP ACTION ("/me") to the given channel. */
  def doActionInChannel(channel: String, action: String) {
    out.println("PRIVMSG "+ channel +" :\u0001ACTION " + action + "\u0001")
  }

  def main(argv : Array[String]) {
    login
    // readLine() returns null once the server closes the connection; the
    // original unconditional do/while(true) would then NPE on line.contains.
    // Loop until EOF instead so the bot shuts down cleanly.
    var line = in.readLine()
    while (line != null) {
      println(line)
      if (line.contains("PING :")) {
        ping(line.split(":")(1))
      }
      else if (line.contains("PRIVMSG")) {
        val tokens = line.split(" ")
        if (tokens.length < 4) {
          println("Got weird line (less than four tokens)..")
        } else {
          // Typical shape: ":user!host PRIVMSG #chan :message text"
          val msg = line.split(" ").drop(3).mkString(" ").drop(1)
          val cmd = msg.split(" ").head
          val args = msg.split(" ").tail
          val channel = line.split(" ")(2)
          val directMessage = channel == nick
          val user = line.drop(1).split("!")(0)

          if (debugOut) {
            println("msg = " + msg)
            println("cmd = " + cmd)
            println("args = " + args.mkString(","))
            println("channel = " + channel)
            println("directMessage = " + directMessage)
            println("user = " + user)
          }

          if (cmd == symbol + "bang") {
            sendMessageToChannel(channel, "Booooyah!")
          } else if (cmd == symbol + "boom") {
            doActionInChannel(channel, "scratches his head")
          }
        }
      }
      line = in.readLine()
    }
  }
}
/*
def joinchan(channel):
s.send("PRIVMSG "+ CHANNEL +" :Joining "+ channel +"\r\n")
s.send("JOIN "+ channel +"\r\n")
def partchan(channel):
s.send("PRIVMSG "+ CHANNEL +" :Leaving "+ channel +"\r\n")
s.send("PART "+ channel +"\r\n")
def hello(user):
s.send("PRIVMSG "+ CHANNEL +" :G'day "+ nick +"!\n")
def quitIRC():
s.send("QUIT "+ CHANNEL +"\n")
def fail():
s.send("PRIVMSG "+ CHANNEL +" :Either you do not have the permission to do that, or that is not a valid command.\n")
def fish(user):
s.send("PRIVMSG "+ CHANNEL +" :\x01ACTION slaps "+ user +" with a wet sloppy tuna fish.\x01\r\n")
time.sleep(1)
s.send("PRIVMSG "+ CHANNEL +" :take that bitch\n")
def cake(sender):
if food == True:
s.send("PRIVMSG "+ CHANNEL +" :\x01ACTION is making "+ sender +" a cake\x01\r\n")
time.sleep(10)
s.send("PRIVMSG "+ CHANNEL +" :\x01ACTION has finished making "+ sender +"'s cake\x01\r\n")
time.sleep(1)
s.send("PRIVMSG "+ CHANNEL +" :Here you go "+ sender +"! I hope you enjoy it!\r\n")
else:
s.send("PRIVMSG "+ CHANNEL +" :Command not loaded\r\n")
def echo(message):
s.send("PRIVMSG "+ CHANNEL +" :"+ message +"\r\n")
def pepsi(user):
if food == True:
s.send("PRIVMSG "+ CHANNEL +" :\x01ACTION dispenses a can of Pepsi for "+ user +"\x01\r\n")
else:
s.send("PRIVMSG "+ CHANNEL +" :Command not loaded\r\n")
def coke(user):
if food == True:
s.send("PRIVMSG "+ CHANNEL +" :\x01ACTION dispenses a can of Coke for "+ user +"\x01\r\n")
else:
s.send("PRIVMSG "+ CHANNEL +" :Command not loaded\r\n")
s = socket.socket( )
s.connect((HOST, PORT))
s.send("USER "+ NICK +" "+ NICK +" "+ NICK +" :apbot\n")
s.send("NICK "+ NICK +"\r\n")
s.send("JOIN "+ HOME_CHANNEL +"\r\n")
while 1:
line = s.recv(2048)
line = line.strip("\r\n")
print line
stoperror = line.split(" ")
if ("PING :" in line):
pingcmd = line.split(":", 1)
pingmsg = pingcmd[1]
ping(pingmsg)
elif "PRIVMSG" in line:
if len(line) < 30:
print blank
elif len(stoperror) < 4:
print blank
else:
complete = line.split(":", 2)
info = complete[1]
msg = line.split(":", 2)[2] ##the thing that was said
cmd = msg.split(" ")[0]
CHANNEL = info.split(" ")[2] ##channel from which it was said
user = line.split(":")[1].split("!")[0] ## the person that said the thing
arg = msg.split(" ")
if "hello " + NICK ==cmd:
hello(user)
print "recieved hello"
elif "hey " + NICK ==cmd:
hello(user)
"print recieved hello"
elif "hi " + NICK ==cmd:
hello(user)
"print recieved hello"
elif SYMBOL + "join"==cmd and len(arg) > 1:
x = line.split(" ", 4)
newchannel = x[4]
joinchan(newchannel)
elif SYMBOL + "leave"==cmd and len(arg) > 1:
x = line.split(" ", 4)
newchannel = x[4]
partchan(newchannel)
elif SYMBOL + "quit"==cmd:
quitIRC()
elif SYMBOL + "coke"==cmd and len(arg) > 1:
x = line.split(" ")
recvr = x[4]
coke(recvr)
elif SYMBOL + "pepsi"==cmd and len(arg) > 1:
x = line.split(" ")
recvr = x[4]
pepsi(recvr)
elif SYMBOL + "fish"==cmd and len(arg) > 1:
x = line.split(" ")
recvr = x[4]
fish(recvr)
elif SYMBOL + "bomb"==cmd and len(arg) > 1:
x = line.split(" ")
recvr = x[4]
bomb(recvr)
elif SYMBOL + "cake"==cmd:
cake(user)
elif SYMBOL + "echo"==cmd:
x = msg.split(" ", 1)[1]
echo(x)
elif line.find(""+ SYMBOL +"load") != -1:
plugin = msg.split(" ")[1]
load(plugin)
elif line.find(""+ SYMBOL +"unload") != -1:
plugin = msg.split(" ")[1]
unload(plugin)
elif SYMBOL in cmd:
fail()
*/
|
axelknauf/sib
|
sib.scala
|
Scala
|
mit
| 6,273 |
/*
* Copyright 2017 Nikolay Donets
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nikdon.google.datastore
import cats.data.{NonEmptyList, Validated}
import cats.implicits._
import com.github.nikdon.google.datastore.DatastoreFormat.BaseDatatypes
import com.google.cloud.datastore.{Datastore, Entity}
import shapeless._
import shapeless.labelled._
import scala.util.{Failure, Success, Try}
trait DerivedDatastoreFormat {
type ValidatedPropertiesError[T] = Validated[InvalidPropertiesError, T]
type NotSymbol[T] = |Β¬|[Symbol]#Ξ»[T]
trait ConstructedDatastoreFormat[T] {
def read(av: Entity): Validated[InvalidPropertiesError, T]
def write(t: T, kind: Option[String])(implicit datastore: Datastore): Entity
}
implicit val hnil: ConstructedDatastoreFormat[HNil] =
new ConstructedDatastoreFormat[HNil] {
def read(av: Entity) = Validated.valid(HNil)
def write(t: HNil, kind: Option[String])(implicit datastore: Datastore): Entity = {
val knd = kind.getOrElse(BaseDatatypes.HLIST)
val k = datastore.newKeyFactory().setKind(knd).newKey(BaseDatatypes.HLIST)
Entity.newBuilder(k).build()
// val k = datastore.newKeyFactory().setKind(knd).newKey()
// Entity.newBuilder(datastore.allocateId(k)).build()
}
}
implicit def hcons[K <: Symbol, V, T <: HList](
implicit headFormat: Lazy[DatastoreFormat[V]],
tailFormat: Lazy[ConstructedDatastoreFormat[T]],
fieldWitness: Witness.Aux[K]): ConstructedDatastoreFormat[FieldType[K, V] :: T] =
new ConstructedDatastoreFormat[FieldType[K, V] :: T] {
def read(av: Entity): Validated[InvalidPropertiesError, FieldType[K, V] :: T] = {
def validatedProperty(x: Either[DatastoreError, V]): Validated[InvalidPropertiesError, V] = {
x.leftMap(ge =>
InvalidPropertiesError(NonEmptyList(GenericDatastoreError(s"Read property error: ${ge.msg}"), Nil)))
.toValidated
}
val fieldName: String = fieldWitness.value.name
val possibleValue: Either[InvalidPropertiesError, V] = {
for {
underlying <- Either
.fromTry(Try(av.getEntity(fieldName).asInstanceOf[Entity]))
.leftMap(err =>
InvalidPropertiesError(NonEmptyList(GenericDatastoreError(s"Read property error: $err"), Nil)))
res <- headFormat.value.read(underlying)
} yield res
}
val head: Validated[InvalidPropertiesError, FieldType[K, V]] =
validatedProperty(possibleValue).map(field[K](_))
val tail: Validated[InvalidPropertiesError, T] =
tailFormat.value.read(av)
val res: ValidatedPropertiesError[FieldType[K, V] :: T] =
cats.Apply[ValidatedPropertiesError].map2(head, tail)(_ :: _)
res
}
def write(t: FieldType[K, V] :: T, kind: Option[String])(implicit datastore: Datastore): Entity = {
val tailValue: Entity = tailFormat.value.write(t.tail, kind)
Entity
.newBuilder(tailValue)
.set(fieldWitness.value.name, headFormat.value.write(t.head))
.build()
}
}
trait CoProductDatastoreFormat[T] extends DatastoreFormat[T] {
def read(av: Entity): Either[InvalidPropertiesError, T]
def write(t: T)(implicit datastore: Datastore): Entity
def write(t: T, kind: Option[String])(implicit datastore: Datastore): Entity
}
implicit val cnil: CoProductDatastoreFormat[CNil] = new CoProductDatastoreFormat[CNil] {
def read(av: Entity): Either[InvalidPropertiesError, CNil] =
Left(InvalidPropertiesError(NonEmptyList(GenericDatastoreError(s"$av was not of the expected type"), Nil)))
def write(t: CNil)(implicit datastore: Datastore): Entity = write(t, None)
def write(t: CNil, kind: Option[String])(implicit datastore: Datastore): Entity =
sys.error("CNil cannot be written to an Entity")
}
  // Inductive case of the coproduct derivation: attempt to read/write the head
  // alternative (stored as a sub-entity under the field's name); on failure,
  // delegate to the format for the remaining alternatives (the tail).
  implicit def coproduct[K <: Symbol, V, T <: Coproduct](
      implicit datastore: Datastore,
      headFormat: Lazy[DatastoreFormat[V]],
      tailFormat: CoProductDatastoreFormat[T],
      fieldWitness: Witness.Aux[K]): CoProductDatastoreFormat[FieldType[K, V] :+: T] = {
    val fieldName = fieldWitness.value.name
    new CoProductDatastoreFormat[FieldType[K, V] :+: T] {
      def read(av: Entity): Either[InvalidPropertiesError, FieldType[K, V] :+: T] = {
        // Any exception while extracting the head's sub-entity (e.g. the property is
        // absent) is interpreted as "not this alternative" and the tail is tried.
        Try {
          val entity = av.getEntity(fieldName)
          val value = headFormat.value.read(entity.asInstanceOf[Entity])
          value.map(v => Inl(field[K](v)))
        } match {
          case Success(res) => res
          case Failure(_) => tailFormat.read(av).map(v => Inr(v))
        }
      }
      def write(field: FieldType[K, V] :+: T)(implicit datastore: Datastore): Entity = write(field, None)
      def write(field: FieldType[K, V] :+: T, kind: Option[String])(implicit datastore: Datastore): Entity =
        field match {
          case Inl(h) =>
            // Head alternative: nest the written value under the field name, keyed
            // with the requested kind (or the HLIST default when none was given).
            val knd = kind.getOrElse(BaseDatatypes.HLIST)
            val key =
              datastore.newKeyFactory().setKind(knd).newKey(BaseDatatypes.HLIST)
            Entity
              .newBuilder(key)
              .set(fieldWitness.value.name, headFormat.value.write(h))
              .build()
          case Inr(t) => tailFormat.write(t, kind)
        }
    }
  }
  // Derives a DatastoreFormat for any product type T (e.g. a case class) via its
  // LabelledGeneric representation R; the runtime class name is passed as the
  // Entity kind when writing.
  implicit def genericProduct[T: NotSymbol, R](implicit datastore: Datastore,
                                               gen: LabelledGeneric.Aux[T, R],
                                               formatR: Lazy[ConstructedDatastoreFormat[R]]): DatastoreFormat[T] =
    new DatastoreFormat[T] {
      def read(av: Entity): Either[InvalidPropertiesError, T] =
        formatR.value.read(av).map(gen.from).toEither
      def write(t: T)(implicit datastore: Datastore): Entity =
        formatR.value.write(gen.to(t), Some(t.getClass.getName))
    }
  // Derives a DatastoreFormat for a coproduct type T (e.g. a sealed trait) via its
  // LabelledGeneric representation R, reusing the CoProductDatastoreFormat chain.
  implicit def genericCoProduct[T, R](implicit datastore: Datastore,
                                      gen: LabelledGeneric.Aux[T, R],
                                      formatR: Lazy[CoProductDatastoreFormat[R]]): DatastoreFormat[T] =
    new DatastoreFormat[T] {
      def read(av: Entity): Either[InvalidPropertiesError, T] =
        formatR.value.read(av).map(gen.from)
      def write(t: T)(implicit datastore: Datastore): Entity =
        formatR.value.write(gen.to(t), Some(t.getClass.getName))
    }
}
|
nikdon/scala-cloud-datastore
|
src/main/scala/com/github/nikdon/google/datastore/DerivedDatastoreFormat.scala
|
Scala
|
apache-2.0
| 6,973 |
package gdg.blaze.ext.io
import gdg.blaze._
import org.apache.spark.streaming.dstream.DStream
/** Input plugin that reads from standard input.
  *
  * The raw stream pulled from stdin is decoded into messages with the codec
  * supplied in the plugin configuration.
  */
class StdIn(pc: StdInConfig, source: SourceInput) extends Input {
  override def apply(): DStream[Message] = {
    val rawStream = source(scala.io.Source.stdin)
    rawStream.flatMap(raw => pc.codec.decode(raw))
  }
}
case class StdInConfig(codec:Codec)
/** Factory building a [[StdIn]] input from the generic plugin configuration. */
object StdIn extends PluginFactory[StdIn] {
  override def apply(pc: PluginConfig, bc: BlazeContext): StdIn = new StdIn(pc.convert(classOf[StdInConfig]), new SourceInput(bc))
}
|
micahrupersburg/blaze-of-glory
|
src/main/scala/gdg/blaze/ext/io/StdIn.scala
|
Scala
|
apache-2.0
| 485 |
// Compiler regression-test fixture: `track` is an inline method, so its body is
// expanded at the call site and the stack traces printed by printStack should
// point at the caller, not at Foo.track. Do not reshape this code - the test
// asserts on the exact expansion behaviour.
object Foo {
  inline def track[T](inline f: T): T = {
    printStack("track")
    printStack("track")
    f
  }
  // Prints the frame one level above this call; relies on the exact stack depth.
  def printStack(tag: String): Unit = {
    println(tag + ": "+ new Exception().getStackTrace().apply(1))
  }
}
// Driver for the inline-expansion test above: the tracked block is inlined into
// main, so the printed stack frames should report main as the caller.
object Test {
  import Foo.*
  def main(args: Array[String]): Unit = {
    track {
      printStack("main1")
      printStack("main2")
    }
  }
}
|
dotty-staging/dotty
|
tests/run/i4947c.scala
|
Scala
|
apache-2.0
| 376 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.renewal
import models.{Country, SubscriptionRequest}
import models.businessactivities.BusinessActivities
import models.hvd.Hvd
import models.moneyservicebusiness.MoneyServiceBusiness
import models.renewal.Conversions._
import org.scalatest.{MustMatchers, WordSpec}
/**
 * Verifies that `withRenewalData` (from [[Conversions]]) copies each renewal
 * answer into the corresponding section of a [[SubscriptionRequest]]:
 * business activities, money service business (MSB) and high value dealer (HVD).
 */
class ConversionsSpec extends WordSpec with MustMatchers {
  // Empty base sections that the renewal data is merged into.
  trait Fixture {
    val businessActivities = BusinessActivities()
    val msbSection = MoneyServiceBusiness()
    val hvdSection = Hvd()
    val subscriptionRequest = SubscriptionRequest(None, None, None, None, None, None, Some(businessActivities), None, None, None, Some(msbSection), Some(hvdSection), None, None)
  }
  "The renewal converter" must {
    "convert the AMLS expected turnover" in new Fixture {
      val turnover: AMLSTurnover = AMLSTurnover.First
      val renewal = Renewal(turnover = Some(turnover))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.businessActivitiesSection.get.expectedAMLSTurnover mustBe Some(models.businessactivities.ExpectedAMLSTurnover.First)
    }
    "convert the business turnover" in new Fixture {
      val businessTurnover: BusinessTurnover = BusinessTurnover.Second
      val renewal = Renewal(businessTurnover = Some(businessTurnover))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.businessActivitiesSection.get.expectedBusinessTurnover mustBe Some(models.businessactivities.ExpectedBusinessTurnover.Second)
    }
    "convert the 'involved in other businesses' model" in new Fixture {
      val model: InvolvedInOther = InvolvedInOtherYes("some other business")
      val renewal = Renewal(involvedInOtherActivities = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.businessActivitiesSection.get.involvedInOther mustBe Some(models.businessactivities.InvolvedInOtherYes("some other business"))
    }
    "convert the 'customers outside the UK' model" in new Fixture {
      val country = Country("My Country", "MC")
      val model = CustomersOutsideUK(Some(Seq(country)))
      val renewal = Renewal(customersOutsideUK = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.businessActivitiesSection.get.customersOutsideUK mustBe Some(models.businessactivities.CustomersOutsideUK(Some(Seq(country))))
    }
    "convert the 'MSB throughput' model" in new Fixture {
      val model = TotalThroughput("03")
      val renewal = Renewal(totalThroughput = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.msbSection.get.throughput mustBe Some(models.moneyservicebusiness.ExpectedThroughput.Third)
    }
    "convert the 'MSB money transfers' model" in new Fixture {
      val model = TransactionsInLast12Months("2500")
      val renewal = Renewal(transactionsInLast12Months = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.msbSection.get.transactionsInNext12Months mustBe Some(models.moneyservicebusiness.TransactionsInNext12Months("2500"))
    }
    "convert the 'MSB largest amounts' model" in new Fixture {
      val model = SendTheLargestAmountsOfMoney(Seq(Country("United Kingdom", "GB"), Country("France", "FR"), Country("us", "US")))
      val renewal = Renewal(sendTheLargestAmountsOfMoney = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.msbSection.get.sendTheLargestAmountsOfMoney mustBe Some(
        models.moneyservicebusiness.SendTheLargestAmountsOfMoney(Seq(
          Country("United Kingdom", "GB"), Country("France", "FR"), Country("us", "US"))))
    }
    "convert the 'MSB send money to other country' model" in new Fixture {
      val model = SendMoneyToOtherCountry(true)
      val renewal = Renewal(sendMoneyToOtherCountry = Some(model))
      val converted = subscriptionRequest.withRenewalData((renewal))
      converted.msbSection.get.sendMoneyToOtherCountry mustBe Some(models.moneyservicebusiness.SendMoneyToOtherCountry(true))
    }
    "convert the 'MSB most transactions' model" in new Fixture {
      val model = MostTransactions(Seq(Country("United Kingdom", "GB")))
      val renewal = Renewal(mostTransactions = Some(model))
      val converted = subscriptionRequest.withRenewalData((renewal))
      converted.msbSection.get.mostTransactions mustBe Some(models.moneyservicebusiness.MostTransactions(Seq(Country("United Kingdom", "GB"))))
    }
    "convert the 'MSB currency transactions' model" in new Fixture {
      val model = CETransactionsInLast12Months("12345678963")
      val renewal = Renewal(ceTransactionsInLast12Months = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.msbSection.get.ceTransactionsInNext12Months mustBe Some(models.moneyservicebusiness.CETransactionsInNext12Months("12345678963"))
    }
    "convert the 'MSB which currencies' model" in new Fixture {
      val model = WhichCurrencies(Seq("USD", "CHF", "EUR"), Some(UsesForeignCurrenciesYes), Some(MoneySources(Some(BankMoneySource("Bank names")))))
      val renewal = Renewal(whichCurrencies = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.msbSection.get.whichCurrencies mustBe Some(models.moneyservicebusiness.WhichCurrencies(Seq("USD", "CHF", "EUR"), Some(models.moneyservicebusiness.UsesForeignCurrenciesYes), Some(models.moneyservicebusiness.MoneySources(Some(models.moneyservicebusiness.BankMoneySource("Bank names"))))))
    }
    "convert the 'MSB foreign exchange transactions' model" in new Fixture {
      val model = FXTransactionsInLast12Months("987")
      val renewal = Renewal(fxTransactionsInLast12Months = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.msbSection.get.fxTransactionsInNext12Months mustBe Some(
        models.moneyservicebusiness.FXTransactionsInNext12Months("987")
      )
    }
    "convert the 'HVD percentage' model First" in new Fixture {
      val model = PercentageOfCashPaymentOver15000.First
      val renewal = Renewal(percentageOfCashPaymentOver15000 = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.hvdSection.get.percentageOfCashPaymentOver15000 mustBe Some(models.hvd.PercentageOfCashPaymentOver15000.First)
    }
    "convert the 'HVD percentage' model Second" in new Fixture {
      val model = PercentageOfCashPaymentOver15000.Second
      val renewal = Renewal(percentageOfCashPaymentOver15000 = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.hvdSection.get.percentageOfCashPaymentOver15000 mustBe Some(models.hvd.PercentageOfCashPaymentOver15000.Second)
    }
    "convert the 'HVD percentage' model Third" in new Fixture {
      val model = PercentageOfCashPaymentOver15000.Third
      val renewal = Renewal(percentageOfCashPaymentOver15000 = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.hvdSection.get.percentageOfCashPaymentOver15000 mustBe Some(models.hvd.PercentageOfCashPaymentOver15000.Third)
    }
    "convert the 'HVD percentage' model Fourth" in new Fixture {
      val model = PercentageOfCashPaymentOver15000.Fourth
      val renewal = Renewal(percentageOfCashPaymentOver15000 = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.hvdSection.get.percentageOfCashPaymentOver15000 mustBe Some(models.hvd.PercentageOfCashPaymentOver15000.Fourth)
    }
    "convert the 'HVD percentage' model Fifth" in new Fixture {
      val model = PercentageOfCashPaymentOver15000.Fifth
      val renewal = Renewal(percentageOfCashPaymentOver15000 = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.hvdSection.get.percentageOfCashPaymentOver15000 mustBe Some(models.hvd.PercentageOfCashPaymentOver15000.Fifth)
    }
    "convert the 'HVD receive cash payments' model" in new Fixture {
      // This single renewal answer fans out into two HVD fields: the boolean flag
      // and the accepted payment methods.
      val model = CashPayments(CashPaymentsCustomerNotMet(true), Some(HowCashPaymentsReceived(PaymentMethods(true,true,Some("other")))))
      val renewal = Renewal(receiveCashPayments = Some(model))
      val converted = subscriptionRequest.withRenewalData(renewal)
      converted.hvdSection.get.receiveCashPayments mustBe Some(true)
      converted.hvdSection.get.cashPaymentMethods mustBe Some(models.hvd.PaymentMethods(true,true,Some("other")))
    }
  }
}
|
hmrc/amls-frontend
|
test/models/renewal/ConversionsSpec.scala
|
Scala
|
apache-2.0
| 9,148 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.space
import squants.mass.{ Kilograms, KilogramsPerSquareMeter }
import squants.motion.{ Newtons, Pascals }
import squants.photo.{ Candelas, CandelasPerSquareMeter, Lumens, Lux }
import squants.time.Seconds
import squants.radio.SquareMeterSeconds
import squants.{ MetricSystem, QuantityParseException }
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
 * Exercises the Area quantity type: factory construction, string parsing,
 * conversions between all supported units, formatted output, dimensional
 * arithmetic with other quantities, and the implicit-conversion DSL.
 *
 * @author garyKeorkunian
 * @since 0.1
 *
 */
class AreaSpec extends AnyFlatSpec with Matchers {
  behavior of "Area and its Units of Measure"
  it should "create values using UOM factories" in {
    SquareMeters(1).toSquareMeters should be(1)
    SquareCentimeters(1).toSquareCentimeters should be(1)
    SquareKilometers(1).toSquareKilometers should be(1)
    SquareUsMiles(1).toSquareUsMiles should be(1)
    SquareYards(1).toSquareYards should be(1)
    SquareFeet(1).toSquareFeet should be(1)
    SquareInches(1).toSquareInches should be(1)
    Hectares(1).toHectares should be(1)
    Acres(1).toAcres should be(1)
    Barnes(1).toBarnes should be(1)
  }
  it should "create values from properly formatted Strings" in {
    Area("10.22 mΒ²").get should be(SquareMeters(10.22))
    Area("10.22 cmΒ²").get should be(SquareCentimeters(10.22))
    Area("10.22 kmΒ²").get should be(SquareKilometers(10.22))
    Area("10.22 miΒ²").get should be(SquareUsMiles(10.22))
    Area("10.22 ydΒ²").get should be(SquareYards(10.22))
    Area("10.22 ftΒ²").get should be(SquareFeet(10.22))
    Area("10.22 inΒ²").get should be(SquareInches(10.22))
    Area("10.22 ha").get should be(Hectares(10.22))
    Area("10.22 acre").get should be(Acres(10.22))
    Area("10.22 b").get should be(Barnes(10.22))
    Area("10.22 zz").failed.get should be(QuantityParseException("Unable to parse Area", "10.22 zz"))
    Area("ZZ mΒ²").failed.get should be(QuantityParseException("Unable to parse Area", "ZZ mΒ²"))
  }
  it should "properly convert to all supported Units of Measure" in {
    val x = SquareMeters(1)
    x.toSquareMeters should be(1)
    x.toSquareCentimeters should be(1 / (MetricSystem.Centi * MetricSystem.Centi))
    x.toSquareKilometers should be(1 / (MetricSystem.Kilo * MetricSystem.Kilo))
    x.toSquareUsMiles should be(1 / SquareUsMiles.conversionFactor)
    x.toSquareYards should be(1 / SquareYards.conversionFactor)
    x.toSquareFeet should be(1 / SquareFeet.conversionFactor)
    x.toHectares should be(1 / Hectares.conversionFactor)
    x.toAcres should be(1 / Acres.conversionFactor)
    x.toBarnes should be(1 / Barnes.conversionFactor)
  }
  it should "return properly formatted strings for all supported Units of Measure" in {
    SquareMeters(1).toString(SquareMeters) should be("1.0 mΒ²")
    SquareCentimeters(1).toString(SquareCentimeters) should be("1.0 cmΒ²")
    SquareKilometers(1).toString(SquareKilometers) should be("1.0 kmΒ²")
    SquareUsMiles(1).toString(SquareUsMiles) should be("1.0 miΒ²")
    SquareYards(1).toString(SquareYards) should be("1.0 ydΒ²")
    SquareFeet(1).toString(SquareFeet) should be("1.0 ftΒ²")
    SquareInches(1).toString(SquareInches) should be("1.0 inΒ²")
    Hectares(1).toString(Hectares) should be("1.0 ha")
    Acres(1).toString(Acres) should be("1.0 acre")
    Barnes(1).toString(Barnes) should be("1.0 b")
  }
  // Dimensional arithmetic: Area combined with other quantities.
  it should "return Volume when multiplied by Length" in {
    SquareMeters(1) * Meters(1) should be(CubicMeters(1))
  }
  it should "return Mass when multiplied by AreaDensity" in {
    SquareMeters(1) * KilogramsPerSquareMeter(1) should be(Kilograms(1))
  }
  it should "return Force when multiplied by Pressure" in {
    SquareMeters(1) * Pascals(1) should be(Newtons(1))
  }
  it should "return Lumens when multiplied by Illuminance" in {
    SquareMeters(1) * Lux(1) should be(Lumens(1))
  }
  it should "return Candelas when multiplied by Luminance" in {
    SquareMeters(1) * CandelasPerSquareMeter(1) should be(Candelas(1))
  }
  it should "return Length when divided by Length" in {
    SquareMeters(1) / Meters(1) should be(Meters(1))
  }
  it should "return Length when square rooted" in {
    SquareMeters(4).squareRoot should be(Meters(2))
  }
  it should "return AreaTime when multiplied by Time" in {
    SquareMeters(4) * Seconds(1) should be(SquareMeterSeconds(4))
  }
  behavior of "AreaConversion"
  it should "provide aliases for single unit values" in {
    import AreaConversions._
    squareMeter should be(SquareMeters(1))
    squareCentimeter should be(SquareCentimeters(1))
    squareKilometer should be(SquareKilometers(1))
    squareMile should be(SquareUsMiles(1))
    squareYard should be(SquareYards(1))
    squareFoot should be(SquareFeet(1))
    squareInch should be(SquareInches(1))
    hectare should be(Hectares(1))
    acre should be(Acres(1))
    barne should be(Barnes(1))
  }
  it should "provide implicit conversion from Double" in {
    import AreaConversions._
    val d = 10.22d
    d.squareMeters should be(SquareMeters(d))
    d.squareCentimeters should be(SquareCentimeters(d))
    d.squareKilometers should be(SquareKilometers(d))
    d.squareMiles should be(SquareUsMiles(d))
    d.squareYards should be(SquareYards(d))
    d.squareFeet should be(SquareFeet(d))
    d.squareInches should be(SquareInches(d))
    d.hectares should be(Hectares(d))
    d.acres should be(Acres(d))
    d.barnes should be(Barnes(d))
  }
  it should "provide Numeric support" in {
    import AreaConversions.AreaNumeric
    val as = List(SquareMeters(100), SquareMeters(10))
    as.sum should be(SquareMeters(110))
  }
}
|
garyKeorkunian/squants
|
shared/src/test/scala/squants/space/AreaSpec.scala
|
Scala
|
apache-2.0
| 6,079 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import org.I0Itec.zkclient.ZkClient
import kafka.server.{BrokerAndInitialOffset, AbstractFetcherThread, AbstractFetcherManager}
import kafka.cluster.{Cluster, Broker}
import scala.collection.immutable
import scala.collection.Map
import collection.mutable.HashMap
import scala.collection.mutable
import java.util.concurrent.locks.ReentrantLock
import kafka.utils.Utils.inLock
import kafka.utils.ZkUtils._
import kafka.utils.{ShutdownableThread, SystemTime}
import kafka.common.TopicAndPartition
import kafka.client.ClientUtils
import java.util.concurrent.atomic.AtomicInteger
/**
* Usage:
* Once ConsumerFetcherManager is created, startConnections() and stopAllConnections() can be called repeatedly
* until shutdown() is called.
*/
class ConsumerFetcherManager(private val consumerIdString: String,
                             private val config: ConsumerConfig,
                             private val zkClient : ZkClient)
        extends AbstractFetcherManager("ConsumerFetcherManager-%d".format(SystemTime.milliseconds),
          config.clientId, 1) {
  // Per-partition consumption state; installed by startConnections and cleared
  // by stopConnections.
  private var partitionMap: immutable.Map[TopicAndPartition, PartitionTopicInfo] = null
  private var cluster: Cluster = null
  // Partitions whose leader is currently unknown. Guarded by `lock`; `cond` is
  // signalled whenever entries are added.
  private val noLeaderPartitionSet = new mutable.HashSet[TopicAndPartition]
  private val lock = new ReentrantLock
  private val cond = lock.newCondition()
  private var leaderFinderThread: ShutdownableThread = null
  private val correlationId = new AtomicInteger(0)
  private class LeaderFinderThread(name: String) extends ShutdownableThread(name) {
    // thread responsible for adding the fetcher to the right broker when leader is available
    override def doWork() {
      val leaderForPartitionsMap = new HashMap[TopicAndPartition, Broker]
      lock.lock()
      try {
        // Block until at least one partition needs a leader.
        while (noLeaderPartitionSet.isEmpty) {
          trace("No partition for leader election.")
          cond.await()
        }
        trace("Partitions without leader %s".format(noLeaderPartitionSet))
        val brokers = getAllBrokersInCluster(zkClient)
        val topicsMetadata = ClientUtils.fetchTopicMetadata(noLeaderPartitionSet.map(m => m.topic).toSet,
                                                            brokers,
                                                            config.clientId,
                                                            config.socketTimeoutMs,
                                                            correlationId.getAndIncrement).topicsMetadata
        if(logger.isDebugEnabled) topicsMetadata.foreach(topicMetadata => debug(topicMetadata.toString()))
        // Record the leader for every waiting partition that now has one and
        // remove it from the waiting set.
        topicsMetadata.foreach { tmd =>
          val topic = tmd.topic
          tmd.partitionsMetadata.foreach { pmd =>
            val topicAndPartition = TopicAndPartition(topic, pmd.partitionId)
            if(pmd.leader.isDefined && noLeaderPartitionSet.contains(topicAndPartition)) {
              val leaderBroker = pmd.leader.get
              leaderForPartitionsMap.put(topicAndPartition, leaderBroker)
              noLeaderPartitionSet -= topicAndPartition
            }
          }
        }
      } catch {
        case t: Throwable => {
            if (!isRunning.get())
              throw t /* If this thread is stopped, propagate this exception to kill the thread. */
            else
              warn("Failed to find leader for %s".format(noLeaderPartitionSet), t)
          }
      } finally {
        lock.unlock()
      }
      try {
        // Attach fetchers, starting from each partition's current fetch offset.
        addFetcherForPartitions(leaderForPartitionsMap.map{
          case (topicAndPartition, broker) =>
            topicAndPartition -> BrokerAndInitialOffset(broker, partitionMap(topicAndPartition).getFetchOffset())}
        )
      } catch {
        case t: Throwable => {
          if (!isRunning.get())
            throw t /* If this thread is stopped, propagate this exception to kill the thread. */
          else {
            warn("Failed to add leader for partitions %s; will retry".format(leaderForPartitionsMap.keySet.mkString(",")), t)
            // NOTE(review): this lock()/unlock() pair is not wrapped in try/finally -
            // an exception from ++= would leave the lock held; confirm acceptable.
            lock.lock()
            noLeaderPartitionSet ++= leaderForPartitionsMap.keySet
            lock.unlock()
          }
        }
      }
      shutdownIdleFetcherThreads()
      Thread.sleep(config.refreshLeaderBackoffMs)
    }
  }
  // Builds a named fetcher thread for the given fetcher id / source broker pair.
  override def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread = {
    new ConsumerFetcherThread(
      "ConsumerFetcherThread-%s-%d-%d".format(consumerIdString, fetcherId, sourceBroker.id),
      config, sourceBroker, partitionMap, this)
  }
  // Installs the partition state and starts the leader finder; every given
  // partition initially has no known leader, so all are queued for election.
  def startConnections(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster) {
    leaderFinderThread = new LeaderFinderThread(consumerIdString + "-leader-finder-thread")
    leaderFinderThread.start()
    inLock(lock) {
      partitionMap = topicInfos.map(tpi => (TopicAndPartition(tpi.topic, tpi.partitionId), tpi)).toMap
      this.cluster = cluster
      noLeaderPartitionSet ++= topicInfos.map(tpi => TopicAndPartition(tpi.topic, tpi.partitionId))
      cond.signalAll()
    }
  }
  def stopConnections() {
    /*
     * Stop the leader finder thread first before stopping fetchers. Otherwise, if there are more partitions without
     * leader, then the leader finder thread will process these partitions (before shutting down) and add fetchers for
     * these partitions.
     */
    info("Stopping leader finder thread")
    if (leaderFinderThread != null) {
      leaderFinderThread.shutdown()
      leaderFinderThread = null
    }
    info("Stopping all fetchers")
    closeAllFetchers()
    // no need to hold the lock for the following since leaderFindThread and all fetchers have been stopped
    partitionMap = null
    noLeaderPartitionSet.clear()
    info("All connections stopped")
  }
  // Re-queues partitions whose fetch failed so the leader finder retries them.
  def addPartitionsWithError(partitionList: Iterable[TopicAndPartition]) {
    debug("adding partitions with error %s".format(partitionList))
    inLock(lock) {
      if (partitionMap != null) {
        noLeaderPartitionSet ++= partitionList
        cond.signalAll()
      }
    }
  }
}
|
fintler/kafka
|
core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala
|
Scala
|
apache-2.0
| 6,872 |
package net.categoricaldata
/** DSL package helpers; currently only a stand-in for unimplemented members. */
package object dsl {
  // Mirrors Predef.??? but throws NoSuchMethodException instead of NotImplementedError.
  def ??? = throw new NoSuchMethodException //???
}
|
JasonGross/categoricaldata
|
src/main/scala/net/categoricaldata/dsl/package.scala
|
Scala
|
mit
| 100 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.nio.ByteBuffer
import akka.actor._
import akka.remote._
import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.TaskState.TaskState
import org.apache.spark.deploy.worker.WorkerWatcher
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.util.{AkkaUtils, Utils}
/**
 * Executor backend that registers itself with the driver over Akka remoting and
 * then services the driver's task lifecycle messages (launch, kill, stop).
 * Exits the JVM on registration failure or driver disassociation.
 */
private[spark] class CoarseGrainedExecutorBackend(
    driverUrl: String,
    executorId: String,
    hostPort: String,
    cores: Int)
  extends Actor
  with ExecutorBackend
  with Logging {
  Utils.checkHostPort(hostPort, "Expected hostport")
  // Created only once the driver acknowledges registration; null until then.
  var executor: Executor = null
  var driver: ActorSelection = null
  override def preStart() {
    logInfo("Connecting to driver: " + driverUrl)
    driver = context.actorSelection(driverUrl)
    driver ! RegisterExecutor(executorId, hostPort, cores)
    // Subscribe to remoting lifecycle events so we can shut down on disassociation.
    context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])
  }
  override def receive = {
    case RegisteredExecutor(sparkProperties) =>
      logInfo("Successfully registered with driver")
      // Make this host instead of hostPort ?
      executor = new Executor(executorId, Utils.parseHostPort(hostPort)._1, sparkProperties)
    case RegisterExecutorFailed(message) =>
      logError("Slave registration failed: " + message)
      System.exit(1)
    case LaunchTask(taskDesc) =>
      logInfo("Got assigned task " + taskDesc.taskId)
      if (executor == null) {
        // A LaunchTask before RegisteredExecutor means the protocol was violated.
        logError("Received LaunchTask command but executor was null")
        System.exit(1)
      } else {
        executor.launchTask(this, taskDesc.taskId, taskDesc.serializedTask)
      }
    case KillTask(taskId, _) =>
      if (executor == null) {
        logError("Received KillTask command but executor was null")
        System.exit(1)
      } else {
        executor.killTask(taskId)
      }
    case x: DisassociatedEvent =>
      logError(s"Driver $x disassociated! Shutting down.")
      System.exit(1)
    case StopExecutor =>
      logInfo("Driver commanded a shutdown")
      context.stop(self)
      context.system.shutdown()
  }
  // Forwards task status changes back to the driver.
  override def statusUpdate(taskId: Long, state: TaskState, data: ByteBuffer) {
    driver ! StatusUpdate(executorId, taskId, state, data)
  }
}
private[spark] object CoarseGrainedExecutorBackend {
  /**
   * Boots a dedicated actor system hosting the executor backend (and, when a
   * worker URL is given, a WorkerWatcher for fate-sharing with the worker),
   * then blocks until the actor system terminates.
   */
  def run(driverUrl: String, executorId: String, hostname: String, cores: Int,
    workerUrl: Option[String]) {
    // Debug code
    Utils.checkHost(hostname)
    // Create a new ActorSystem to run the backend, because we can't create a SparkEnv / Executor
    // before getting started with all our system properties, etc
    val (actorSystem, boundPort) = AkkaUtils.createActorSystem("sparkExecutor", hostname, 0,
      indestructible = true, conf = new SparkConf)
    // set it
    val sparkHostPort = hostname + ":" + boundPort
    actorSystem.actorOf(
      Props(classOf[CoarseGrainedExecutorBackend], driverUrl, executorId, sparkHostPort, cores),
      name = "Executor")
    workerUrl.foreach{ url =>
      actorSystem.actorOf(Props(classOf[WorkerWatcher], url), name = "WorkerWatcher")
    }
    actorSystem.awaitTermination()
  }
  // CLI entry point: driverUrl, executorId, hostname, cores [, workerUrl].
  def main(args: Array[String]) {
    args.length match {
      case x if x < 4 =>
        System.err.println(
          // Worker url is used in spark standalone mode to enforce fate-sharing with worker
          "Usage: CoarseGrainedExecutorBackend <driverUrl> <executorId> <hostname> " +
          "<cores> [<workerUrl>]")
        System.exit(1)
      case 4 =>
        run(args(0), args(1), args(2), args(3).toInt, None)
      case x if x > 4 =>
        run(args(0), args(1), args(2), args(3).toInt, Some(args(4)))
    }
  }
}
|
sryza/spark
|
core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
|
Scala
|
apache-2.0
| 4,492 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package patterns
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlockExpr
import scala.collection.immutable.ArraySeq
/**
 * PSI implementation of a pattern argument list, e.g. the `(a, b)` part of
 * `case Foo(a, b)`.
 *
 * @author ilyas
 */
class ScPatternArgumentListImpl(node: ASTNode) extends ScalaPsiElementImpl (node) with ScPatternArgumentList{
  override def toString: String = "Pattern Argument List"
  override def patterns: Seq[ScPattern] = {
    // Direct child patterns of this argument list.
    val children = findChildren[ScPattern]
    // NOTE(review): the block-expression children are additionally searched and their
    // nested patterns appended - presumably to surface patterns written inside a
    // block argument; verify against ScPatternArgumentList's intended contract.
    val grandChildrenInBlockExpr = children
      .iterator
      .filterByType[ScBlockExpr]
      .flatMap(s => s.children.filterByType[ScPattern])
    children ++ grandChildrenInBlockExpr
  }
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/patterns/ScPatternArgumentListImpl.scala
|
Scala
|
apache-2.0
| 837 |
package deburnat.transade.gui.center
import collection.mutable.Map
import swing.{Orientation, BorderPanel, CheckBox, ComboBox, BoxPanel, Swing, event}
import event.{FocusGained, FocusLost, MouseClicked}
import Orientation._
import BorderPanel.Position._
import scala.xml.Node
import deburnat.transade.gui.admins.GuiAdmin._
import deburnat.transade.gui.admins.TemplatesAdmin.{tMode, tShow}
import deburnat.transade.Mode._
import deburnat.transade.gui.north.{TemplatesComboBox, TemplateSelectedEvent}
import deburnat.transade.gui.components.{DOptionPane, MonoTextField, LButton}
import DOptionPane.warn
/**
 * Panel hosting the run controls of a transfer tab: a run-mode selector, a
 * checkbox deciding whether the report is opened after the run, a text field
 * for an optional template name, and the run button. Restores mode/show
 * settings when a template is selected in the templates combo box.
 * Project name: deburnat
 * Date: 8/28/13
 * Time: 8:56 AM
 * @author Patrick Meppe ([email protected])
 */
protected[center] class LoadPanel(
  tabbedPane: DTabbedPane, nodeCheckBoxMapMap: Map[Int, Map[Node, CheckBox]],
  templates: TemplatesComboBox
) extends BorderPanel{
  // Maps each localized mode label back to its internal mode key.
  private val modes = Map[String, String]()
  list.foreach{l => modes(view.read(l)) = l}
  //a BoxPanel at the place of a BorderPanel can also do the trick
  layout(new BorderPanel{
    val (modeComboBox, showCheckBox) = (
      new ComboBox(modes.keys.toSeq){selection.item = view.read(list(0))},
      new CheckBox(view.read("openreport")){
        selected = true
        tooltip = tRead("openreport")
      }
    )
    //WEST
    layout(new BoxPanel(Horizontal){
      contents += Swing.HStrut(13) //to enable the alignment on the gui
      contents += modeComboBox
      contents += Swing.HStrut(10)
      contents += showCheckBox
      contents += Swing.HStrut(50)
    }) = West
    //CENTER: template TextField
    private val templateTextField = new MonoTextField{
      val _text = "templatetext"
      tooltip = tRead(_text)
      val vText = view.read(_text)
      // True when the field still shows the greyed-out placeholder text or is blank.
      def empty = (text.trim == vText && foreground == off) || text.trim.isEmpty
      def reset = { //default settings
        text = vText
        foreground = off
      }
      reset
      listenTo(mouse.clicks, this)
      reactions += {
        case e: MouseClicked =>
          if(e.peer.getButton == 1) text = ""
          else{ //right click (and wheel click) to keep the current word in the TextField
            if(empty) text = ""
            requestFocus
          }
          foreground = on
        case e: FocusLost => if(text.trim == "") reset
        case e: FocusGained => if(empty){
          text = ""
          foreground = on
        }
      }
    }
    layout(templateTextField) = Center
    //EAST: RunButton
    //val page = tabbedPane.selected.page
    val idx = tabbedPane.selectedIdx
    layout(new LButton("run",
      try{ //ONCLICK
        if(coreAdmin.goodToGo){
          //First make sure that the templateTextField is adequately set
          if(templateTextField.empty) templateTextField.text = ""
          //Second run the play/run-button
          val mode = modeComboBox.selection.item
          RunButton.onClick(
            nodeCheckBoxMapMap(idx), templates,
            (mode, modes(mode)), showCheckBox.selected,
            templateTextField, templateTextField.reset,
            tabbedPane.getPath, tabbedPane.setComputationDone
          )
        }else warn("goodtogo")
      }catch{case e: NoSuchElementException => //this exception is thrown if the button is click before a file is selected
        templateTextField.reset
        warn("file")
      }
    ){border = Swing.EmptyBorder(0, 5, 0, 0)}) = East
    listenTo(templates) //react accordingly if a template is selected
    reactions += {
      case TemplateSelectedEvent(r) =>
        modeComboBox.selection.item = view.read(r.read(tMode))
        val show = r.read(tShow)
        // Fall back to "open the report" when the stored flag is not a boolean.
        showCheckBox.selected = if(show.matches("(true|false)")) show.toBoolean else true
    }
    border = Swing.EmptyBorder(5, 30, 5, 400)
  }) = Center
}
|
deburnatshazem/deburnat
|
gui/src/main/scala/deburnat/transade/gui/center/LoadPanel.scala
|
Scala
|
apache-2.0
| 3,931 |
// Negative compile test for recursive match types: the `// error` markers below
// are asserted by the compiler test harness and must stay exactly where they are.
object Firsts:
  type First[X] = X match
    case Map[_, v] => First[Option[v]]
  def first[X](x: X): First[X] = x match
    case x: Map[_, _] => first(x.values.headOption) // error
// The entry point is itself expected to fail type-checking (see `// error` marker).
@main
def runFirsts2(): Unit =
  assert(first(Map.empty[Int, Int]) == None) // error
|
dotty-staging/dotty
|
tests/neg/10349.scala
|
Scala
|
apache-2.0
| 276 |
package org.gbougeard.model.projects
/**
 * One section of a project dashboard: a display name plus the change query
 * that populates it.
 */
case class DashboardSectionInfo(name: String,
                                query: String)
object DashboardSectionInfo {
  import play.api.libs.json.Json
  // NOTE(review): the functional-syntax import appears unused by the Json.format macro - confirm before removing.
  import play.api.libs.functional.syntax._
  // Macro-generated JSON Reads/Writes for DashboardSectionInfo.
  implicit val format = Json.format[DashboardSectionInfo]
}
|
gbougeard/gas
|
src/main/scala/org/gbougeard/model/projects/DashboardSectionInfo.scala
|
Scala
|
apache-2.0
| 456 |
// $ scalac LogisticRegression.scala
// $ scala LogisticRegression
import scala.math
/**
 * Multinomial logistic regression trained by stochastic gradient descent.
 *
 * @param N     number of training examples (used to scale the gradient step)
 * @param n_in  input dimensionality
 * @param n_out number of output classes
 */
class LogisticRegression(val N: Int, val n_in: Int, val n_out: Int) {
  // Weight matrix (n_out rows x n_in columns), zero-initialized.
  val W: Array[Array[Double]] = Array.ofDim[Double](n_out, n_in)
  // Per-class bias vector, zero-initialized.
  val b: Array[Double] = new Array[Double](n_out)

  /**
   * Performs one SGD step on a single example.
   *
   * @param x  input vector of length n_in
   * @param y  one-hot target vector of length n_out
   * @param lr learning rate
   */
  def train(x: Array[Int], y: Array[Int], lr: Double): Unit = {
    val p_y_given_x: Array[Double] = new Array[Double](n_out)
    val dy: Array[Double] = new Array[Double](n_out)
    // Forward pass: linear scores followed by softmax.
    for (i <- 0 until n_out) {
      p_y_given_x(i) = 0
      for (j <- 0 until n_in) {
        p_y_given_x(i) += W(i)(j) * x(j)
      }
      p_y_given_x(i) += b(i)
    }
    softmax(p_y_given_x)
    // Gradient step: dy = target - prediction, scaled by lr / N.
    for (i <- 0 until n_out) {
      dy(i) = y(i) - p_y_given_x(i)
      for (j <- 0 until n_in) {
        W(i)(j) += lr * dy(i) * x(j) / N
      }
      b(i) += lr * dy(i) / N
    }
  }

  /**
   * In-place, numerically stable softmax over the first n_out entries of x.
   * Subtracting the true maximum before exponentiating does not change the
   * mathematical result (softmax is shift-invariant) but avoids overflow.
   */
  def softmax(x: Array[Double]): Unit = {
    // Was initialized to 0.0, which silently used a wrong "maximum" whenever
    // all inputs were negative; NegativeInfinity makes the scan correct and
    // is safe because the loops are no-ops when n_out == 0.
    var max: Double = Double.NegativeInfinity
    var sum: Double = 0.0
    for (i <- 0 until n_out) if (max < x(i)) max = x(i)
    for (i <- 0 until n_out) {
      x(i) = math.exp(x(i) - max)
      sum += x(i)
    }
    for (i <- 0 until n_out) x(i) /= sum
  }

  /**
   * Computes class probabilities for input x, writing them into y.
   *
   * @param x input vector of length n_in
   * @param y output buffer of length n_out; overwritten with probabilities
   */
  def predict(x: Array[Int], y: Array[Double]): Unit = {
    for (i <- 0 until n_out) {
      y(i) = 0
      for (j <- 0 until n_in) {
        y(i) += W(i)(j) * x(j)
      }
      y(i) += b(i)
    }
    softmax(y)
  }
}
object LogisticRegression {
  /**
   * Smoke test: fits a tiny two-class, six-feature dataset and prints the
   * predicted class probabilities for two held-out examples (one per class).
   */
  def test_lr() {
    val lr = 0.1
    val epochs = 500
    val trainingInputs: Array[Array[Int]] = Array(
      Array(1, 1, 1, 0, 0, 0),
      Array(1, 0, 1, 0, 0, 0),
      Array(1, 1, 1, 0, 0, 0),
      Array(0, 0, 1, 1, 1, 0),
      Array(0, 0, 1, 0, 1, 0),
      Array(0, 0, 1, 1, 1, 0)
    )
    val trainingLabels: Array[Array[Int]] = Array(
      Array(1, 0),
      Array(1, 0),
      Array(1, 0),
      Array(0, 1),
      Array(0, 1),
      Array(0, 1)
    )

    val classifier = new LogisticRegression(trainingInputs.length, 6, 2)

    // Same visiting order as before: full pass over the samples per epoch.
    for (_ <- 0 until epochs; sample <- trainingInputs.indices) {
      classifier.train(trainingInputs(sample), trainingLabels(sample), lr)
    }
    // learning_rate *= 0.95

    val testInputs: Array[Array[Int]] = Array(
      Array(1, 0, 1, 0, 0, 0),
      Array(0, 0, 1, 1, 1, 0)
    )
    val predictions = Array.ofDim[Double](testInputs.length, 2)

    testInputs.indices.foreach { row =>
      classifier.predict(testInputs(row), predictions(row))
      predictions(row).foreach(prob => printf("%.5f ", prob))
      println()
    }
  }

  def main(args: Array[String]) {
    test_lr()
  }
}
|
oma-deeplearning/deeplearning
|
scala/LogisticRegression.scala
|
Scala
|
gpl-2.0
| 2,813 |
package com.example.actors
import akka.actor.{Actor, Props}
import akka.event.Logging
import com.example.models.{Location, PlaceListResult}
import com.example.services.HttpService
/**
 * Looks up nearby restaurants for a location via the Places "nearbysearch"
 * web API endpoint.
 */
class PlaceFinder {
  // NOTE(review): declared as a public var — presumably so tests can swap in
  // a different HTTP client; confirm before tightening to a val.
  var httpService = HttpService()

  /** Fetches restaurants within a fixed 5 km radius of `location`. */
  def getPlacesForLocation(location: Location): PlaceListResult = {
    val searchRadius = 5000
    val path = s"/maps/api/place/nearbysearch/json?location=${location.toString}&radius=$searchRadius&type=restaurant"
    httpService.get[PlaceListResult](path)
  }
}
/**
 * Entry-point actor: for each incoming Location it creates a result-processing
 * actor and sends it the list of places found near that location.
 */
class SystemActor extends Actor {
  val log = Logging.getLogger(context.system, this)

  def receive = {
    case location: Location =>
      val finder = new PlaceFinder()
      // Same effect order as before: spawn the processor, then fetch & forward.
      val processor = context.system.actorOf(ResultProcessorActor.props)
      processor ! finder.getPlacesForLocation(location).results
    case _ =>
      log.info(s"${this.getClass.getSimpleName}: Unhandled Message")
  }
}
object SystemActor {
  // Props factory for creating SystemActor instances.
  val props = Props[SystemActor]
}
|
divanvisagie/lazy-places
|
src/main/scala/com/example/actors/SystemActor.scala
|
Scala
|
apache-2.0
| 1,042 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package docs.scaladsl.advanced.akka
// Lagom service interface for the worker: a single "doWork" call that accepts
// a Job and acknowledges it with a JobAccepted.
package workerservice {
  import com.lightbend.lagom.scaladsl.api.Service
  import com.lightbend.lagom.scaladsl.api.ServiceCall
  import docs.scaladsl.advanced.akka.dataobjects.Job
  import docs.scaladsl.advanced.akka.dataobjects.JobAccepted
  trait WorkerService extends Service {
    // Submit a Job; the reply only acknowledges acceptance, not completion.
    def doWork: ServiceCall[Job, JobAccepted]
    override def descriptor = {
      import Service._
      named("workservice").withCalls(call(doWork))
    }
  }
}
// Message/data classes shared by the worker service and the worker actor.
// NOTE: the `//#dataobjects` markers delimit a snippet included in the Lagom
// manual — keep them (and the code between them) intact when editing.
package dataobjects {
  //#dataobjects
  import play.api.libs.json.Format
  import play.api.libs.json.Json
  case class Job(jobId: String, task: String, payload: String)
  object Job {
    implicit val format: Format[Job] = Json.format
  }
  case class JobAccepted(jobId: String)
  object JobAccepted {
    implicit val format: Format[JobAccepted] = Json.format
  }
  //#dataobjects
}
// Service implementation that routes incoming jobs to worker actors running
// on cluster nodes with the "worker-node" role. The `//#service-impl` markers
// delimit a snippet included in the Lagom manual — keep them intact.
package workerserviceimpl {
  import dataobjects.Job
  import dataobjects.JobAccepted
  import worker.Worker
  import workerservice.WorkerService
  //#service-impl
  import akka.actor.ActorSystem
  import akka.cluster.Cluster
  import akka.cluster.routing.ClusterRouterGroup
  import akka.cluster.routing.ClusterRouterGroupSettings
  import akka.routing.ConsistentHashingGroup
  import akka.pattern.ask
  import akka.util.Timeout
  import com.lightbend.lagom.scaladsl.api.ServiceCall
  import scala.concurrent.duration._
  class WorkerServiceImpl(system: ActorSystem) extends WorkerService {
    if (Cluster.get(system).selfRoles("worker-node")) {
      // start a worker actor on each node that has the "worker-node" role
      system.actorOf(Worker.props, "worker")
    }
    // start a consistent hashing group router,
    // which will delegate jobs to the workers. It is grouping
    // the jobs by their task, i.e. jobs with same task will be
    // delegated to same worker node
    val workerRouter = {
      val paths = List("/user/worker")
      val groupConf = ConsistentHashingGroup(paths, hashMapping = {
        case Job(_, task, _) => task
      })
      val routerProps = ClusterRouterGroup(
        groupConf,
        ClusterRouterGroupSettings(
          totalInstances = 1000,
          routeesPaths = paths,
          allowLocalRoutees = true,
          useRoles = Set("worker-node")
        )
      ).props
      system.actorOf(routerProps, "workerRouter")
    }
    def doWork = ServiceCall { job =>
      implicit val timeout = Timeout(5.seconds)
      (workerRouter ? job).mapTo[JobAccepted]
    }
  }
  //#service-impl
}
// The worker actor itself: acknowledges each job and then stops. The
// `//#actor` markers delimit a snippet included in the Lagom manual — keep
// them intact.
package worker {
  import dataobjects.Job
  import dataobjects.JobAccepted
  //#actor
  import akka.actor.Actor
  import akka.actor.Props
  import akka.event.Logging
  object Worker {
    def props = Props[Worker]
  }
  class Worker extends Actor {
    private val log = Logging.getLogger(context.system, this)
    override def receive = {
      case job @ Job(id, task, payload) =>
        log.info("Working on job: {}", job)
        sender ! JobAccepted(id)
        // perform the work...
        context.stop(self)
    }
  }
  //#actor
}
|
ignasi35/lagom
|
docs/manual/scala/guide/advanced/code/Akka.scala
|
Scala
|
apache-2.0
| 3,146 |
package com.twitter.finagle.mux.lease.exp
import com.twitter.app.GlobalFlag
import com.twitter.conversions.storage.intToStorageUnitableWholeNumber
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver, DefaultStatsReceiver}
import com.twitter.util.{Duration, Stopwatch, StorageUnit, NilStopwatch}
import java.util.Collections
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import java.util.logging.Logger
/**
* ClockedDrainer is a thread which keeps track of the garbage collector and
* guesses when it will pause.
*
* @param coord controls how long we sleep for
* @param forceGc forces a gc on invocation
* @param space represents the current state of the JVM's heap
* @param rSnooper keeps track of request rate vis a vis heap allocations
* @param log is used for logging, will be removed later
* @param lr is used for logging things in a pretty way
* @param statsReceiver keeps track of the stats
* @param verbose whether the logging should be verbose or not
*/
// NB: this is a mess, but it's actually fairly straightforward.
// There are four stages, wrapped in an infinite loop.
// The stages are:
// Wait until close enough
// Drain
// GC
// Undrain
private[finagle] class ClockedDrainer(
  coord: Coordinator,
  forceGc: () => Unit,
  space: MemorySpace,
  rSnooper: RequestSnooper,
  log: Logger,
  lr: LogsReceiver = NullLogsReceiver,
  statsReceiver: StatsReceiver = NullStatsReceiver,
  verbose: Boolean = false
) extends Thread("GcDrainer") with Lessor {
  // Concurrent set of all registered lease holders.
  private[this] val lessees = Collections.newSetFromMap(
    new ConcurrentHashMap[Lessee, java.lang.Boolean])
  // Total requests observed (via observe) and arrivals (via observeArrival).
  private[this] val requestCount = new AtomicInteger(0)
  private[this] val narrival = new AtomicInteger(0)
  // Exactly one of these stopwatches runs at a time: openFor measures time in
  // the "leases issued" state, closedFor time in the drained state.
  @volatile private[this] var openFor = Stopwatch.start()
  @volatile private[this] var closedFor = NilStopwatch.start()
  @volatile private[this] var forcedGc = 0L
  // NOTE(review): genDrained/genOpen are never written anywhere in this file,
  // so the "gendiff" log line below is always 0 — confirm intent.
  @volatile private[this] var genDrained, genOpen = 0L
  // Estimates how long we can afford to wait for draining: bytes of headroom
  // left divided by the current allocation rate.
  private[this] def calculateMaxWait: Duration = {
    val rate = coord.counter.rate
    val r = space.left
    if (r <= StorageUnit.zero) Duration.Zero
    else if (rate <= 0) 10.milliseconds
    else (r.inBytes / rate).toLong.milliseconds
  }
  // All exported stats, scoped by statsReceiver.
  private object stats {
    val undrain = statsReceiver.counter("undrain")
    val drain = statsReceiver.counter("drain")
    val forcedGcs = statsReceiver.counter("forcedgcs")
    val naturalGcs = statsReceiver.counter("naturalgcs")
    val pendingAtGc = statsReceiver.stat("pendingatgc")
    val pendingAtDrain = statsReceiver.stat("pendingatdrain")
    val drainTime = statsReceiver.stat("draintime_ms")
    val openTime = statsReceiver.stat("opentime_ms")
    val closedTime = statsReceiver.stat("closedtime_ms")
    val openForGauge = statsReceiver.addGauge("openfor_ms") {
      openFor() match {
        case Duration.Finite(d) => d.inMilliseconds.toFloat
        case _ => -1F
      }
    }
    val closedForGauge = statsReceiver.addGauge("closedfor_ms") {
      closedFor() match {
        case Duration.Finite(d) => d.inMilliseconds.toFloat
        case _ => -1F
      }
    }
    val discountGauge = statsReceiver.addGauge("discount") { space.discount().inBytes.toFloat }
  }
  // Sum of pending requests across every registered lessee.
  def npending() = {
    var s = 0
    val iter = lessees.iterator()
    while (iter.hasNext)
      s += iter.next().npending()
    s
  }
  // Records a snapshot of counters for the given lifecycle phase name.
  private[this] def upkeep(state: String, init: () => Duration) {
    lr.record("%s_ms".format(state), init().inMilliseconds.toString)
    lr.record("count_%s".format(state), requestCount.get().toString)
    lr.record("pending_%s".format(state), npending().toString)
    lr.record("arrival_%s".format(state), narrival.get().toString)
    coord.counter.info.record(lr, state)
  }
  // Main loop: ready (wait until close to GC) -> drain -> gc -> undrain.
  override def run() {
    var ncycles = 0L
    val init = Stopwatch.start()
    coord.warmup()
    while (true) {
      ready(init)
      val g: Long = coord.counter.info.generation()
      upkeep("closed", init)
      drain()
      upkeep("drained", init)
      gc(g, init)
      undrain()
      ncycles += 1
      lr.record("cycle", ncycles.toString)
      flushLogs()
    }
  }
  // The drainer thread starts itself as a daemon on construction.
  setDaemon(true)
  start()
  // READY
  // Waits until the estimated remaining heap discount is nearly used up,
  // periodically re-issuing leases sized to the predicted time-to-GC.
  private[lease] def ready(init: () => Duration) { // private[lease] for testing
    lr.record("gate_open_ms", init().inMilliseconds.toString)
    coord.gateCycle()
    upkeep("open", init)
    coord.sleepUntilDiscountRemaining(space, { () =>
      if (verbose) {
        log.info("AWAIT-DISCOUNT: discount="+space.discount()+
          "; clock="+coord.counter +
          "; space="+space
        )
      }
      // discount (bytes) / rate (bytes / second) == expiry (seconds)
      issueAll((space.discount.inBytes / coord.counter.rate).toLong.seconds)
    })
  }
  // DRAINING
  // Revokes all leases and waits for pending requests to finish.
  private[lease] def drain() { // private[lease] for testing
    val sinceClosed = Stopwatch.start()
    startDraining()
    finishDraining()
    stats.drainTime.add(sinceClosed().inMilliseconds)
  }
  // Flips the open/closed stopwatches and issues zero-duration leases,
  // i.e. revokes every lessee's lease.
  private[this] def startDraining() {
    stats.openTime.add(openFor().inMilliseconds)
    openFor = NilStopwatch.start()
    closedFor = Stopwatch.start()
    stats.drain.incr()
    stats.pendingAtDrain.add(npending())
    issueAll(Duration.Zero)
  }
  // Issues a lease of the given duration to every registered lessee.
  private[this] def issueAll(duration: Duration) {
    val iter = lessees.iterator()
    while (iter.hasNext)
      iter.next().issue(duration)
  }
  // Waits (bounded by calculateMaxWait) for pending requests to drain.
  private[this] def finishDraining() {
    val maxWait = calculateMaxWait
    if (verbose) {
      log.info("AWAIT-DRAIN: n="+npending()+
        "; clock="+coord.counter+
        "; space="+space+
        "; maxWaitMs="+maxWait.inMilliseconds+
        "; minDiscount="+space.minDiscount)
    }
    coord.sleepUntilFinishedDraining(space, maxWait, npending, log)
  }
  // GC
  // loop until the gc is acknowledged
  // Forces a collection unless one already happened naturally (generation
  // counter moved) since draining began.
  private[lease] def gc(generation: Long, init: () => Duration) { // private[lease] for testing
    val elapsedGc = Stopwatch.start()
    forcedGc = 0
    if (coord.counter.info.generation() == generation) {
      val n = npending()
      if (verbose) log.info("FORCE-GC: n="+n+"; clock="+coord.counter+"; space="+space)
      lr.record("byteLeft", coord.counter.info.remaining().inBytes.toString)
      forcedGc = 0
      coord.sleepUntilGc({ () =>
        forceGc()
        forcedGc += 1
      }, 10.milliseconds)
      stats.pendingAtGc.add(n)
      stats.forcedGcs.incr()
    } else {
      if (verbose) log.info("NATURAL-GC")
      lr.record("byteLeft", -1.toString)
      stats.naturalGcs.incr()
    }
    upkeep("done", init)
    val gcMs = elapsedGc().inMilliseconds
    lr.record("gcMs", gcMs.toString)
  }
  // UNDRAINING
  // Re-opens for business: flips the stopwatches back and issues unbounded
  // leases to all lessees.
  private[lease] def undrain() { // private[lease] for testing
    stats.closedTime.add(closedFor().inMilliseconds)
    openFor = Stopwatch.start()
    closedFor = NilStopwatch.start()
    stats.undrain.incr()
    issueAll(Duration.Top)
  }
  // FLUSHING
  private[this] def flushLogs() {
    lr.record("gendiff", (genDrained - genOpen).toString)
    lr.record("forcedGc", forcedGc.toString)
    lr.flush()
  }
  // TODO: can this API be made easier to use?
  def register(lessee: Lessee) {
    lessees.add(lessee)
    // TODO: issue leases immediately.
    // currently there's a bit of startup cost if
    // a client joins while we're draining.
  }
  def unregister(lessee: Lessee) {
    lessees.remove(lessee)
  }
  // Records one completed request and its duration for rate estimation.
  def observe(d: Duration) {
    requestCount.incrementAndGet()
    rSnooper.observe(d)
  }
  // Records the arrival of a request (before it completes).
  def observeArrival() {
    narrival.incrementAndGet()
  }
}
// Command-line tunables for the GC drainer (see ClockedDrainer.flagged below).
// (min, max) bounds of the heap "discount" used to decide when to drain.
object drainerDiscountRange extends GlobalFlag(
  (50.megabytes, 600.megabytes), "Range of discount")
// Request-latency percentile used by the RequestSnooper (given as an integer, e.g. 95).
object drainerPercentile extends GlobalFlag(95, "GC drainer cutoff percentile")
// Enables verbose, deduplicated drainer logging.
object drainerDebug extends GlobalFlag(false, "GC drainer debug log (verbose)")
// Master switch: when false, ClockedDrainer.flagged is Lessor.nil.
object drainerEnabled extends GlobalFlag(false, "GC drainer enabled")
object nackOnExpiredLease extends GlobalFlag(false, "nack when the lease has expired")
private[finagle] object ClockedDrainer {
  private[this] val log = Logger.getLogger("ClockedDrainer")
  // Verbose deduplicating log sink only when the debug flag is set.
  private[this] val lr = if (drainerDebug()) new DedupingLogsReceiver(log) else NullLogsReceiver
  /**
   * Flag-gated singleton: builds a fully wired drainer when `drainerEnabled`
   * is set and a ParNew+CMS coordinator can be acquired; otherwise falls back
   * to the no-op `Lessor.nil`.
   */
  lazy val flagged: Lessor = if (drainerEnabled()) {
    Coordinator.create() match {
      case None =>
        log.warning("Failed to acquire a ParNew+CMS Coordinator; cannot "+
          "construct drainer")
        Lessor.nil
      case Some(coord) =>
        // The percentile flag is an integer (e.g. 95); the snooper expects a fraction.
        val rSnooper = new RequestSnooper(
          coord.counter,
          drainerPercentile().toDouble / 100.0,
          lr
        )
        val (min, max) = drainerDiscountRange()
        assert(min < max)
        val space = new MemorySpace(
          coord.counter.info,
          min,
          max,
          rSnooper,
          lr
        )
        // Note: constructing the drainer also starts its daemon thread.
        new ClockedDrainer(
          coord,
          GarbageCollector.forceNewGc,
          space,
          rSnooper,
          log,
          lr,
          DefaultStatsReceiver.scope("gcdrainer")
        )
    }
  } else {
    log.info("Drainer is disabled; bypassing")
    Lessor.nil
  }
}
|
sveinnfannar/finagle
|
finagle-mux/src/main/scala/com/twitter/finagle/mux/lease/exp/ClockedDrainer.scala
|
Scala
|
apache-2.0
| 9,070 |
package org.helgoboss.domino.configuration_watching
import org.osgi.service.cm.{ConfigurationAdmin, ManagedService}
import org.helgoboss.capsule.{CapsuleContext, CapsuleScope}
import org.osgi.service.metatype.{MetaTypeProvider => JMetaTypeProvider}
import org.helgoboss.scala_osgi_metatype.interfaces.MetaTypeProvider
import org.osgi.framework.{BundleContext, Constants, ServiceRegistration}
import java.util.Dictionary
import org.helgoboss.domino.DominoUtil
import org.helgoboss.domino.service_consuming.ServiceConsuming
/**
* A capsule which allows easy access to the [[org.osgi.service.cm.ConfigurationAdmin]] functionality related
* to normal configurations (not factory configurations).
*
* @param servicePid Service PID
* @param f Handler which is executed initially and on every configuration change
* @param metaTypeProvider Optional metatype provider
* @param serviceConsuming Dependency
* @param bundleContext Dependency
* @param capsuleContext Dependency
*/
class ConfigurationWatcherCapsule(
  servicePid: String,
  f: Map[String, Any] => Unit,
  metaTypeProvider: Option[MetaTypeProvider],
  serviceConsuming: ServiceConsuming,
  bundleContext: BundleContext,
  capsuleContext: CapsuleContext
) extends AbstractConfigurationWatcherCapsule(metaTypeProvider) with ManagedService {
  // Registration handle; non-null only between start() and stop().
  protected var _reg: ServiceRegistration[ManagedService] = _
  /**
   * Returns the service registration of the configuration listener as long as the current scope is active.
   */
  def reg = _reg
  /**
   * Contains the interfaces under which this object will be put in the service registry.
   */
  protected lazy val interfacesArray: Array[String] = Array(classOf[ManagedService].getName) ++ (
    metaTypeProvider map { p => classOf[JMetaTypeProvider].getName })
  /**
   * Contains the new capsule scope.
   */
  protected var newCapsuleScope: Option[CapsuleScope] = None
  /**
   * Contains the previous configuration map. Used to determine whether the configuration has changed.
   */
  protected var oldOptConf: Option[Dictionary[String, _]] = None
  /**
   * Capsule lifecycle: runs the handler once with the current configuration,
   * then registers this object as a ManagedService for change notifications.
   */
  def start() {
    // Service properties
    val propertiesMap = Map(Constants.SERVICE_PID -> servicePid)
    // Find out current configuration by pulling it
    val optConf = getConfigDirectly
    // At first execute inner block synchronously with current configuration. Even if configuration admin is not present.
    executeBlockWithConf(optConf)
    // Register managed service. This will cause ConfigurationAdmin push the current configuration in a separate
    // thread and call updated(). In updated(), we prevent the second execution of the inner block because we check
    // whether the configuration is still the same.
    val tmp = bundleContext.registerService(interfacesArray, this, DominoUtil.convertToDictionary(propertiesMap))
    _reg = tmp.asInstanceOf[ServiceRegistration[ManagedService]]
  }
  /**
   * Capsule lifecycle: tears down the scope created by the last handler run
   * and unregisters the ManagedService.
   */
  def stop() {
    // Stop capsules in the newly created capsule scope
    newCapsuleScope foreach { _.stop() }
    // Unregister managed service
    _reg.unregister()
    _reg = null
  }
  /**
   * ManagedService callback, invoked by the Configuration Admin (possibly on
   * another thread) whenever the configuration for `servicePid` changes.
   */
  def updated(conf: Dictionary[String, _]) {
    // We query the config admin directly because the user can make sure then that the config value is already set.
    // See http://www.mail-archive.com/[email protected]/msg06764.html
    val safeOptConf = Option(conf) orElse getConfigDirectly
    // Execute handler only if configuration has changed
    executeBlockWithConfIfChanged(safeOptConf)
  }
  /**
   * Executes the handler only if the configuration has changed compared to the one which was used last.
   */
  protected def executeBlockWithConfIfChanged(optConf: Option[Dictionary[String, _]]) {
    if (oldOptConf != optConf) {
      executeBlockWithConf(optConf)
    }
  }
  /**
   * Executes the handler with the given configuration and saves it for future comparison.
   */
  protected def executeBlockWithConf(optConf: Option[Dictionary[String, _]]) {
    // Stop capsules in previous scope
    newCapsuleScope foreach { _.stop() }
    // Start capsules in new scope
    newCapsuleScope = Some(capsuleContext.executeWithinNewCapsuleScope {
      optConf match {
        case Some(conf) =>
          // Execute handler
          f(DominoUtil.convertToMap(conf))
        case None =>
          // No configuration there. We use an empty map.
          f(Map.empty)
      }
    })
    // Save old conf
    oldOptConf = optConf
  }
  /**
   * Pulls the current configuration from the configuration admin.
   */
  protected def getConfigDirectly: Option[Dictionary[String, _]] = {
    serviceConsuming.withService[ConfigurationAdmin, Option[Dictionary[String, _]]] {
      case Some(confAdmin) =>
        Option(confAdmin.getConfiguration(servicePid)) match {
          case Some(c) => Option(c.getProperties)
          case None => None
        }
      case None => None
    }
  }
}
|
lefou/domino
|
src/main/scala/org/helgoboss/domino/configuration_watching/ConfigurationWatcherCapsule.scala
|
Scala
|
mit
| 4,899 |
package com.github.mgoeminne.iban
import org.scalatest.{Inspectors, Matchers, FlatSpec}
/**
* Tests dedicated to the Portuguese BBAN
*/
class BBANPortugalTest extends FlatSpec with Matchers with Inspectors
{
  // Reference BBANs whose check digits are correct.
  val validBBANs = Seq(
    "0002 0123 12345678901 54",
    "0035 0683 00000007843 11"
  )

  // A BBAN whose check digits are deliberately wrong.
  val invalidBBANs = Seq("0781 0112 00000007843 10")

  "All valid BBAN" should "pass the validation test" in {
    forAll(validBBANs) { bban =>
      new BBANPortugal(bban).isValid shouldBe Some(true)
    }
  }

  it should "produce correct string representation" in {
    // Round-trip: parsing and re-printing must preserve the input exactly.
    validBBANs.map(bban => new BBANPortugal(bban).toString) shouldEqual validBBANs
  }

  "All invalid BBAN" should "not pass the validation test" in {
    forAll(invalidBBANs) { bban =>
      new BBANPortugal(bban).isValid shouldBe Some(false)
    }
  }
}
|
mgoeminne/scala-iban
|
src/test/scala/com/github/mgoeminne/iban/BBANPortugalTest.scala
|
Scala
|
mit
| 829 |
package elea.term
import elea._
import elea.rewrite.Env
import scalaz.Ordering.{EQ, GT, LT}
import scalaz.{Name => _, _}
import Scalaz._
case class Fix(body: Term,
               index: Fix.Index,
               name: Option[String] = None)
  extends Term with FirstOrder[Term] {
  // Reduce, but keep `this` (and hence the human-readable `name`) when the
  // result is alpha-equal to the original.
  override def reduce(env: Env): Term = {
    val newFix = super.reduce(env)
    if (newFix =@= this) this // preserve `name`
    else newFix
  }
  // Head reduction: first strips any constant argument; otherwise collapses
  // fixed-points whose body never uses the fixed variable, or whose body is Bot.
  override def reduceHead(env: Env): Term = {
    constantArgs
      .headOption.map(argIdx => removeConstantArg(argIdx).reduce(env))
      .getOrElse {
        body match {
          case body: Lam if !body.body.freeVars.contains(body.binding) =>
            body.body
          case body: Bot.type =>
            Bot
          case _ =>
            this
        }
      }
  }
  // TODO filter on decreasing/strict args
  // Head reduction of an application of this fixed-point to `args`:
  //  1. Bot at a strict position makes the whole application Bot.
  //  2. A Case at a strict position is floated out to be topmost.
  //  3. A constructor (or Bot) argument triggers a guarded unfolding attempt.
  override def reduceHeadApp(env: Env, args: NonEmptyList[Term]): Term = {
    if (strictArgs(args.list).any(_ == Bot)) {
      Bot
    } else {
      strictArgIndices.find(i => i < args.size && args.index(i).get.isInstanceOf[Case]) match {
        case Some(idx) =>
          args.index(idx).get match {
            // If a pattern match is a strict argument to a fixed-point,
            // we can float it out to be topmost
            case caseArg: Case =>
              C(x => this.apply(args.list.setAt(idx, Var(x))))
                .applyToBranches(caseArg)
                .reduceIgnoringMatchedTerm(env)
          }
        case None =>
          body match {
            case body: Lam if args.any(t => t.leftmost.isInstanceOf[Constructor] || t == β₯) =>
              // If an argument to a fixed-point is a constructor or a β₯, we can try to unfold
              // the fixed-point
              val originalTerm = App(this, args)
              val reduced = App(body.body, args).reduce(env)
              val recursiveSubterms = reduced.subtermsContaining(ISet.singleton(body.binding))
              // TODO try all recursive subterms which are pattern matches must match over a sub-term, instead of the general is case-of logic
              // The unfolding is only kept if every remaining recursive call
              // is on strictly "smaller" arguments (homeomorphic embedding).
              lazy val wasProductive = recursiveSubterms.all {
                case term@App(Var(f), xs) if f == body.binding =>
                  term.strictlyEmbedsInto(App(Var(f), args))
                case _ =>
                  true
              }
              // TODO what about examples that remove all recursive subterms after multiple unfoldings?
              // Remember the ".lteq (.count n xs) (.count n (.app xs ys))" example next time you feel
              // like simplifying this unfolding logic
              if (recursiveSubterms.isEmpty || (!reduced.isInstanceOf[Case] && wasProductive))
                (reduced :/ (this / body.binding)).reduce(env.havingSeen(originalTerm))
              else
                super.reduceHeadApp(env, args)
            case _ =>
              super.reduceHeadApp(env, args)
          }
      }
    }
  }
  // One unfolding: substitutes the entire fixed-point for its bound variable.
  override def unfold: Term = body.betaReduce(NonEmptyList(this))
  override def mapImmediateSubtermsWithBindings(f: (ISet[Name], Term) => Term): Term = {
    val newBody = f(ISet.empty, body)
    if (newBody =@= body)
      // Preserve things like `name` if nothing semantically has changed
      copy(body = newBody)
    else
      Fix(newBody, index)
  }
  // Named fixed-points print as their definition name; anonymous ones print
  // their flattened lambda body in full.
  override def toLisp(settings: LispPrintSettings): String =
    name.map(n => Name.asDefinition(n) + index.toString).getOrElse {
      val (bindings, innerBody) = body.flattenLam
      s"(fix$index ${bindings.toList.mkString(" ")} $innerBody)".indentNewBlock
    }
  override def withName(name: String) =
    copy(name = Some(name))
  // Productivity check: true when every use of the fixed variable in the body
  // sits beneath a constructor (with pattern matches inspected branch-wise).
  // False for bodies that are not lambdas.
  lazy val isProductive: Boolean = {
    val (bindings, innerBody) = body.flattenLam
    bindings match {
      case INil() =>
        false
      case ICons(fixVar, _) =>
        def productiveBranch(branch: Branch): Boolean =
          branch.immediateSubtermsWithBindings.all {
            case (branchVars, branchTerm) =>
              !branchVars.contains(fixVar) && productiveTerm(branchTerm)
          }
        def productiveTerm(term: Term): Boolean =
          term match {
            case _ if !term.freeVars.contains(fixVar) =>
              true
            case term: Case =>
              !term.matchedTerm.freeVars.contains(fixVar) &&
              term.branches.all(productiveBranch)
            case _ =>
              term.leftmost.isInstanceOf[Constructor]
          }
        productiveTerm(innerBody)
    }
  }
  def argCount: Int =
    body.flattenLam._1.length - 1 // -1 for the fixed variable
  /**
   * Constant arguments are ones whose value never changes in any recursive function call.
   * {{{
   * t"fix f x y -> f x (Suc y)".asInstanceOf[Fix].constantArgs == ISet.singleton(0)
   * t"Add".asInstanceOf[Fix].constantArgs == ISet.singleton(1)
   * }}}
   * @return The indices of any constant arguments to this fixed-point
   */
  def constantArgs: IList[Int] =
    body match {
      case body: Lam =>
        val (bindings, innerBody) = body.flatten
        require(bindings.toList.size == bindings.toSet.size)
        val fixBinding = bindings.head
        val argBindings = bindings.tail
        // Every application of the fixed variable inside the body.
        val recursiveCalls = innerBody
          .subtermsWithBindings
          .filter { case (bindings, term) =>
            term match {
              case App(Var(f), _) => !bindings.contains(f) && f == fixBinding
              case _ => false
            }
          }
        // Keep index i iff every recursive call passes the i-th binding unchanged.
        IList(argBindings.toList.indices : _*).filter { (i: Int) =>
          val arg = argBindings.index(i).get
          recursiveCalls.all {
            case (bindings, App(_, xs)) =>
              !bindings.contains(arg) && xs.index(i).fold(false)(x => x == Var(arg))
            case _ =>
              throw new AssertionError("wat")
          }
        }
      case _ =>
        IList.empty
    }
  /**
   * Rewrites the fixed-point so that the constant argument at `argIdx` is
   * lambda-abstracted outside the fix instead of threaded through every
   * recursive call. Requires that `argIdx` really is constant (see constantArgs).
   */
  def removeConstantArg(argIdx: Int): Term =
    body match {
      case body: Lam =>
        val (NonEmptyList(fixBinding, argBindings), innerBody) = body.flatten
        require(argBindings.length > argIdx)
        val (leftArgs, otherArgs) = argBindings.splitAt(argIdx)
        val (removedArg, rightArgs) = (otherArgs.headOption.get, otherArgs.tailOption.get)
        val newInnerBody = innerBody.mapTermsContaining(ISet.singleton(fixBinding)) {
          case App(f, xs) if f == Var(fixBinding) =>
            require(xs.index(argIdx) == Some(Var(removedArg)), "this is not a constant argument")
            f.apply(xs.list.removeAt(argIdx).get)
          case other => other
        }
        val newFixBody = Lam(NonEmptyList.nel(fixBinding, leftArgs ++ rightArgs), newInnerBody)
        val newFix = Fix(newFixBody, index)
        Lam(leftArgs :+ removedArg, newFix.apply(leftArgs.map((x: Name) => Var(x).asInstanceOf[Term])))
      case _ =>
        throw new AssertionError("Cannot remove constant arguments from fixed-points with non-lambda bodies")
    }
  override def arbitraryOrderingNumber: Int = 3
  override def zip(other: Term): Option[IList[(Term, Term)]] =
    other match {
      case other: Fix =>
        Some(IList((body, other.body)))
      case _ =>
        None
    }
  override def order(other: Term) =
    other match {
      case other: Fix =>
        index ?|? other.index |+| body.order(other.body)
      case _ =>
        arbitraryOrderingNumber ?|? other.arbitraryOrderingNumber
    }
  // Tries to find a constructor context shared by all non-Bot unrollings of
  // this fixed-point (used by constructor fission below). Only handles
  // constructors with at most one recursive argument.
  def guessConstructorContext: Option[Context] =
    body match {
      case body: Lam =>
        val fixArgs = body.flatten._1.tail
        val explored = this.apply(fixArgs.map(n => Var(n): Term)).explore.filter(_ != Bot)
        for {
          potentialContext <- explored.headOption
          constr <- potentialContext.leftmost match {
            case constr: Constructor => Some(constr)
            case _ => None
          }
          context <- constr.recursiveArgs.toList match {
            case Seq() =>
              Some(C(_ => potentialContext))
            case Seq(recArgIdx) =>
              Some(C(ctxGap => constr.apply(potentialContext.asInstanceOf[App].args.list.setAt(recArgIdx, Var(ctxGap)))))
            case _ =>
              // TODO implement the case for recursive argument count > 1
              None
          }
          if context.freeVars.isSubsetOf(this.freeVars)
          if explored.all(t => context.strip(t).isDefined)
        } yield context
      case _ =>
        None
    }
  // Convenience overload: applies the fissioned context/fix pair to `args`.
  final def fissionConstructorContext(args: IList[Term]): Option[Term] =
    fissionConstructorContext.map {
      case (context, newFix) => context.apply(newFix.apply(args))
    }
  // Constructor fission: splits this fixed-point into a constructor context
  // and a residual fixed-point such that ctx(newFix) equals the original.
  lazy val fissionConstructorContext: Option[(Context, Fix)] =
    body match {
      case body: Lam =>
        for {
          ctx <- guessConstructorContext
          fixArgs = body.flatten._1.tail
          expandedCtx = C(gap => Lam(fixArgs, ctx.apply(Var(gap).apply(fixArgs.map(n => Var.apply(n): Term)))))
          reduced = body.apply(expandedCtx.apply(Var(body.binding))).reduce
          (fixArgs2, reducedBody) = reduced.flattenLam
          stripped <- ctx
            .strip(reducedBody)
            .tap(_ => assert(fixArgs == fixArgs2))
        } yield (ctx, Fix(Lam(body.binding, Lam(fixArgs, stripped)), index))
      case _ =>
        None
    }
  // Indices of arguments that are strict: replacing the argument by Bot makes
  // the whole application reduce to Bot.
  lazy val strictArgIndices: IList[Int] =
    body match {
      case body: Lam =>
        val vars = body.body.flattenLam._1.map(x => Var(x): Term)
        IList(0.until(argCount): _*).filter { i =>
          val args = vars.setAt(i, Bot)
          body.body.apply(args).reduce == Bot
        }
      case _ =>
        IList.empty
    }
  // The subset of `args` that sit at strict positions (note: accumulated by
  // prepending, so the result is in reverse index order).
  final def strictArgs(args: IList[Term]): IList[Term] = {
    var strict = IList.empty[Term]
    strictArgIndices.toList.foreach { i =>
      args.index(i).foreach { t => strict = t :: strict }
    }
    strict
  }
  /**
   * Is fixed-point promoted form
   */
  def isFPPF(args: IList[Term]): Boolean =
    args.all(_.isInstanceOf[Var]) &&
    args.distinct == args &&
    freeVars.intersection(ISet.unions(args.map(_.freeVars).toList)).isEmpty
}
object Fix {
  /**
   * Ordinal index attached to a fixed-point. Finite indices render as
   * `[name]`; omega (non-finite) indices render as the empty string.
   */
  case class Index private(name: Name, isFinite: Boolean) {
    override def toString =
      if (isFinite) s"[$name]" else ""
    def asFinite: Index = copy(isFinite = true)
    def asOmega: Index = copy(isFinite = false)
    // NB: the fresh-name prefix below is a runtime string and is kept exactly
    // as found in the original source.
    def freshen: Index = copy(name = Name.fresh("Ξ±"))
    def isOmega: Boolean = !isFinite
  }
  // Default (omega) placeholder index.
  def emptyIndex: Index = omega(Name("empty"))
  def finite(name: Name): Index = Index(name, true)
  def omega(name: Name): Index = Index(name, false)
  def freshOmegaIndex: Index = emptyIndex.freshen.asOmega
  def freshFiniteIndex: Index = emptyIndex.freshen.asFinite
  // Total order: finiteness first, then name.
  implicit val fixIndexOrder: Order[Fix.Index] = new Order[Fix.Index] {
    override def order(x: Index, y: Index): Ordering =
      x.isFinite ?|? y.isFinite |+| x.name ?|? y.name
  }
}
|
wsonnex/elea
|
src/main/scala/elea/term/Fix.scala
|
Scala
|
mit
| 11,173 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package expressions
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 13.02.2008
*/
/*
* SelfInvocation ::= 'this' ArgumentExprs {ArgumentExprs}
*/
/** Default production instance, wired to the standard argument-expression parser. */
object SelfInvocation extends SelfInvocation {
  override protected def argumentExprs = ArgumentExprs
}
trait SelfInvocation {
  /** Sub-parser used for each `(...)` argument list. */
  protected def argumentExprs: ArgumentExprs
  /**
   * Parses a constructor self-invocation:
   * `SelfInvocation ::= 'this' ArgumentExprs {ArgumentExprs}`.
   *
   * Always returns `true`: when the leading `this` is absent the marker is
   * dropped and error reporting is deferred (see inline comment).
   */
  def parse(builder: ScalaPsiBuilder): Boolean = {
    val selfMarker = builder.mark
    builder.getTokenType match {
      case ScalaTokenTypes.kTHIS =>
        builder.advanceLexer() //Ate this
      case _ =>
        //error moved to ScalaAnnotator to differentiate with compiled files
        selfMarker.drop()
        return true
    }
    if (!argumentExprs.parse(builder)) {
      selfMarker.done(ScalaElementTypes.SELF_INVOCATION)
      return true
    }
    // Consume additional argument lists, stopping at a newline so the next
    // statement is not swallowed into this invocation.
    while (!builder.newlineBeforeCurrentToken && argumentExprs.parse(builder)) {}
    selfMarker.done(ScalaElementTypes.SELF_INVOCATION)
    true
  }
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/parser/parsing/expressions/SelfInvocation.scala
|
Scala
|
apache-2.0
| 1,161 |
package io.reactivecqrs.core.aggregaterepository
import java.time.Instant
import io.reactivecqrs.core.eventstore.EventStoreState
import io.reactivecqrs.core.util.ActorLogging
import io.reactivecqrs.api._
import akka.actor.{Actor, ActorRef, PoisonPill}
import io.reactivecqrs.api.id.{AggregateId, CommandId, UserId}
import io.reactivecqrs.core.eventbus.EventsBusActor.{PublishEvents, PublishEventsAck}
import io.reactivecqrs.core.commandhandler.{CommandExecutorActor, CommandResponseState}
import io.reactivecqrs.core.util.RandomUtil
import org.postgresql.util.PSQLException
import scalikejdbc.DBSession
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.reflect._
import scala.reflect.runtime.universe._
import scala.util.{Failure, Success}
// Message protocol of the aggregate repository actor.
object AggregateRepositoryActor {
  /** Query: reply with the aggregate as currently held by the actor. */
  case class GetAggregateRootCurrentVersion(respondTo: ActorRef)
  /** Query: reply once the aggregate reaches at least `version`, waiting up to `durationMillis`. */
  case class GetAggregateRootCurrentMinVersion(respondTo: ActorRef, version: AggregateVersion, durationMillis: Int)
  /** Query: reply with the aggregate at exactly `version`. */
  case class GetAggregateRootExactVersion(respondTo: ActorRef, version: AggregateVersion)
  /** As GetAggregateRootCurrentVersion, additionally selecting events of the given types. */
  case class GetAggregateRootWithEventsCurrentVersion(respondTo: ActorRef, eventTypes: Set[String])
  /** As GetAggregateRootExactVersion, additionally selecting events of the given types. */
  case class GetAggregateRootWithEventsExactVersion(respondTo: ActorRef, version: AggregateVersion, eventTypes: Set[String])
  /** A command together with its response, stored so repeated commands can be answered idempotently. */
  case class IdempotentCommandInfo(command: Any, response: CustomCommandResponse[_])
  /** Request to persist `events` produced by `commandId`, conditional on `expectedVersion`. */
  case class PersistEvents[AGGREGATE_ROOT](respondTo: ActorRef,
                                           commandId: CommandId,
                                           userId: UserId,
                                           expectedVersion: AggregateVersion,
                                           timestamp: Instant,
                                           events: Seq[Event[AGGREGATE_ROOT]],
                                           commandInfo: Option[IdempotentCommandInfo])
  /** Request to rewrite the given already-stored events and then persist new ones. */
  case class OverrideAndPersistEvents[AGGREGATE_ROOT](rewriteEvents: Iterable[EventWithVersion[AGGREGATE_ROOT]],
                                                      persist: PersistEvents[AGGREGATE_ROOT])
  /** Notification that events were written to the event store. */
  case class EventsPersisted[AGGREGATE_ROOT](events: Seq[IdentifiableEvent[AGGREGATE_ROOT]])
  /** Self-message triggering re-publication of events not yet acknowledged by the event bus. */
  case object ResendPersistedMessages
  /** An aggregate snapshot together with a selection of its events. */
  case class AggregateWithSelectedEvents[AGGREGATE_ROOT](aggregate: Aggregate[AGGREGATE_ROOT], events: Iterable[EventWithVersion[AGGREGATE_ROOT]])
}
// A parked GetAggregateRootCurrentMinVersion request: held until the aggregate
// reaches `requestedVersion` or `untilTimestamp` passes.
// NOTE(review): untilTimestamp looks like an absolute deadline derived from
// the query's durationMillis — confirm the unit (epoch millis) at the caller.
case class DelayedMinVersionQuery(respondTo: ActorRef, requestedVersion: AggregateVersion, untilTimestamp: Long)
/**
 * Actor owning a single aggregate instance: it restores state from the event store,
 * serializes event persistence for that aggregate, applies events to the in-memory
 * aggregate root via `eventHandlers`, and publishes persisted events to `eventsBus`
 * with retry until acknowledged.
 *
 * When `singleReadForVersionOnly` is defined the actor is a throwaway reader: it
 * restores up to that version, answers one query, then poisons itself.
 */
class AggregateRepositoryActor[AGGREGATE_ROOT:ClassTag:TypeTag](aggregateId: AggregateId,
                                                                eventStore: EventStoreState,
                                                                commandResponseState: CommandResponseState,
                                                                eventsBus: ActorRef,
                                                                eventHandlers: (UserId, Instant, AGGREGATE_ROOT) => PartialFunction[Any, AGGREGATE_ROOT],
                                                                initialState: () => AGGREGATE_ROOT,
                                                                singleReadForVersionOnly: Option[AggregateVersion],
                                                                eventsVersionsMap: Map[EventTypeVersion, String],
                                                                eventsVersionsMapReverse: Map[String, EventTypeVersion]) extends Actor with ActorLogging {

  import AggregateRepositoryActor._

  // In-memory state, rebuilt from the event store on (re)start.
  private var version: AggregateVersion = AggregateVersion.ZERO
  private var aggregateRoot: AGGREGATE_ROOT = initialState()
  private val aggregateType = AggregateType(classTag[AGGREGATE_ROOT].toString)

  // Events persisted but not yet confirmed as published (read back from the store on restore).
  private var eventsToPublish = List[IdentifiableEventNoAggregateType[AGGREGATE_ROOT]]()
  // Events sent to the bus, awaiting PublishEventsAck.
  private var pendingPublish = List[EventWithIdentifier[AGGREGATE_ROOT]]()
  // Min-version queries parked until the aggregate catches up (see replayDelayedQueries).
  private var delayedQueries = List[DelayedMinVersionQuery]()

  // Replays all stored events (up to singleReadForVersionOnly, if set) into the
  // aggregate root, and for the normal (non single-read) case schedules a retry
  // tick for events that were persisted but never acknowledged as published.
  private def assureRestoredState(): Unit = {
    //TODO make it future
    version = AggregateVersion.ZERO
    aggregateRoot = initialState()
    eventStore.readAndProcessEvents[AGGREGATE_ROOT](eventsVersionsMap, aggregateId, singleReadForVersionOnly)(handleEvent)
    if(singleReadForVersionOnly.isEmpty) {
      eventsToPublish = eventStore.readEventsToPublishForAggregate[AGGREGATE_ROOT](eventsVersionsMap, aggregateId)
      context.system.scheduler.scheduleOnce(10.seconds, self, ResendPersistedMessages)(context.dispatcher)
    }
  }

  // Re-sends unacknowledged events to the bus and reschedules itself while any remain.
  // NOTE(review): once eventsToPublish becomes empty no further ticks are scheduled;
  // new ticks only start again after a restore — confirm this is intended.
  private def resendEventsToPublish(): Unit = {
    if(eventsToPublish.nonEmpty) {
      log.info("Resending messages for " + aggregateType+" "+aggregateId+" " + eventsToPublish.map(e => e.event.getClass.getSimpleName+" "+e.version))
      eventsBus ! PublishEvents(aggregateType, eventsToPublish.map(e => EventInfo(e.version, e.event, e.userId, e.timestamp)), aggregateId, Option(aggregateRoot))
      pendingPublish = (eventsToPublish.map(e => EventWithIdentifier[AGGREGATE_ROOT](e.aggregateId, e.version, e.event)) ::: pendingPublish).distinct
      context.system.scheduler.scheduleOnce(60.seconds, self, ResendPersistedMessages)(context.dispatcher)
    }
  }

  // Restore synchronously during construction so the actor never serves stale state.
  assureRestoredState()

  override def preStart(): Unit = {
    // empty
  }

  // Intentionally does NOT call preStart, so state restored in the constructor is kept as-is.
  override def postRestart(reason: Throwable) {
    // do not call preStart
  }

  override def receive = logReceive {
    case ee: PersistEvents[_] => handlePersistEvents(ee)
    case ee: OverrideAndPersistEvents[_] => handleOverrideAndPersistEvents(ee)
    case ep: EventsPersisted[_] => handleEventsPersisted(ep)
    case GetAggregateRootCurrentVersion(respondTo) => receiveReturnAggregateRoot(respondTo, None)
    case GetAggregateRootCurrentMinVersion(respondTo, version, durationMillis) => receiveReturnAggregateRootMinVersion(respondTo, version, System.currentTimeMillis() + durationMillis)
    case GetAggregateRootExactVersion(respondTo, version) => receiveReturnAggregateRoot(respondTo, Some(version)) // for following command
    case GetAggregateRootWithEventsCurrentVersion(respondTo, eventTypes) => receiveReturnAggregateRootWithEvents(respondTo, None, eventTypes) // for following command
    case GetAggregateRootWithEventsExactVersion(respondTo, version, eventTypes) => receiveReturnAggregateRootWithEvents(respondTo, Some(version), eventTypes) // for following command
    case PublishEventsAck(aggId, versions) => markPublishedEvents(aggregateId, versions)
    case ResendPersistedMessages => resendEventsToPublish()
  }

  // Applies freshly persisted events to the in-memory root (or fully re-reads the
  // stream for Undo/Duplication events), then hands them to the event bus.
  private def handleEventsPersisted(ep: EventsPersisted[_]): Unit = {
    val events = ep.asInstanceOf[EventsPersisted[AGGREGATE_ROOT]].events
    if (events.exists(_.event.isInstanceOf[UndoEvent[_]]) ||
      events.exists(_.event.isInstanceOf[DuplicationEvent[_]])) {
      // In case of those events it's easier to re read past events
      assureRestoredState()
    } else {
      events.foreach(eventIdentifier => handleEvent(eventIdentifier.userId, eventIdentifier.timestamp, eventIdentifier.event, aggregateId, eventIdentifier.version.asInt, noopEvent = false))
    }
    eventsBus ! PublishEvents(aggregateType, events.map(e => EventInfo(e.version, e.event, e.userId, e.timestamp)), aggregateId, Option(aggregateRoot))
    pendingPublish = (events.map(e => EventWithIdentifier[AGGREGATE_ROOT](e.aggregateId, e.version, e.event)).toList ::: pendingPublish).distinct
    // The aggregate advanced, so parked min-version queries may now be answerable.
    replayDelayedQueries()
  }

  // Dry-runs the new events against current state before overwriting/persisting,
  // so handler failures are reported without touching the store.
  private def handleOverrideAndPersistEvents(ee: OverrideAndPersistEvents[_]): Unit = {
    tryToApplyEvents(ee.persist) match {
      case s: Right[_, _] => overrideAndPersistEvents(ee.asInstanceOf[OverrideAndPersistEvents[AGGREGATE_ROOT]])
      case Left((exception, event)) =>
        val errorId = RandomUtil.generateRandomString(16)
        ee.persist.respondTo ! EventHandlingError(event.getClass.getSimpleName, errorId, ee.persist.commandId)
        log.error(exception, "Error handling event, errorId: [" + errorId+"]")
    }
  }

  // Dry-runs the events, then persists on success or reports an EventHandlingError.
  private def handlePersistEvents(ee: PersistEvents[_]): Unit = {
    tryToApplyEvents(ee) match {
      case s: Right[_, _] => persistEvents(ee.asInstanceOf[PersistEvents[AGGREGATE_ROOT]])
      case Left((exception, event)) =>
        val errorId = RandomUtil.generateRandomString(16)
        ee.respondTo ! EventHandlingError(event.getClass.getSimpleName, errorId, ee.commandId)
        log.error(exception, "Error handling event, errorId: [" + errorId+"]")
    }
  }

  // Folds the events over a copy of the current root without mutating actor state;
  // stops at the first event whose handler throws, returning it with the exception.
  private def tryToApplyEvents(ee: PersistEvents[_]) = {
    ee.asInstanceOf[PersistEvents[AGGREGATE_ROOT]].events.foldLeft(Right(aggregateRoot).asInstanceOf[Either[(Exception, Event[AGGREGATE_ROOT]), AGGREGATE_ROOT]])((aggEither, event) => {
      aggEither match {
        case Right(agg) => tryToHandleEvent(ee.userId, ee.timestamp, event, noopEvent = false, agg)
        case f: Left[_, _] => f
      }
    })
  }

  // Optimistic concurrency check, then transactional overwrite + persist.
  private def overrideAndPersistEvents(eventsEnvelope: OverrideAndPersistEvents[AGGREGATE_ROOT]): Unit = {
    if (eventsEnvelope.persist.expectedVersion == version) {
      persist(eventsEnvelope.persist, eventsEnvelope.rewriteEvents)(respond(eventsEnvelope.persist.respondTo))
      //      println("AggregateRepository persisted events for expected version " + eventsEnvelope.expectedVersion)
    } else {
      eventsEnvelope.persist.respondTo ! AggregateConcurrentModificationError(aggregateId, aggregateType, eventsEnvelope.persist.expectedVersion, version)
      //      println("AggregateRepository AggregateConcurrentModificationError expected " + eventsEnvelope.expectedVersion.asInt + " but i have " + version.asInt)
    }
  }

  // Optimistic concurrency check, then persist (no overwrite).
  private def persistEvents(eventsEnvelope: PersistEvents[AGGREGATE_ROOT]): Unit = {
    if (eventsEnvelope.expectedVersion == version) {
      persist(eventsEnvelope, Seq.empty)(respond(eventsEnvelope.respondTo))
      //      println("AggregateRepository persisted events for expected version " + eventsEnvelope.expectedVersion)
    } else {
      eventsEnvelope.respondTo ! AggregateConcurrentModificationError(aggregateId, aggregateType, eventsEnvelope.expectedVersion, version)
      //      println("AggregateRepository AggregateConcurrentModificationError expected " + eventsEnvelope.expectedVersion.asInt + " but i have " + version.asInt)
    }
  }

  // Replies with the aggregate (wrapped in Success/Failure); a version-0 aggregate
  // has no events and is treated as non-existent. Single-read actors die afterwards.
  private def receiveReturnAggregateRoot(respondTo: ActorRef, requestedVersion: Option[AggregateVersion]): Unit = {
    if(version == AggregateVersion.ZERO) {
      respondTo ! Failure(new NoEventsForAggregateException(aggregateId, aggregateType))
    } else {
      //      println("RepositoryActor "+this.toString+" Someone requested aggregate " + aggregateId.asLong + " of version " + requestedVersion.map(_.asInt.toString).getOrElse("None") + " and now I have version " + version.asInt)
      requestedVersion match {
        case Some(v) if v != version => respondTo ! Failure(new AggregateInIncorrectVersionException(aggregateId, aggregateType, version, v))
        case _ => respondTo ! Success(Aggregate[AGGREGATE_ROOT](aggregateId, version, Some(aggregateRoot)))
      }
    }
    if(singleReadForVersionOnly.isDefined) {
      self ! PoisonPill
    }
  }

  // Retries parked min-version queries after the aggregate advances.
  // NOTE(review): queries whose deadline has passed are dropped WITHOUT a reply;
  // the requester is expected to time out on its own — confirm callers do.
  private def replayDelayedQueries(): Unit = {
    if(delayedQueries.nonEmpty) {
      val now = System.currentTimeMillis()
      val queries = delayedQueries
      delayedQueries = List.empty
      queries.foreach(q =>
        if(q.untilTimestamp > now) {
          receiveReturnAggregateRootMinVersion(q.respondTo, q.requestedVersion, q.untilTimestamp)
        })
    }
  }

  // Answers immediately if the aggregate is at/above the requested version,
  // otherwise parks the query (pruning already-expired ones) until more events arrive.
  private def receiveReturnAggregateRootMinVersion(respondTo: ActorRef, requestedVersion: AggregateVersion, timeoutTimestamp: Long): Unit = {
    if(version >= requestedVersion) {
      respondTo ! Success(Aggregate[AGGREGATE_ROOT](aggregateId, version, Some(aggregateRoot)))
    } else {
      val now = System.currentTimeMillis()
      delayedQueries = DelayedMinVersionQuery(respondTo, requestedVersion, timeoutTimestamp) :: delayedQueries.filter(_.untilTimestamp > now)
    }
  }

  // Like receiveReturnAggregateRoot, but also re-reads the event stream to collect
  // the events whose class name is in `eventTypes` (returned in stream order).
  private def receiveReturnAggregateRootWithEvents(respondTo: ActorRef, requestedVersion: Option[AggregateVersion], eventTypes: Set[String]): Unit = {
    if(version == AggregateVersion.ZERO) {
      respondTo ! Failure(new NoEventsForAggregateException(aggregateId, aggregateType))
    } else {
      //      println("RepositoryActor "+this.toString+" Someone requested aggregate " + aggregateId.asLong + " of version " + requestedVersion.map(_.asInt.toString).getOrElse("None") + " and now I have version " + version.asInt)
      requestedVersion match {
        case Some(v) if v != version => respondTo ! Failure(new AggregateInIncorrectVersionException(aggregateId, aggregateType, version, v))
        case _ => {
          var events: List[EventWithVersion[AGGREGATE_ROOT]] = List.empty
          eventStore.readAndProcessEvents[AGGREGATE_ROOT](eventsVersionsMap, aggregateId, singleReadForVersionOnly)((userId: UserId, timestamp: Instant, event: Event[AGGREGATE_ROOT], aggId: AggregateId, eventVersion: Int, noopEvent: Boolean) => {
            if(eventTypes.contains(event.getClass.getName)) {
              events ::= EventWithVersion(AggregateVersion(eventVersion), event)
            }
          })
          respondTo ! Success(AggregateWithSelectedEvents[AGGREGATE_ROOT](Aggregate[AGGREGATE_ROOT](aggregateId, version, Some(aggregateRoot)), events.reverse))
        }
      }
    }
    if(singleReadForVersionOnly.isDefined) {
      self ! PoisonPill
    }
  }

  // Stores events (optionally overwriting existing ones) and the idempotent command
  // response in ONE local transaction, then self-sends EventsPersisted so state
  // mutation happens back on the actor's own message loop.
  private def persist(eventsEnvelope: PersistEvents[AGGREGATE_ROOT], overwrite: Iterable[EventWithVersion[AGGREGATE_ROOT]])(afterPersist: Seq[Event[AGGREGATE_ROOT]] => Unit): Unit = {
    //Future { FIXME this future can broke order in which events are stored
    val eventsWithVersionsTry = eventStore.localTx {implicit session =>
      if(overwrite.nonEmpty) {
        eventStore.overwriteEvents(aggregateId, overwrite)
      }
      eventStore.persistEvents(eventsVersionsMapReverse, aggregateId, eventsEnvelope.asInstanceOf[PersistEvents[AnyRef]]) match {
        case Failure(exception) => Failure(exception)
        case Success(eventsWithVersions) =>
          persistIdempotentCommandResponse(eventsEnvelope.commandInfo)
          Success(eventsWithVersions)
      }
    }
    eventsWithVersionsTry match {
      // A DB-level concurrency conflict is translated into the domain-level error.
      case Failure(exception) => exception match {
        case e: PSQLException if e.getLocalizedMessage.contains("Concurrent aggregate modification exception") => eventsEnvelope.respondTo ! AggregateConcurrentModificationError(aggregateId, aggregateType, eventsEnvelope.expectedVersion, version)
        case e =>
          val errorId = RandomUtil.generateRandomString(16)
          eventsEnvelope.respondTo ! EventHandlingError(eventsEnvelope.events.head.getClass.getSimpleName, errorId, eventsEnvelope.commandId)
          log.error(exception, "Error handling event, errorId: [" + errorId+"]")
      }
      case Success(eventsWithVersions) =>
        var mappedEvents = 0
        self ! EventsPersisted(eventsWithVersions.map { case (event, eventVersion) =>
          mappedEvents += 1
          IdentifiableEvent(AggregateType(event.aggregateRootType.toString), aggregateId, eventVersion, event, eventsEnvelope.userId, eventsEnvelope.timestamp)
        })
        afterPersist(eventsEnvelope.events)
    }
    //    } onFailure {
    //      case e: Exception => throw new IllegalStateException(e)
    //    }
  }

  // Stores the command's response keyed by its idempotency id (if any) inside the
  // surrounding persistence transaction, so a retried command replays the response.
  private def persistIdempotentCommandResponse(commandInfo: Option[IdempotentCommandInfo])(implicit session: DBSession): Unit = {
    commandInfo match {
      case Some(ci) =>
        ci.command match {
          case idm: IdempotentCommand[_] if idm.idempotencyId.isDefined =>
            val key = idm.idempotencyId.get.asDbKey
            commandResponseState.storeResponse(key, ci.response)
          case _ => ()
        }
      case None => ()
    }
  }

  // Acknowledges a successful modification to the command executor.
  private def respond(respondTo: ActorRef)(events: Seq[Event[AGGREGATE_ROOT]]): Unit = {
    respondTo ! CommandExecutorActor.AggregateModified
  }

  // Side-effect-free application of one event to a candidate root; noop events pass through.
  private def tryToHandleEvent(userId: UserId, timestamp: Instant, event: Event[AGGREGATE_ROOT], noopEvent: Boolean, tmpAggregateRoot: AGGREGATE_ROOT): Either[(Exception, Event[AGGREGATE_ROOT]), AGGREGATE_ROOT] = {
    if(!noopEvent) {
      try {
        Right(eventHandlers(userId, timestamp, tmpAggregateRoot)(event))
      } catch {
        case e: Exception =>
          log.error(e, "Error while handling event tryout : " + event +", aggregateRoot: " + aggregateRoot)
          Left((e, event))
      }
    } else {
      Right(tmpAggregateRoot)
    }
  }

  // Mutating application of one event to the actor's root; bumps the version only
  // for events belonging to this aggregate (not for base-aggregate events).
  private def handleEvent(userId: UserId, timestamp: Instant, event: Event[AGGREGATE_ROOT], aggId: AggregateId, eventVersion: Int, noopEvent: Boolean): Unit = {
    if(!noopEvent) {
      try {
        aggregateRoot = eventHandlers(userId, timestamp, aggregateRoot)(event)
      } catch {
        case e: Exception =>
          log.error(e, "Error while handling event: " + event +", aggregateRoot: " + aggregateRoot)
          throw e;
      }
    }
    if(aggregateId == aggId) { // otherwise it's event from base aggregate we don't want to count
      version = version.increment
    }
  }

  // Drops acknowledged events from the retry queues and asynchronously clears them
  // from the store; a published PermanentDeleteEvent terminates this actor.
  // NOTE(review): throwing inside onFailure does not reach the actor — it likely
  // ends up in the ExecutionContext's failure reporter only; confirm intent.
  def markPublishedEvents(aggregateId: AggregateId, versions: Seq[AggregateVersion]): Unit = {
    import context.dispatcher
    eventsToPublish = eventsToPublish.filterNot(e => e.aggregateId == aggregateId && versions.contains(e.version))
    val (published, remaining) = pendingPublish.partition(e => e.aggregateId == aggregateId && versions.contains(e.version))
    pendingPublish = remaining
    Future { // Fire and forget
      eventStore.deletePublishedEventsToPublish(published.map(v => EventWithIdentifier(aggregateId, v.version, v.event)))
      if(published.exists(_.event.isInstanceOf[PermanentDeleteEvent[_]])) {
        self ! PoisonPill
      }
    } onFailure {
      case e: Exception => throw new IllegalStateException(e)
    }
  }
}
|
marpiec/ReactiveCQRS
|
core/src/main/scala/io/reactivecqrs/core/aggregaterepository/AggregateRepositoryActor.scala
|
Scala
|
apache-2.0
| 17,903 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe
import wvlet.log.LogFormatter.PlainSourceCodeLogFormatter
import wvlet.log.Logger
/**
 * Convenience entry points for initializing airframe-log's global logger.
 */
package object log {
  /**
   * Initialize the logger to the default log level and default handler
   */
  def init: Unit = {
    Logger.init
  }
  /**
   * Initialize the logger, then switch the default formatter to
   * PlainSourceCodeLogFormatter so output contains no ANSI color codes.
   */
  def initNoColor: Unit = {
    Logger.init
    Logger.setDefaultFormatter(PlainSourceCodeLogFormatter)
  }
}
|
wvlet/airframe
|
airframe-log/src/main/scala/wvlet/airframe/log/package.scala
|
Scala
|
apache-2.0
| 1,005 |
package hasheq
package immutable
import scala.annotation.tailrec
/**
 * Note that each element insertion takes O(n) time, which means that creating a list map with
 * n elements will take O(n^2^) time. This makes the builder suitable only for a small number of
 * elements.
 *
 */
private[hasheq] object ListMap {
  /** The canonical empty map. The cast is safe: the empty map holds no values,
   *  so it can serve as ListMap[A, B] for any A and B. */
  def empty[A, B]: ListMap[A, B] = EmptyListMap.asInstanceOf[ListMap[A, B]]
  // Single shared empty instance.
  private object EmptyListMap extends ListMap[Any, Nothing]
}
/**
 * This class implements immutable maps using a list-based data structure. List map iterators and
 * traversal methods visit key-value pairs in the order they were first inserted.
 *
 * Entries are stored internally in reversed insertion order, which means the newest key is at the
 * head of the list. As such, methods such as `head` and `tail` are O(n), while `last` and `init`
 * are O(1). Other operations, such as inserting or removing entries, are also O(n), which makes
 * this collection suitable only for a small number of elements.
 *
 * Instances of `ListMap` represent empty maps; they can be either created by calling the
 * constructor directly, or by applying the function `ListMap.empty`.
 *
 * @tparam A the type of the keys contained in this list map
 * @tparam B the type of the values associated with the keys
 */
private[hasheq] sealed class ListMap[A, +B] extends Iterable[(A, B)] {
  // This base class IS the empty map; Node (below) overrides for non-empty maps.
  override def size: Int = 0
  override def isEmpty: Boolean = true
  override def head: (A, B) = throw new NoSuchElementException("head of empty map")
  // Lookup operations take an Equal[A] instance rather than relying on universal equality.
  def get(key: A)(implicit A: Equal[A]): Option[B] = None
  def contains(key: A)(implicit A: Equal[A]): Boolean = get(key).isDefined
  def apply(key: A)(implicit A: Equal[A]): B = get(key).get
  def updated[B1 >: B](key: A, value: B1)(implicit A: Equal[A]): ListMap[A, B1] = new Node[B1](key, value)
  def +[B1 >: B](kv: (A, B1))(implicit A: Equal[A]): ListMap[A, B1] = new Node[B1](kv._1, kv._2)
  def -(key: A)(implicit A: Equal[A]): ListMap[A, B] = this
  def ++[B1 >: B](xs: Iterable[(A, B1)])(implicit A: Equal[A]): ListMap[A, B1] =
    if (xs.isEmpty) this
    else xs.foldLeft(this: ListMap[A, B1])(_ + _)
  // Internal storage is newest-first, so iteration reverses to insertion order.
  def iterator: Iterator[(A, B)] = {
    def reverseList = {
      var curr: ListMap[A, B] = this
      var res: List[(A, B)] = Nil
      while (!curr.isEmpty) {
        res = (curr.key, curr.value) :: res
        curr = curr.next
      }
      res
    }
    reverseList.iterator
  }
  // NOTE(review): this traverses newest-to-oldest while prepending Nodes, so the
  // resulting map's internal order (and therefore its iteration order) appears
  // REVERSED relative to the insertion-order contract documented above — verify.
  override def filter(p: ((A, B)) => Boolean): ListMap[A, B] = {
    @tailrec def go(cur: ListMap[A, B], acc: ListMap[A, B]): ListMap[A, B] =
      if(cur.isEmpty) acc
      else if(p((cur.key, cur.value))) go(cur.next, new acc.Node(cur.key, cur.value))
      else go(cur.next, acc)
    go(this, ListMap.empty)
  }
  override def filterNot(p: ((A, B)) => Boolean): ListMap[A, B] = {
    filter(p andThen (!_))
  }
  // NOTE(review): `take` has the same order-reversal caveat as filter above.
  // Also note: since storage is newest-first, this splits off the n NEWEST
  // entries, not the first n in insertion order — verify against callers.
  override def splitAt(n: Int): (ListMap[A, B], ListMap[A, B]) = {
    @tailrec def take(cur: ListMap[A, B], n: Int, acc: ListMap[A, B]): (ListMap[A, B], ListMap[A, B]) =
      if(n > 0 && !cur.isEmpty) take(cur.next, n-1, new acc.Node(cur.key, cur.value))
      else (acc, cur)
    take(this, n, ListMap.empty)
  }
  // Accessors for the head entry and tail map; only meaningful on Node.
  protected def key: A = throw new NoSuchElementException("key of empty map")
  protected def value: B = throw new NoSuchElementException("value of empty map")
  protected def next: ListMap[A, B] = throw new NoSuchElementException("next of empty map")
  override def stringPrefix = "ListMap"
  /**
   * Represents an entry in the `ListMap`: one key-value pair whose `next`
   * points at the rest of the map (the enclosing ListMap instance).
   */
  protected class Node[B1 >: B](override protected val key: A,
                                override protected val value: B1) extends ListMap[A, B1] with Serializable {
    override def size: Int = sizeInternal(this, 0)
    @tailrec private[this] def sizeInternal(cur: ListMap[A, B1], acc: Int): Int =
      if (cur.isEmpty) acc
      else sizeInternal(cur.next, acc + 1)
    override def isEmpty: Boolean = false
    override def apply(k: A)(implicit A: Equal[A]): B1 = applyInternal(this, k)
    override def head: (A, B1) = (key, value)
    @tailrec private[this] def applyInternal(cur: ListMap[A, B1], k: A)(implicit A: Equal[A]): B1 =
      if (cur.isEmpty) throw new NoSuchElementException("key not found: " + k)
      else if (A.equiv(k, cur.key)) cur.value
      else applyInternal(cur.next, k)
    override def get(k: A)(implicit A: Equal[A]): Option[B1] = getInternal(this, k)
    @tailrec private[this] def getInternal(cur: ListMap[A, B1], k: A)(implicit A: Equal[A]): Option[B1] =
      if (cur.isEmpty) None
      else if (A.equiv(k, cur.key)) Some(cur.value)
      else getInternal(cur.next, k)
    override def contains(k: A)(implicit A: Equal[A]): Boolean = containsInternal(this, k)
    @tailrec private[this] def containsInternal(cur: ListMap[A, B1], k: A)(implicit A: Equal[A]): Boolean =
      if(cur.isEmpty) false
      else if (A.equiv(k, cur.key)) true
      else containsInternal(cur.next, k)
    // Insertion of an existing key removes the old entry first, then appends the
    // new pair as the newest entry.
    override def updated[B2 >: B1](k: A, v: B2)(implicit A: Equal[A]): ListMap[A, B2] = {
      val m = this - k
      new m.Node[B2](k, v)
    }
    override def +[B2 >: B1](kv: (A, B2))(implicit A: Equal[A]): ListMap[A, B2] = {
      val m = this - kv._1
      new m.Node[B2](kv._1, kv._2)
    }
    override def -(k: A)(implicit A: Equal[A]): ListMap[A, B1] = removeInternal(k, this, Nil)
    // Walks to the matched key, then rebuilds the skipped prefix on top of the
    // remainder, preserving relative order of the surviving entries.
    @tailrec private[this] def removeInternal(k: A, cur: ListMap[A, B1], acc: List[ListMap[A, B1]])(implicit A: Equal[A]): ListMap[A, B1] =
      if (cur.isEmpty) acc.last
      else if (A.equiv(k, cur.key)) (cur.next /: acc) { case (t, h) => new t.Node(h.key, h.value) }
      else removeInternal(k, cur.next, cur :: acc)
    override protected def next: ListMap[A, B1] = ListMap.this
  }
}
|
TomasMikula/hasheq
|
src/main/scala/hasheq/immutable/ListMap.scala
|
Scala
|
bsd-3-clause
| 5,749 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.submission
import org.orbeon.oxf.xforms.event.events.XFormsSubmitErrorEvent
import org.orbeon.saxon.om.NodeInfo
import org.orbeon.oxf.xforms.model.DataModel
import org.orbeon.oxf.xforms.action.XFormsActions
import org.orbeon.oxf.xforms.XFormsContainingDocument
import org.orbeon.oxf.util.{ConnectionResult, XPathCache}
import org.orbeon.oxf.xforms.model.DataModel._
/**
 * Handle replace="text": stores a text submission response and, on replace,
 * writes it into the node addressed by the submission's targetref (or the
 * default replace instance's root element).
 */
class TextReplacer(submission: XFormsModelSubmission, containingDocument: XFormsContainingDocument) extends BaseReplacer(submission, containingDocument) {
  // Response body captured by deserialize() for use in replace().
  private var responseBody: String = _

  // Captures the text response body; a null body (binary response) is a
  // resource-error per the XForms 1.1 spec quoted below.
  def deserialize(connectionResult: ConnectionResult, p: XFormsModelSubmission#SubmissionParameters, p2: XFormsModelSubmission#SecondPassParameters) =
    Option(connectionResult.getTextResponseBody) match {
      case Some(responseBody) β
        this.responseBody = responseBody
      case None β
        // This is a binary result
        // Don't store anything for now as per the spec, but we could do something better by going beyond the spec
        // NetUtils.inputStreamToAnyURI(pipelineContext, connectionResult.resultInputStream, NetUtils.SESSION_SCOPE);
        // XForms 1.1: "For a success response including a body that is both a non-XML media type (i.e. with a
        // content type not matching any of the specifiers in [RFC 3023]) and a non-text type (i.e. with a content
        // type not matching text/*), when the value of the replace attribute on element submission is "text",
        // nothing in the document is replaced and submission processing concludes after dispatching
        // xforms-submit-error with appropriate context information, including an error-type of resource-error."
        val message = """Mediatype is neither text nor XML for replace="text": """ + connectionResult.getResponseMediaType
        throw new XFormsSubmissionException(submission, message, "reading response body",
          new XFormsSubmitErrorEvent(submission, XFormsSubmitErrorEvent.RESOURCE_ERROR, connectionResult))
    }

  // Resolves the destination node, writes responseBody into it (via DataModel),
  // and dispatches xforms-submit-done; failures become xforms-submit-error events.
  def replace(connectionResult: ConnectionResult, p: XFormsModelSubmission#SubmissionParameters, p2: XFormsModelSubmission#SecondPassParameters) = {
    // XForms 1.1: "If the replace attribute contains the value "text" and the submission response conforms to an
    // XML mediatype (as defined by the content type specifiers in [RFC 3023]) or a text media type (as defined by
    // a content type specifier of text/*), then the response data is encoded as text and replaces the content of
    // the replacement target node."
    // XForms 1.1: "If the processing of the targetref attribute fails, then submission processing ends after
    // dispatching the event xforms-submit-error with an error-type of target-error."
    def throwSubmissionException(message: String) =
      throw new XFormsSubmissionException(submission, message, "processing targetref attribute",
        new XFormsSubmitErrorEvent(submission, XFormsSubmitErrorEvent.TARGET_ERROR, connectionResult))
    // Find target location
    val destinationNodeInfo =
      if (submission.getTargetref ne null) {
        // Evaluate destination node
        XPathCache.evaluateSingle(p.xpathContext, p.refNodeInfo, submission.getTargetref, containingDocument.getRequestStats.addXPathStat) match {
          case nodeInfo: NodeInfo β nodeInfo
          case _ β throwSubmissionException("""targetref attribute doesn't point to a node for replace="text".""")
        }
      } else {
        // Use default destination
        submission.findReplaceInstanceNoTargetref(p.refInstance).rootElement
      }
    def handleSetValueSuccess(oldValue: String) =
      DataModel.logAndNotifyValueChange(containingDocument,
        "submission", destinationNodeInfo, oldValue, responseBody, isCalculate = false)(containingDocument.getIndentedLogger(XFormsActions.LOGGING_CATEGORY))
    def handleSetValueError(reason: Reason) =
      throwSubmissionException(
        reason match {
          case DisallowedNodeReason β """targetref attribute doesn't point to an element without children or to an attribute for replace="text"."""
          case ReadonlyNodeReason β """targetref attribute points to a readonly node for replace="text"."""
        }
      )
    // Set value into the instance
    // NOTE: Here we decided to use the actions logger, by compatibility with xf:setvalue. Anything we would like to log in "submission" mode?
    DataModel.setValueIfChanged(destinationNodeInfo, responseBody, handleSetValueSuccess, handleSetValueError)
    // Dispatch xforms-submit-done
    submission.sendSubmitDone(connectionResult)
  }
}
|
evlist/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/submission/TextReplacer.scala
|
Scala
|
lgpl-2.1
| 5,686 |
object P17 {
  /**
   * Splits a list into two parts: the first `n` elements and the remainder (problem P17).
   *
   * Mirrors `List.splitAt` semantics: `n <= 0` yields `(Nil, ls)` and
   * `n >= ls.length` yields `(ls, Nil)`. Previously a negative `n` fell through
   * to the consuming case and returned `(ls, Nil)` instead.
   *
   * Runs in O(min(n, ls.length)) via a single tail-recursive pass.
   */
  def split[A](n: Int, ls: List[A]): (List[A], List[A]) = {
    @annotation.tailrec
    def helper(i: Int, front: List[A], end: List[A]): (List[A], List[A]) = (i, end) match {
      case (_, Nil)           => (front.reverse, Nil)   // ran out of elements
      case (j, _) if j <= 0   => (front.reverse, end)   // collected enough (or n was non-positive)
      case (_, h :: tail)     => helper(i - 1, h :: front, tail)
    }
    helper(n, Nil, ls)
  }
}
|
liefswanson/S-99
|
src/main/scala/17.scala
|
Scala
|
gpl-2.0
| 345 |
package com.danielasfregola.twitter4s.http.clients.rest.statuses.parameters
import com.danielasfregola.twitter4s.http.marshalling.Parameters
/**
 * Parameters for the Twitter REST "retweeters/ids" request.
 * Fields use snake_case, presumably so they map 1:1 onto the HTTP query
 * parameter names via the `Parameters` marshalling trait — confirm there.
 */
private[twitter4s] final case class RetweetersIdsParameters(id: Long, count: Int, cursor: Long, stringify_ids: Boolean)
    extends Parameters
|
DanielaSfregola/twitter4s
|
src/main/scala/com/danielasfregola/twitter4s/http/clients/rest/statuses/parameters/RetweetersIdsParameters.scala
|
Scala
|
apache-2.0
| 286 |
import language.higherKinds
/**
 * Type class for containers whose elements can be folded into a single value
 * by mapping each element into a Monoid and combining the results.
 *
 * @tparam F the container type constructor
 */
trait Foldable[F[_]] {
  // Maps every element of `foldable` with `f` and combines via B's Monoid.
  def foldMap[A,B:Monoid](foldable: F[A])(f: A => B): B
}
object Foldable {
  /** Summons the Foldable instance for F and delegates to its foldMap. */
  def foldMap[A,B:Monoid,F[A]:Foldable](fa: F[A])(g: A => B) =
    implicitly[Foldable[F]].foldMap(fa)(g)
  // Exercise stubs below are intentionally unimplemented (???) — they throw
  // NotImplementedError if called.
  def fold[A:Monoid,F[A]:Foldable](f: F[A]): A = ???
  def toList[A,F[A]:Foldable](f: F[A]): List[A] = ???
  def size[A,F[A]:Foldable](f: F[A]): Int = ???
}
|
grzegorzbalcerek/scala-exercises
|
Foldable/Foldable.scala
|
Scala
|
bsd-2-clause
| 390 |
package repositories.storage.dao.events
import no.uio.musit.models.{EventId, MuseumId}
import no.uio.musit.test.MusitSpecWithAppPerSuite
import no.uio.musit.test.matchers.MusitResultValues
import utils.testhelpers.{BaseDummyData, EventGenerators}
/**
 * Integration spec for EnvReqDao using the per-suite test application.
 *
 * NOTE(review): the two tests share the same DAO/storage and depend on
 * execution order — the first insert expects EventId(1) and the second
 * EventId(2); reordering or running them in isolation would break them.
 */
class EnvReqDaoSpec
    extends MusitSpecWithAppPerSuite
    with BaseDummyData
    with EventGenerators
    with MusitResultValues {

  // DAO under test, resolved from the test application's instance cache.
  val dao = fromInstanceCache[EnvReqDao]

  "The EnvReqDao" when {
    "working with environment requirements" should {
      "successfully insert a new environment requirement" in {
        val mid = MuseumId(2)
        val er  = createEnvRequirement(Some(defaultNodeId))
        // First insert into a fresh store is expected to yield EventId 1.
        dao.insert(mid, er).futureValue.successValue mustBe EventId(1L)
      }
      "return the environment requirement associated with the provided id" in {
        val mid = MuseumId(2)
        val er  = createEnvRequirement(Some(defaultNodeId))
        val eid = dao.insert(mid, er).futureValue.successValue
        eid mustBe EventId(2L)
        // Round-trip: the stored event must preserve all requirement fields.
        val res = dao.findById(mid, eid).futureValue.successValue.value
        res.eventType mustBe er.eventType
        res.note mustBe er.note
        res.registeredBy mustBe Some(defaultActorId)
        res.registeredDate must not be None
        res.light mustBe er.light
        res.temperature mustBe er.temperature
        res.hypoxicAir mustBe er.hypoxicAir
        res.airHumidity mustBe er.airHumidity
        res.cleaning mustBe er.cleaning
      }
    }
  }
}
|
kpmeen/musit
|
service_storagefacility/test/repositories/storage/dao/events/EnvReqDaoSpec.scala
|
Scala
|
gpl-2.0
| 1,481 |
// basic while loop
def whileLoop {
  // Emits "while: 1" through "while: 3", then a separating blank line.
  // A do-while is equivalent here since the counter starts within the bound.
  var counter = 1
  do {
    println("while: " + counter)
    counter += 1
  } while (counter <= 3)
  println()
}
whileLoop
// basic for loop
def forLoop {
  // Emits "for: 0" through "for: 2", then a separating blank line.
  (0 until 3).foreach(n => println("for: " + n))
  println()
}
forLoop
// ruby ruby ruby ruby
def rubyStyleForLoop {
  // Iterates an array element-by-element (Ruby-block style), emitting
  // "arrow: 1" through "arrow: 3", then a separating blank line.
  val numbers = Array(1, 2, 3)
  for (n <- numbers) println("arrow: " + n)
  println()
}
rubyStyleForLoop
|
Mastermindzh/Seven-Languages-in-Seven-Weeks
|
Scala/Day 1/loops.scala
|
Scala
|
mit
| 408 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.template
import io.gatling.charts.component.Component
import io.gatling.commons.shared.unstable.model.stats.Group
/**
 * Page template for a single request's detail report page; delegates all
 * rendering to PageTemplate with the request name as the page identifier.
 * NOTE(review): the hard-coded `true` argument's meaning depends on the
 * PageTemplate constructor signature — confirm there before changing it.
 */
private[charts] class RequestDetailsPageTemplate(title: String, requestName: String, group: Option[Group], components: Component*)
    extends PageTemplate(title, true, Some(requestName), group, components: _*)
|
gatling/gatling
|
gatling-charts/src/main/scala/io/gatling/charts/template/RequestDetailsPageTemplate.scala
|
Scala
|
apache-2.0
| 972 |
package com.trainologic.samples.petclinic.service
import com.trainologic.samples.petclinic._
import repository.MapBasedReadOnlyOwnerRepository
import cats.Id
import cats.data.Xor
import cats.data.Reader
import model.Owner
import org.atnos.eff.all._
import org.atnos.eff.syntax.all._
import repository.OwnerRepository
import org.atnos.eff._
import web.OwnerController
import cats.syntax.applicative._
import cats.syntax.apply._
import cats.Applicative
import cats.implicits._
/**
 * Pure (in-memory) smoke run of ClinicServiceImpl using the Eff monad stack:
 * queries a map-backed owner repository and prints the interpreted result.
 *
 * NOTE(review): extends App has initialization-order pitfalls for non-trivial
 * entry points; a plain `def main(args: Array[String]): Unit` would be safer.
 */
object OwnerServicePure extends App {
  def test1 = {
    // Fixture: two owners share last name "Davis", one has "Bavis".
    val owners: Map[Int, Owner] = Map(
      1 -> Owner(Some(1), "john", "Davis", "TA", "TA", "0000"),
      2 -> Owner(Some(2), "john2", "Davis", "TA", "TA", "0000"),
      3 -> Owner(Some(3), "john3", "Bavis", "TA", "TA", "0000"))
    val service1 = new ClinicServiceImpl[Id]
    // Program: expect exactly the two "Davis" owners.
    val check1 = for {
      owners <- service1.findOwnerByLastName("Davis")
    } yield owners.size == 2
    val simpleRepo = MapBasedReadOnlyOwnerRepository(owners)
    // Interpret the effect stack: Reader (repo), Xor (errors), ValidateNel, pure.
    val result = runReader(simpleRepo)(check1).runXor.runNel.runPure
    println(result)
    /*
    val theApplicative: Applicative[Eff[Fx3[Task, String Xor ?, Validate[String, ?]], ?]] = implicitly
    for now it crashes compiler after moving to cats
     val theProg = for {
      lb <- theApplicative.replicateA(10, runReader(simpleRepo)(check1))
      lb2 <- theApplicative.replicateA(10, runReader(h2Repo)(prog3))
      lids <- theApplicative.replicateA(10, runReader(h2Repo)(prog2))
    } yield lb ++ lb2 ++ lids
    val results = awaitTask(theProg)(20 seconds).runDisjunction.runNel.run
    println(results)*/
  }
  test1
}
|
Trainologic/petclinic_eff
|
src/test/scala/com/trainologic/samples/petclinic/service/OwnerTestPure.scala
|
Scala
|
apache-2.0
| 1,620 |
package nars.language
import java.util._
import nars.io.Symbols
import nars.storage.Memory
import SetExt._
//remove if not needed
import scala.collection.JavaConversions._
import CompoundTerm._
object SetExt {
  /**
   * Try to make a new set from one component. Called by the inference rules.
   * @param t The component
   * @param memory Reference to the memory
   * @return A compound generated or a term it reduced to
   */
  def make(t: Term, memory: Memory): Term = {
    val set = new TreeSet[Term]()
    set.add(t)
    make(set, memory)
  }
  /**
   * Try to make a new SetExt. Called by StringParser.
   * @return the Term generated from the arguments
   * @param argList The list of components
   * @param memory Reference to the memory
   */
  def make(argList: ArrayList[Term], memory: Memory): Term = {
    // TreeSet deduplicates and orders the components.
    val set = new TreeSet[Term](argList)
    make(set, memory)
  }
  /**
   * Try to make a new compound from a set of components. Called by the public make methods.
   * @param set a set of Term as components
   * @param memory Reference to the memory
   * @return the Term generated from the arguments, or null for an empty set
   */
  def make(set: TreeSet[Term], memory: Memory): Term = {
    if (set.isEmpty) {
      return null
    }
    val argument = new ArrayList[Term](set)
    val name = makeSetName(Symbols.SET_EXT_OPENER, argument, Symbols.SET_EXT_CLOSER)
    // Reuse an existing term with the same name from memory, if present.
    val t = memory.nameToListedTerm(name)
    if ((t != null)) t else new SetExt(argument)
  }
}
/**
 * An extensionally defined set, which contains one or more instances.
 *
 * Instances are created only through the factory methods on the companion
 * object, which intern terms via the memory's name table.
 */
class SetExt private (arg: ArrayList[Term]) extends CompoundTerm(arg) {

  /**
   * Constructor with full values, called by clone.
   *
   * Note: this is a Java-to-Scala translation; the original called
   * super(n, cs, con, i) directly. Here we must chain to the primary
   * constructor and then restore the remaining state by hand, so the
   * order of the three assignments below mirrors the superclass
   * constructor's field initialization.
   *
   * @param n The name of the term
   * @param cs Component list
   * @param con Whether the term is constant (no open variables)
   * @param i Syntactic complexity of the compound
   */
  private def this(n: String,
      cs: ArrayList[Term],
      con: Boolean,
      i: Short) {
    // super(n, cs, con, i)
    this(cs)
    setName(n)
    this.isConstant_ = con
    this.complexity = i
  }

  /**
   * Clone a SetExt.
   *
   * Components are deep-copied via cloneList; name, constancy and
   * complexity are carried over unchanged.
   *
   * @return A new object, to be casted into a SetExt
   */
  override def clone(): AnyRef = {
    new SetExt(name, cloneList(components).asInstanceOf[ArrayList[Term]], isConstant_, complexity)
  }

  /**
   * Get the operator of the term.
   * @return the operator of the term (the "{" set opener)
   */
  def operator(): String = "" + Symbols.SET_EXT_OPENER
  /**
   * Check if the compound is commutative.
   * @return true; sets are unordered, so component order never matters
   */
  override def isCommutative(): Boolean = true
  /**
   * Make a String representation of the set, override the default.
   * @return the name built as "{c1,c2,...}" from the components
   */
  override def makeName(): String = {
    makeSetName(Symbols.SET_EXT_OPENER, components, Symbols.SET_EXT_CLOSER)
  }
}
|
automenta/opennars
|
nars_scala/src/main/scala/nars/language/SetExt.scala
|
Scala
|
gpl-2.0
| 2,785 |
package com.streamsend.migration
import java.io.File
import org.gradle.api.tasks.{Input, InputDirectory, InputFile}
import org.gradle.api.internal.ConventionTask
/**
 * Base Gradle task for cassandra migrations.
 *
 * Holds the four pieces of configuration shared by all concrete migration
 * tasks; Gradle populates them through the setters and reads them back via
 * the annotated getters for up-to-date checking.
 */
abstract class CassandraTask extends ConventionTask {

  // Backing fields, assigned by the Gradle DSL through the setters below.
  var configurationFile: File = _
  var migrationDirectory: File = _
  var migrationName: String = _
  var environment: String = _

  /** The cassandra configuration file; tracked as a task input file. */
  @InputFile
  def getConfigurationFile: File = configurationFile

  def setConfigurationFile(source: File): Unit = {
    this.configurationFile = source
  }

  /** Directory holding the migration scripts; tracked as an input directory. */
  @InputDirectory
  def getMigrationDirectory: File = migrationDirectory

  def setMigrationDirectory(migrationDirectory: File): Unit = {
    this.migrationDirectory = migrationDirectory
  }

  /** Name of the migration to run; tracked as a plain task input. */
  @Input
  def getMigrationName: String = migrationName

  def setMigrationName(name: String): Unit = {
    this.migrationName = name
  }

  /** Target environment (e.g. dev/prod); tracked as a plain task input. */
  @Input
  def getEnvironment: String = environment

  def setEnvironment(environment: String): Unit = {
    this.environment = environment
  }
}
|
iamsteveholmes/cassandra-migration-gradle-plugin
|
src/main/scala/com/streamsend/migration/CassandraTask.scala
|
Scala
|
mit
| 983 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnetexamples.cnntextclassification
import java.io.File
import java.net.URL
import org.apache.commons.io.FileUtils
import org.apache.mxnet.Context
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import org.slf4j.LoggerFactory
import scala.sys.process.Process
/**
 * Integration test for the CNN text-classification example.
 * This will run as a part of "make scalatest"
 */
class CNNClassifierExampleSuite extends FunSuite with BeforeAndAfterAll {
  private val logger = LoggerFactory.getLogger(classOf[CNNClassifierExampleSuite])

  /** Download `url` into `dest` unless a previous run already cached the file. */
  private def downloadIfMissing(url: String, dest: File): Unit = {
    if (!dest.exists()) {
      FileUtils.copyURLToFile(new URL(url), dest)
    }
  }

  test("Example CI - CNN Example") {
    // GPU-only test: run only when the CI environment explicitly opts in.
    if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
      System.getenv("SCALA_TEST_ON_GPU").toInt == 1) {
      val context = Context.gpu()
      val tempDirPath = System.getProperty("java.io.tmpdir")
      val w2vModelName = "GoogleNews-vectors-negative300-SLIM.bin"
      logger.info("tempDirPath: %s".format(tempDirPath))
      logger.info("Downloading CNN text...")
      // All fixtures live under the same S3 prefix; mirror it locally in $TMP/CNN.
      val baseUrl = "https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/CNN"
      val modelDirPath = tempDirPath + File.separator + "CNN"
      downloadIfMissing(baseUrl + "/rt-polarity.pos", new File(modelDirPath, "rt-polarity.pos"))
      downloadIfMissing(baseUrl + "/rt-polarity.neg", new File(modelDirPath, "rt-polarity.neg"))
      logger.info("Downloading pretrained Word2Vec Model, may take a while")
      downloadIfMissing(baseUrl + "/" + w2vModelName, new File(modelDirPath, w2vModelName))
      val output = CNNTextClassification.test(modelDirPath + File.separator + w2vModelName,
        modelDirPath, context, modelDirPath)
      // Clean up the (large) fixtures; deleteDirectory is portable, unlike "rm -rf".
      FileUtils.deleteDirectory(new File(modelDirPath))
      assert(output >= 0.4f)
    } else {
      logger.info("Skip this test as it intended for GPU only")
    }
  }
}
|
rahul003/mxnet
|
scala-package/examples/src/test/scala/org/apache/mxnetexamples/cnntextclassification/CNNClassifierExampleSuite.scala
|
Scala
|
apache-2.0
| 2,923 |
/******************************************************************************
* Copyright (c) 2014, Equal Experts Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Midas Project.
******************************************************************************/
package com.ee.midas.dsl.expressions
import org.specs2.mutable.Specification
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import org.bson.BasicBSONObject
import org.specs2.matcher.DataTables
@RunWith(classOf[JUnitRunner])
/**
 * Specs for [[Field]] expressions: evaluating a (possibly dot-nested)
 * field path against a BSON document yields the field's value wrapped
 * in a Literal, and toString round-trips the written form.
 */
class FieldSpecs extends Specification with DataTables {
  "Field" should {
    "give the value of a field if present in the document" in {
      //Given
      val fieldExpression = Field("zip")
      val zipValue = 4000058
      val document = new BasicBSONObject("zip", zipValue)
      //When
      val result = fieldExpression.evaluate(document)
      //Then: the raw value comes back wrapped in a Literal
      result mustEqual Literal(zipValue)
    }
    "give appropriate result if field is not present in the document" in {
      //Given
      val fieldExpression = Field("pin")
      val document = new BasicBSONObject()
      //When
      val result = fieldExpression.evaluate(document)
      //Then: a missing field evaluates to Literal(null), not an error
      result mustEqual Literal(null)
    }
    "give the value of a 2-level nested field" in {
      //Given: dot-notation path into a sub-document
      val fieldExpression = Field("address.zip")
      val zipValue = 4000058
      val document = new BasicBSONObject("address", new BasicBSONObject("zip", zipValue))
      //When
      val result = fieldExpression.evaluate(document)
      //Then
      result mustEqual Literal(zipValue)
    }
    "give the value of a 3-level nested field" in {
      //Given: three-segment path, including a numeric-looking key ("1")
      val fieldExpression = Field("address.line.1")
      val value = "Some Street"
      val document = new BasicBSONObject("address",
        new BasicBSONObject("line", new BasicBSONObject("1", value)))
      //When
      val result = fieldExpression.evaluate(document)
      //Then
      result mustEqual Literal(value)
    }
    // DataTables: each row pairs a Field with its expected toString form.
    "stringify just like how it is written" ^ {
      "field"             | "fieldString"       |
      Field("age")        ! "Field(age)"        |
      Field("order.date") ! "Field(order.date)" |>
      { (field, fieldString) => field.toString mustEqual fieldString }
    }
  }
}
|
EqualExperts/Midas
|
src/test/scala/com/ee/midas/dsl/expressions/FieldSpecs.scala
|
Scala
|
bsd-2-clause
| 3,754 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600.v3.retriever.CT600BoxRetriever
/**
 * CT600A (v3) box LPQ04.
 *
 * Records the yes/no answer to whether the company is controlled by five or
 * fewer participators, none of whom are directors. The answer is mandatory:
 * validation fails when no value has been supplied.
 */
case class LPQ04(value: Option[Boolean])
  extends CtBoxIdentifier(name = "Is the company controlled by 5 or fewer participators none of whom are directors?")
  with CtOptionalBoolean
  with Input
  with ValidatableBox[CT600BoxRetriever] {

  def validate(boxRetriever: CT600BoxRetriever): Set[CtValidation] = {
    // The only rule for this box: an answer must be present.
    validateBooleanAsMandatory("LPQ04", this)
  }
}
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600a/v3/LPQ04.scala
|
Scala
|
apache-2.0
| 1,072 |
package com.xuanyuansen.algo.layers
import com.xuanyuansen.algo.params.LayerParam
/**
 * Created by wangshuai on 16/7/28.
 * base class of layer, each layer has its param
 * if recurrent, each layer has many nodes
 */
class Layer {
}
/** Loss layer: placeholder, no members defined yet. */
class LossLayer extends Layer {
}
/** LSTM recurrent layer: placeholder, no members defined yet. */
class LSTMLayer extends Layer {
}
/** GRU recurrent layer: placeholder, no members defined yet. */
class GRULayer extends Layer {
}
|
xuanyuansen/scalaLSTM
|
src/main/scala/com/xuanyuansen/algo/layers/Layer.scala
|
Scala
|
apache-2.0
| 345 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.